From b9cf501c1818d736cc316010693484d2cb091add Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Fri, 25 Jan 2019 11:05:58 -0800 Subject: [PATCH 001/624] update_engine: use shared DLC path strings Use shared path strings for DLC install path. BUG=chromium:923592 TEST=emerge-kefka update_engine CQ-DEPEND=CL:1436377 Change-Id: Ica9ba9ae3432cb3fafdb4be54413fb61c2477c88 Reviewed-on: https://chromium-review.googlesource.com/1436118 Commit-Ready: Xiaochu Liu Tested-by: Xiaochu Liu Reviewed-by: Amin Hassani --- boot_control_chromeos.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index ccba316c..b390f61e 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -24,6 +24,7 @@ #include #include #include +#include #include extern "C" { @@ -43,7 +44,6 @@ const char* kChromeOSPartitionNameRoot = "root"; const char* kAndroidPartitionNameKernel = "boot"; const char* kAndroidPartitionNameRoot = "system"; -const char kDlcInstallRootDirectoryEncrypted[] = "/home/chronos/dlc"; const char kPartitionNamePrefixDlc[] = "dlc_"; const char kPartitionNameDlcA[] = "dlc_a"; const char kPartitionNameDlcB[] = "dlc_b"; @@ -161,7 +161,7 @@ bool BootControlChromeOS::GetPartitionDevice(const string& partition_name, << partition_name; return false; } - *device = base::FilePath(kDlcInstallRootDirectoryEncrypted) + *device = base::FilePath(imageloader::kDlcImageRootpath) .Append(dlc_module_id) .Append(slot == 0 ? kPartitionNameDlcA : kPartitionNameDlcB) .Append(kPartitionNameDlcImage) From 0ef9a2fc5b51ae22721dcd95b088d3274d2ec060 Mon Sep 17 00:00:00 2001 From: Zentaro Kavanagh Date: Mon, 2 Jul 2018 12:05:07 -0700 Subject: [PATCH 002/624] update_engine: Update the TPM with max_rollforward on rollback - Determines the value from max_rollforward_(kernel|firmware) based on the list of the last N release values from stable. - Sets the TPM values once it has been determined that the new image will boot and be installed. 
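For a rough sketch of the value involved (an assumed helper, not code taken
verbatim from this change): the max_rollforward written to the TPM packs the
image's 16-bit key version and 16-bit version into a single 32-bit integer.

  #include <cstdint>

  // Hypothetical helper showing how a key-version pair becomes the
  // 32-bit max_rollforward value stored in the TPM.
  uint32_t PackMaxRollforward(uint16_t key_version, uint16_t version) {
    return static_cast<uint32_t>(key_version) << 16 |
           static_cast<uint32_t>(version);
  }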
BUG=chromium:840432 TEST=cros_run_unit_tests --board=samus --packages update_engine Change-Id: I9620fe01cfea49e798e1397dada55ec6bec93047 Reviewed-on: https://chromium-review.googlesource.com/1419006 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Bailey Berro Reviewed-by: Amin Hassani --- mock_update_attempter.h | 3 +- omaha_request_action.cc | 37 ++++++++- omaha_request_action_unittest.cc | 88 +++++++++++++++++++- omaha_request_params.h | 11 +++ omaha_response.h | 7 ++ omaha_response_handler_action.cc | 57 +++++++++++++ omaha_response_handler_action_unittest.cc | 97 ++++++++++++++++++++++- update_attempter.cc | 8 ++ update_attempter.h | 2 + update_attempter_unittest.cc | 36 +++++---- 10 files changed, 324 insertions(+), 22 deletions(-) diff --git a/mock_update_attempter.h b/mock_update_attempter.h index 5df5a6b1..d97163d3 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -30,12 +30,13 @@ class MockUpdateAttempter : public UpdateAttempter { public: using UpdateAttempter::UpdateAttempter; - MOCK_METHOD7(Update, + MOCK_METHOD8(Update, void(const std::string& app_version, const std::string& omaha_url, const std::string& target_channel, const std::string& target_version_prefix, bool rollback_allowed, + int rollback_allowed_milestones, bool obey_proxies, bool interactive)); diff --git a/omaha_request_action.cc b/omaha_request_action.cc index fae9471c..f1678eef 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -1002,7 +1002,8 @@ bool ParsePackage(OmahaParserData::App* app, // Parses the 2 key version strings kernel_version and firmware_version. If the // field is not present, or cannot be parsed the values default to 0xffff. -void ParseRollbackVersions(OmahaParserData* parser_data, +void ParseRollbackVersions(int allowed_milestones, + OmahaParserData* parser_data, OmahaResponse* output_object) { utils::ParseRollbackKeyVersion( parser_data->updatecheck_attrs[kAttrFirmwareVersion], @@ -1012,6 +1013,37 @@ void ParseRollbackVersions(OmahaParserData* parser_data, parser_data->updatecheck_attrs[kAttrKernelVersion], &output_object->rollback_key_version.kernel_key, &output_object->rollback_key_version.kernel); + + // Create the attribute name strings for milestone N - allowed_milestones. 
+ const string firmware_max_rollforward_attr = + base::StringPrintf("%s_%i", kAttrFirmwareVersion, allowed_milestones); + const string kernel_max_rollforward_attr = + base::StringPrintf("%s_%i", kAttrKernelVersion, allowed_milestones); + + const bool max_firmware_and_kernel_exist = + parser_data->updatecheck_attrs.count(firmware_max_rollforward_attr) > 0 && + parser_data->updatecheck_attrs.count(kernel_max_rollforward_attr) > 0; + + string firmware_version; + string kernel_version; + if (max_firmware_and_kernel_exist) { + firmware_version = + parser_data->updatecheck_attrs[firmware_max_rollforward_attr]; + kernel_version = + parser_data->updatecheck_attrs[kernel_max_rollforward_attr]; + } + + LOG(INFO) << "For milestone N-" << allowed_milestones + << " firmware_key_version=" << firmware_version + << " kernel_key_version=" << kernel_version; + + OmahaResponse::RollbackKeyVersion version; + utils::ParseRollbackKeyVersion( + firmware_version, &version.firmware_key, &version.firmware); + utils::ParseRollbackKeyVersion( + kernel_version, &version.kernel_key, &version.kernel); + + output_object->past_rollback_key_version = std::move(version); } } // namespace @@ -1083,7 +1115,8 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, // Parses the rollback versions of the current image. If the fields do not // exist they default to 0xffff for the 4 key versions. - ParseRollbackVersions(parser_data, output_object); + ParseRollbackVersions( + params_->rollback_allowed_milestones(), parser_data, output_object); if (!ParseStatus(parser_data, output_object, completer)) return false; diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 1786bcc2..66fc6fe9 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -18,6 +18,7 @@ #include +#include #include #include #include @@ -56,6 +57,7 @@ using base::Time; using base::TimeDelta; using chromeos_update_manager::kRollforwardInfinity; +using std::pair; using std::string; using std::vector; using testing::_; @@ -86,7 +88,18 @@ const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck"; // values they care about. struct FakeUpdateResponse { string GetRollbackVersionAttributes() const { - return (rollback ? " _rollback=\"true\"" : "") + + string num_milestones; +#if BASE_VER < 576279 + num_milestones = base::IntToString(rollback_allowed_milestones); +#else + num_milestones = base::NumberToString(rollback_allowed_milestones); +#endif + const string rollback_version = + " _firmware_version_" + num_milestones + "=\"" + + past_rollback_key_version.first + "\"" + " _kernel_version_" + + num_milestones + "=\"" + past_rollback_key_version.second + "\""; + + return (rollback ? " _rollback=\"true\"" : "") + rollback_version + (!rollback_firmware_version.empty() ? " _firmware_version=\"" + rollback_firmware_version + "\"" : "") + @@ -239,6 +252,14 @@ struct FakeUpdateResponse { string rollback_firmware_version = ""; // The verified boot kernel key version for the rollback image. string rollback_kernel_version = ""; + // The number of milestones back that the verified boot key version has been + // supplied. + uint32_t rollback_allowed_milestones = 0; + // The verified boot key version for the + // |current - rollback_allowed_milestones| most recent release. + // The pair contains each + // of which is in the form "key_version.version". 
+ pair past_rollback_key_version; }; } // namespace @@ -3144,4 +3165,69 @@ TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) { EXPECT_EQ(fake_update_response_.current_version, response.version); } +TEST_F(OmahaRequestActionTest, PastRollbackVersionsNoEntries) { + OmahaResponse response; + fake_update_response_.rollback = true; + fake_update_response_.rollback_allowed_milestones = 4; + request_params_.set_rollback_allowed_milestones(4); + TestRollbackCheck(false /* is_consumer_device */, + 4 /* rollback_allowed_milestones */, + true /* is_policy_loaded */, + &response); + EXPECT_TRUE(response.is_rollback); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.firmware_key); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.firmware); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.kernel_key); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.kernel); +} + +TEST_F(OmahaRequestActionTest, PastRollbackVersionsValidEntries) { + OmahaResponse response; + fake_update_response_.rollback = true; + fake_update_response_.rollback_allowed_milestones = 4; + fake_update_response_.rollback_firmware_version = "4.3"; + fake_update_response_.rollback_kernel_version = "2.1"; + fake_update_response_.past_rollback_key_version = + std::make_pair("16.15", "14.13"); + TestRollbackCheck(false /* is_consumer_device */, + 4 /* rollback_allowed_milestones */, + true /* is_policy_loaded */, + &response); + EXPECT_TRUE(response.is_rollback); + EXPECT_EQ(16, response.past_rollback_key_version.firmware_key); + EXPECT_EQ(15, response.past_rollback_key_version.firmware); + EXPECT_EQ(14, response.past_rollback_key_version.kernel_key); + EXPECT_EQ(13, response.past_rollback_key_version.kernel); +} + +TEST_F(OmahaRequestActionTest, MismatchNumberOfVersions) { + OmahaResponse response; + fake_update_response_.rollback = true; + fake_update_response_.rollback_allowed_milestones = 2; + request_params_.set_rollback_allowed_milestones(4); + + // Since |request_params_.rollback_allowed_milestones| is 4 but the response + // is constructed with |fake_update_response_.rollback_allowed_milestones| set + // to 2, OmahaRequestAction will look for the key values of N-4 version but + // only the N-2 version will exist. 
+ + TestRollbackCheck(false /* is_consumer_device */, + 2 /* rollback_allowed_milestones */, + true /* is_policy_loaded */, + &response); + EXPECT_TRUE(response.is_rollback); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.firmware_key); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.firmware); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.kernel_key); + EXPECT_EQ(std::numeric_limits::max(), + response.past_rollback_key_version.kernel); +} + } // namespace chromeos_update_engine diff --git a/omaha_request_params.h b/omaha_request_params.h index 18235c04..6691bee4 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -132,6 +132,14 @@ class OmahaRequestParams { inline bool rollback_allowed() const { return rollback_allowed_; } + inline void set_rollback_allowed_milestones(int rollback_allowed_milestones) { + rollback_allowed_milestones_ = rollback_allowed_milestones; + } + + inline int rollback_allowed_milestones() const { + return rollback_allowed_milestones_; + } + inline void set_wall_clock_based_wait_enabled(bool enabled) { wall_clock_based_wait_enabled_ = enabled; } @@ -322,6 +330,9 @@ class OmahaRequestParams { // Whether the client is accepting rollback images too. bool rollback_allowed_; + // How many milestones the client can rollback to. + int rollback_allowed_milestones_; + // True if scattering or staging are enabled, in which case waiting_period_ // specifies the amount of absolute time that we've to wait for before sending // a request to Omaha. diff --git a/omaha_response.h b/omaha_response.h index 0ac09df1..ab253a19 100644 --- a/omaha_response.h +++ b/omaha_response.h @@ -102,6 +102,13 @@ struct OmahaResponse { // Key versions of the returned rollback image. Values are 0xffff if the // image not a rollback, or the fields were not present. RollbackKeyVersion rollback_key_version; + + // Key versions of the N - rollback_allowed_milestones release. For example, + // if the current version is 70 and rollback_allowed_milestones is 4, this + // will contain the key versions of version 66. This is used to ensure that + // the kernel and firmware keys are at most those of v66 so that v66 can be + // rolled back to. + RollbackKeyVersion past_rollback_key_version; }; static_assert(sizeof(off_t) == 8, "off_t not 64 bit"); diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index ab41b848..d05bc467 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -34,6 +34,7 @@ #include "update_engine/update_manager/policy.h" #include "update_engine/update_manager/update_manager.h" +using chromeos_update_manager::kRollforwardInfinity; using chromeos_update_manager::Policy; using chromeos_update_manager::UpdateManager; using std::numeric_limits; @@ -145,10 +146,13 @@ void OmahaResponseHandlerAction::PerformAction() { completer.set_code(ErrorCode::kOmahaResponseInvalid); return; } + + // Calculate the values on the version values on current device. 
auto min_kernel_key_version = static_cast( system_state_->hardware()->GetMinKernelKeyVersion()); auto min_firmware_key_version = static_cast( system_state_->hardware()->GetMinFirmwareKeyVersion()); + uint32_t kernel_key_version = static_cast(response.rollback_key_version.kernel_key) << 16 | static_cast(response.rollback_key_version.kernel); @@ -157,6 +161,12 @@ void OmahaResponseHandlerAction::PerformAction() { << 16 | static_cast(response.rollback_key_version.firmware); + LOG(INFO) << "Rollback image versions:" + << " device_kernel_key_version=" << min_kernel_key_version + << " image_kernel_key_version=" << kernel_key_version + << " device_firmware_key_version=" << min_firmware_key_version + << " image_firmware_key_version=" << firmware_key_version; + // Don't attempt a rollback if the versions are incompatible or the // target image does not specify the version information. if (kernel_key_version == numeric_limits::max() || @@ -208,6 +218,53 @@ void OmahaResponseHandlerAction::PerformAction() { update_manager->PolicyRequest( &Policy::UpdateCanBeApplied, &ec, &install_plan_); completer.set_code(ec); + + const auto allowed_milestones = params->rollback_allowed_milestones(); + if (allowed_milestones > 0) { + auto max_firmware_rollforward = numeric_limits::max(); + auto max_kernel_rollforward = numeric_limits::max(); + + // Determine the version to update the max rollforward verified boot + // value. + OmahaResponse::RollbackKeyVersion version = + response.past_rollback_key_version; + + // Determine the max rollforward values to be set in the TPM. + max_firmware_rollforward = static_cast(version.firmware_key) + << 16 | + static_cast(version.firmware); + max_kernel_rollforward = static_cast(version.kernel_key) << 16 | + static_cast(version.kernel); + + // In the case that the value is 0xffffffff, log a warning because the + // device should not be installing a rollback image without having version + // information. + if (max_firmware_rollforward == numeric_limits::max() || + max_kernel_rollforward == numeric_limits::max()) { + LOG(WARNING) + << "Max rollforward values were not sent in rollback response: " + << " max_kernel_rollforward=" << max_kernel_rollforward + << " max_firmware_rollforward=" << max_firmware_rollforward + << " rollback_allowed_milestones=" + << params->rollback_allowed_milestones(); + } else { + LOG(INFO) << "Setting the max rollforward values: " + << " max_kernel_rollforward=" << max_kernel_rollforward + << " max_firmware_rollforward=" << max_firmware_rollforward + << " rollback_allowed_milestones=" + << params->rollback_allowed_milestones(); + system_state_->hardware()->SetMaxKernelKeyRollforward( + max_kernel_rollforward); + // TODO(crbug/783998): Set max firmware rollforward when implemented. + } + } else { + LOG(INFO) << "Rollback is not allowed. Setting max rollforward values" + << " to infinity"; + // When rollback is not allowed, explicitly set the max roll forward to + // infinity. + system_state_->hardware()->SetMaxKernelKeyRollforward(kRollforwardInfinity); + // TODO(crbug/783998): Set max firmware rollforward when implemented. 
+ } } bool OmahaResponseHandlerAction::AreHashChecksMandatory( diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc index b47040b6..0ebf8483 100644 --- a/omaha_response_handler_action_unittest.cc +++ b/omaha_response_handler_action_unittest.cc @@ -38,6 +38,7 @@ using chromeos_update_engine::test_utils::System; using chromeos_update_engine::test_utils::WriteFileString; using chromeos_update_manager::EvalStatus; using chromeos_update_manager::FakeUpdateManager; +using chromeos_update_manager::kRollforwardInfinity; using chromeos_update_manager::MockPolicy; using std::string; using testing::_; @@ -534,21 +535,44 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackTest) { .size = 1, .hash = kPayloadHashHex}); in.is_rollback = true; - in.rollback_key_version.kernel = 1; - in.rollback_key_version.kernel = 2; - in.rollback_key_version.firmware_key = 3; - in.rollback_key_version.firmware = 4; + + // The rollback payload is 2 versions behind stable. + in.rollback_key_version.kernel = 24; + in.rollback_key_version.kernel = 23; + in.rollback_key_version.firmware_key = 22; + in.rollback_key_version.firmware = 21; + + OmahaResponse::RollbackKeyVersion m4; + m4.firmware_key = 16; + m4.firmware = 15; + m4.kernel_key = 14; + m4.kernel = 13; + + in.past_rollback_key_version = m4; fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002); fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004); + fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(0xaaaaaaaa); + // TODO(crbug/783998): Add support for firmware when implemented. + OmahaRequestParams params(&fake_system_state_); params.set_rollback_allowed(true); + params.set_rollback_allowed_milestones(4); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_TRUE(install_plan.is_rollback); + + // The max rollforward should be set the values of the image + // rollback_allowed_milestones (4 for this test) in the past. + const uint32_t expected_max_kernel_rollforward = + static_cast(m4.kernel_key) << 16 | + static_cast(m4.kernel); + EXPECT_EQ(expected_max_kernel_rollforward, + fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + // TODO(crbug/783998): Add support for firmware when implemented. } TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) { @@ -563,18 +587,36 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) { in.rollback_key_version.firmware_key = 3; in.rollback_key_version.firmware = 4; + OmahaResponse::RollbackKeyVersion m4; + m4.firmware_key = 16; + m4.firmware = 15; + m4.kernel_key = 14; + m4.kernel = 13; + in.past_rollback_key_version = m4; + fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002); fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004); + const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; + fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + current_kernel_max_rollforward); OmahaRequestParams params(&fake_system_state_); params.set_rollback_allowed(true); + params.set_rollback_allowed_milestones(4); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; EXPECT_FALSE(DoTest(in, "", &install_plan)); + + // Max rollforward is not changed in error cases. + EXPECT_EQ(current_kernel_max_rollforward, + fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + // TODO(crbug/783998): Add support for firmware when implemented. 
} TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) { + // TODO(crbug/783998): Add handling for max_firmware_rollforward when + // implemented. OmahaResponse in; in.update_exists = true; in.packages.push_back({.payload_urls = {"https://RollbackTest"}, @@ -591,6 +633,7 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) { OmahaRequestParams params(&fake_system_state_); params.set_rollback_allowed(true); + params.set_rollback_allowed_milestones(4); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; @@ -605,13 +648,23 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackNotRollbackTest) { .hash = kPayloadHashHex}); in.is_rollback = false; + const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; + fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + current_kernel_max_rollforward); + OmahaRequestParams params(&fake_system_state_); params.set_rollback_allowed(true); + params.set_rollback_allowed_milestones(4); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.is_rollback); + + // Max rollforward is not changed for non-rollback cases. + EXPECT_EQ(current_kernel_max_rollforward, + fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + // TODO(crbug/783998): Add support for firmware when implemented. } TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) { @@ -624,10 +677,46 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) { OmahaRequestParams params(&fake_system_state_); params.set_rollback_allowed(false); + params.set_rollback_allowed_milestones(4); + + const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; + fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + current_kernel_max_rollforward); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; EXPECT_FALSE(DoTest(in, "", &install_plan)); + + // This case generates an error so, do not update max rollforward. + EXPECT_EQ(current_kernel_max_rollforward, + fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + // TODO(crbug/783998): Add support for firmware when implemented. +} + +TEST_F(OmahaResponseHandlerActionTest, NormalUpdateWithZeroMilestonesAllowed) { + OmahaResponse in; + in.update_exists = true; + in.packages.push_back({.payload_urls = {"https://RollbackTest"}, + .size = 1, + .hash = kPayloadHashHex}); + in.is_rollback = false; + + OmahaRequestParams params(&fake_system_state_); + params.set_rollback_allowed(true); + params.set_rollback_allowed_milestones(0); + + const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; + fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + current_kernel_max_rollforward); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + + // When allowed_milestones is 0, this is set to infinity. + EXPECT_EQ(kRollforwardInfinity, + fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + // TODO(crbug/783998): Add support for firmware when implemented. 
} TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) { diff --git a/update_attempter.cc b/update_attempter.cc index ee571db6..31a6ce47 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -239,6 +239,7 @@ void UpdateAttempter::Update(const string& app_version, const string& target_channel, const string& target_version_prefix, bool rollback_allowed, + int rollback_allowed_milestones, bool obey_proxies, bool interactive) { // This is normally called frequently enough so it's appropriate to use as a @@ -274,6 +275,7 @@ void UpdateAttempter::Update(const string& app_version, target_channel, target_version_prefix, rollback_allowed, + rollback_allowed_milestones, obey_proxies, interactive)) { return; @@ -347,6 +349,7 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, const string& target_channel, const string& target_version_prefix, bool rollback_allowed, + int rollback_allowed_milestones, bool obey_proxies, bool interactive) { http_response_code_ = 0; @@ -371,6 +374,10 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, CalculateScatteringParams(interactive); } + // Set how many milestones of rollback are allowed. + omaha_request_params_->set_rollback_allowed_milestones( + rollback_allowed_milestones); + CalculateP2PParams(interactive); if (payload_state->GetUsingP2PForDownloading() || payload_state->GetUsingP2PForSharing()) { @@ -946,6 +953,7 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, params.target_channel, params.target_version_prefix, params.rollback_allowed, + params.rollback_allowed_milestones, /*obey_proxies=*/false, params.interactive); // Always clear the forced app_version and omaha_url after an update attempt diff --git a/update_attempter.h b/update_attempter.h index c27f8a40..c106001a 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -84,6 +84,7 @@ class UpdateAttempter : public ActionProcessorDelegate, const std::string& target_channel, const std::string& target_version_prefix, bool rollback_allowed, + int rollback_allowed_milestones, bool obey_proxies, bool interactive); @@ -339,6 +340,7 @@ class UpdateAttempter : public ActionProcessorDelegate, const std::string& target_channel, const std::string& target_version_prefix, bool rollback_allowed, + int rollback_allowed_milestones, bool obey_proxies, bool interactive); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 579c7368..ec6066ba 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -506,7 +506,7 @@ void UpdateAttempterTest::UpdateTestStart() { EXPECT_CALL(*processor_, StartProcessing()); } - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); loop_.PostTask(FROM_HERE, base::Bind(&UpdateAttempterTest::UpdateTestVerify, base::Unretained(this))); @@ -706,7 +706,7 @@ void UpdateAttempterTest::P2PNotEnabledStart() { fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading_); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -728,7 +728,7 @@ void UpdateAttempterTest::P2PEnabledStartingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(false); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); 
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -751,7 +751,7 @@ void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -773,7 +773,7 @@ void UpdateAttempterTest::P2PEnabledStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_TRUE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -801,6 +801,7 @@ void UpdateAttempterTest::P2PEnabledInteractiveStart() { "", "", false, + /*rollback_allowed_milestones=*/0, false, /*interactive=*/true); EXPECT_FALSE(actual_using_p2p_for_downloading()); @@ -832,7 +833,7 @@ void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); ScheduleQuitMainLoop(); @@ -870,7 +871,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); // Make sure the file still exists. @@ -886,7 +887,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { // However, if the count is already 0, it's not decremented. Test that. initial_value = 0; EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value)); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount)); EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value)); EXPECT_EQ(initial_value, new_value); @@ -938,6 +939,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { "", "", false, + /*rollback_allowed_milestones=*/0, false, /*interactive=*/true); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); @@ -991,7 +993,7 @@ void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); // Check that prefs have the correct values. 
int64_t update_count; EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count)); @@ -1048,7 +1050,7 @@ void UpdateAttempterTest::StagingOffIfInteractiveStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, false, /* interactive = */ true); + attempter_.Update("", "", "", "", false, 0, false, /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1068,7 +1070,7 @@ void UpdateAttempterTest::StagingOffIfOobeStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, false, /* interactive = */ true); + attempter_.Update("", "", "", "", false, 0, false, /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1238,11 +1240,11 @@ TEST_F(UpdateAttempterTest, UpdateAfterInstall) { } TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) { - attempter_.CalculateUpdateParams("", "", "", "1234", false, false, false); + attempter_.CalculateUpdateParams("", "", "", "1234", false, 4, false, false); EXPECT_EQ("1234", fake_system_state_.request_params()->target_version_prefix()); - attempter_.CalculateUpdateParams("", "", "", "", false, false, false); + attempter_.CalculateUpdateParams("", "", "", "", false, 4, false, false); EXPECT_TRUE( fake_system_state_.request_params()->target_version_prefix().empty()); } @@ -1253,18 +1255,24 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { "", "1234", /*rollback_allowed=*/true, + /*rollback_allowed_milestones=*/4, false, false); EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed()); + EXPECT_EQ(4, + fake_system_state_.request_params()->rollback_allowed_milestones()); attempter_.CalculateUpdateParams("", "", "", "1234", /*rollback_allowed=*/false, + /*rollback_allowed_milestones=*/4, false, false); EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed()); + EXPECT_EQ(4, + fake_system_state_.request_params()->rollback_allowed_milestones()); } TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { @@ -1384,7 +1392,7 @@ void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, SetRollbackHappened(false)) .Times(expected_reset ? 1 : 0); attempter_.policy_provider_ = std::move(mock_policy_provider); - attempter_.Update("", "", "", "", false, false, false); + attempter_.Update("", "", "", "", false, 0, false, false); ScheduleQuitMainLoop(); } From 77c25fce5fe363cf84d521ed2c0148b4e0efdc07 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 29 Jan 2019 10:24:19 -0800 Subject: [PATCH 003/624] update_engine: Add delta payload support for squashfs This patch adds support for generating delta payloads for squashfs images. This is needed to get delta payloads for DLC images. In order to get the supported major and minor versions of the update_engine that matches the squashfs image (either squashfs image contains the entire rootfs, including the update_engine, or the image is a DLC), we need to read /etc/update_engine inside the image. We do this by calling unsquashfs and only unsquashing the target file and later reading its content into a key-value store to be used for delta payload generation. 
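As a minimal sketch of the last step (assuming etc/update_engine.conf has
already been unsquashed and its contents read into |config_content|), the
settings end up in a key-value store roughly like this:

  #include <string>

  #include <brillo/key_value_store.h>

  // Parse the extracted update_engine.conf and look up the payload minor
  // version, mirroring how LoadSettings() reads it after this change.
  std::string GetMinorVersion(const std::string& config_content) {
    brillo::KeyValueStore store;
    std::string minor_version;
    if (store.LoadFromString(config_content))
      store.GetString("PAYLOAD_MINOR_VERSION", &minor_version);
    return minor_version;
  }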
BUG=chromium:926986 TEST=unittest TEST=delta_generator --out_file=output --partition_names=dlc --new_partitions=dlc.img --old_partitions=dlc.img Change-Id: Ib5599032c873223a5caca82918e138d8b4fcec43 Reviewed-on: https://chromium-review.googlesource.com/1446278 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Amin Hassani Reviewed-by: Sen Jiang --- payload_generator/deflate_utils.cc | 5 +- .../payload_generation_config.cc | 9 +++ payload_generator/squashfs_filesystem.cc | 63 ++++++++++++++++-- payload_generator/squashfs_filesystem.h | 5 +- .../squashfs_filesystem_unittest.cc | 16 ++++- sample_images/generate_images.sh | 1 + sample_images/sample_images.tar.bz2 | Bin 5273 -> 6833 bytes 7 files changed, 89 insertions(+), 10 deletions(-) diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc index a7a05032..ef8d257d 100644 --- a/payload_generator/deflate_utils.cc +++ b/payload_generator/deflate_utils.cc @@ -273,8 +273,9 @@ bool PreprocessPartitionFiles(const PartitionConfig& part, TEST_AND_RETURN_FALSE( CopyExtentsToFile(part.path, file.extents, path.value(), kBlockSize)); // Test if it is actually a Squashfs file. - auto sqfs = - SquashfsFilesystem::CreateFromFile(path.value(), extract_deflates); + auto sqfs = SquashfsFilesystem::CreateFromFile(path.value(), + extract_deflates, + /*load_settings=*/false); if (sqfs) { // It is an squashfs file. Get its files to replace with itself. vector files; diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index 694c71fa..c364797c 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -32,6 +32,7 @@ #include "update_engine/payload_generator/ext2_filesystem.h" #include "update_engine/payload_generator/mapfile_filesystem.h" #include "update_engine/payload_generator/raw_filesystem.h" +#include "update_engine/payload_generator/squashfs_filesystem.h" using std::string; @@ -86,6 +87,14 @@ bool PartitionConfig::OpenFilesystem() { return true; } + fs_interface = SquashfsFilesystem::CreateFromFile(path, + /*extract_deflates=*/true, + /*load_settings=*/true); + if (fs_interface) { + TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize); + return true; + } + // Fall back to a RAW filesystem. 
TEST_AND_RETURN_FALSE(size % kBlockSize == 0); fs_interface = RawFilesystem::Create( diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc index 6c892f53..c423b69c 100644 --- a/payload_generator/squashfs_filesystem.cc +++ b/payload_generator/squashfs_filesystem.cc @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -36,6 +37,8 @@ #include "update_engine/payload_generator/extent_utils.h" #include "update_engine/update_metadata.pb.h" +using base::FilePath; +using base::ScopedTempDir; using std::string; using std::unique_ptr; using std::vector; @@ -49,6 +52,8 @@ constexpr size_t kSquashfsSuperBlockSize = 96; constexpr uint64_t kSquashfsCompressedBit = 1 << 24; constexpr uint32_t kSquashfsZlibCompression = 1; +constexpr char kUpdateEngineConf[] = "etc/update_engine.conf"; + bool ReadSquashfsHeader(const brillo::Blob blob, SquashfsFilesystem::SquashfsHeader* header) { if (blob.size() < kSquashfsSuperBlockSize) { @@ -88,6 +93,45 @@ bool GetFileMapContent(const string& sqfs_path, string* map) { return true; } +bool GetUpdateEngineConfig(const std::string& sqfs_path, string* config) { + ScopedTempDir unsquash_dir; + if (!unsquash_dir.CreateUniqueTempDir()) { + PLOG(ERROR) << "Failed to create a temporary directory."; + return false; + } + + // Run unsquashfs to extract update_engine.conf + // -f: To force overriding if the target directory exists. + // -d: The directory to unsquash the files. + vector cmd = {"unsquashfs", + "-f", + "-d", + unsquash_dir.GetPath().value(), + sqfs_path, + kUpdateEngineConf}; + int exit_code; + if (!Subprocess::SynchronousExec(cmd, &exit_code, nullptr) || + exit_code != 0) { + PLOG(ERROR) << "Failed to unsquashfs etc/update_engine.conf: "; + return false; + } + + auto config_path = unsquash_dir.GetPath().Append(kUpdateEngineConf); + string config_content; + if (!utils::ReadFile(config_path.value(), &config_content)) { + PLOG(ERROR) << "Failed to read " << config_path.value(); + return false; + } + + if (config_content.empty()) { + LOG(ERROR) << "update_engine config file was empty!!"; + return false; + } + + *config = std::move(config_content); + return true; +} + } // namespace bool SquashfsFilesystem::Init(const string& map, @@ -239,12 +283,12 @@ bool SquashfsFilesystem::Init(const string& map, } unique_ptr SquashfsFilesystem::CreateFromFile( - const string& sqfs_path, bool extract_deflates) { + const string& sqfs_path, bool extract_deflates, bool load_settings) { if (sqfs_path.empty()) return nullptr; brillo::StreamPtr sqfs_file = - brillo::FileStream::Open(base::FilePath(sqfs_path), + brillo::FileStream::Open(FilePath(sqfs_path), brillo::Stream::AccessMode::READ, brillo::FileStream::Disposition::OPEN_EXISTING, nullptr); @@ -278,6 +322,12 @@ unique_ptr SquashfsFilesystem::CreateFromFile( return nullptr; } + if (load_settings) { + if (!GetUpdateEngineConfig(sqfs_path, &sqfs->update_engine_config_)) { + return nullptr; + } + } + return sqfs; } @@ -311,9 +361,12 @@ bool SquashfsFilesystem::GetFiles(vector* files) const { } bool SquashfsFilesystem::LoadSettings(brillo::KeyValueStore* store) const { - // Settings not supported in squashfs. 
- LOG(ERROR) << "squashfs doesn't support LoadSettings()."; - return false; + if (!store->LoadFromString(update_engine_config_)) { + LOG(ERROR) << "Failed to load the settings with config: " + << update_engine_config_; + return false; + } + return true; } bool SquashfsFilesystem::IsSquashfsImage(const brillo::Blob& blob) { diff --git a/payload_generator/squashfs_filesystem.h b/payload_generator/squashfs_filesystem.h index b79f8c7d..5045dfc6 100644 --- a/payload_generator/squashfs_filesystem.h +++ b/payload_generator/squashfs_filesystem.h @@ -59,7 +59,7 @@ class SquashfsFilesystem : public FilesystemInterface { // |extract_deflates| is true, it will process files to find location of all // deflate streams. static std::unique_ptr CreateFromFile( - const std::string& sqfs_path, bool extract_deflates); + const std::string& sqfs_path, bool extract_deflates, bool load_settings); // Creates the file system from a file map |filemap| which is a multi-line // string with each line with the following format: @@ -113,6 +113,9 @@ class SquashfsFilesystem : public FilesystemInterface { // All the files in the filesystem. std::vector files_; + // The content of /etc/update_engine.conf. + std::string update_engine_config_; + DISALLOW_COPY_AND_ASSIGN(SquashfsFilesystem); }; diff --git a/payload_generator/squashfs_filesystem_unittest.cc b/payload_generator/squashfs_filesystem_unittest.cc index 29fcf1cd..68ca9df2 100644 --- a/payload_generator/squashfs_filesystem_unittest.cc +++ b/payload_generator/squashfs_filesystem_unittest.cc @@ -112,7 +112,7 @@ class SquashfsFilesystemTest : public ::testing::Test { #ifdef __CHROMEOS__ TEST_F(SquashfsFilesystemTest, EmptyFilesystemTest) { unique_ptr fs = SquashfsFilesystem::CreateFromFile( - GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true); + GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true, false); CheckSquashfs(fs); // Even an empty squashfs filesystem is rounded up to 4K. @@ -133,7 +133,7 @@ TEST_F(SquashfsFilesystemTest, EmptyFilesystemTest) { TEST_F(SquashfsFilesystemTest, DefaultFilesystemTest) { unique_ptr fs = SquashfsFilesystem::CreateFromFile( - GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true); + GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true, false); CheckSquashfs(fs); vector files; @@ -148,6 +148,18 @@ TEST_F(SquashfsFilesystemTest, DefaultFilesystemTest) { EXPECT_EQ(files[0].name, file.name); EXPECT_EQ(files[0].extents, file.extents); } + +TEST_F(SquashfsFilesystemTest, UpdateEngineConfigTest) { + unique_ptr fs = SquashfsFilesystem::CreateFromFile( + GetBuildArtifactsPath("gen/disk_sqfs_unittest.img"), true, true); + CheckSquashfs(fs); + + brillo::KeyValueStore kvs; + EXPECT_TRUE(fs->LoadSettings(&kvs)); + string minor_version; + EXPECT_TRUE(kvs.GetString("PAYLOAD_MINOR_VERSION", &minor_version)); + EXPECT_EQ(minor_version, "1234"); +} #endif // __CHROMEOS__ TEST_F(SquashfsFilesystemTest, SimpleFileMapTest) { diff --git a/sample_images/generate_images.sh b/sample_images/generate_images.sh index 8478682e..e0b54ae9 100755 --- a/sample_images/generate_images.sh +++ b/sample_images/generate_images.sh @@ -270,6 +270,7 @@ main() { # Add squashfs sample images. generate_image disk_sqfs_empty sqfs empty $((1024 * 4096)) 4096 generate_image disk_sqfs_default sqfs default $((1024 * 4096)) 4096 + generate_image disk_sqfs_unittest sqfs unittest $((1024 * 4096)) 4096 # Generate the tarball and delete temporary images. 
echo "Packing tar file sample_images.tar.bz2" diff --git a/sample_images/sample_images.tar.bz2 b/sample_images/sample_images.tar.bz2 index 6215482254262b767526bc886f6dad4d4746c73e..5c80a5112f1d4aea138ba3155807e3c0498d98af 100644 GIT binary patch literal 6833 zcmV;i8cyXxT4*^jL0KkKSt}pwoB$g<|NsC0|NsC0|NsC0|NsC0|NsC0|NsC0|NsC0 z|NsC0|Nr0>obBHppggPZdAYRPZS`qcoW(l4Na7qMpe#$e-$))M$FC>9n4SJ&{KA#Aw9I zdTMzxo(i9Wo&`Nq@flO}+KJ+5H9Z(geto}s3I%0gg4rkMbfX^19MC#dx{rcYBenrL}U z6F|ag>N6-ZJxx6{MrekNpQwzP1|vp5^#+C`KpvoIXda_Rjj5m-WHdC;XwyvwfJlh~ zOr{gjQ}kqNJq)HodSxODp#nmnhe>K>u$XlQzk8hVX1G;JrS0ilyX4FG8M8UT4s zG}4F=QzG-w7>LV6=l)ICUg6KZ;oLOl_L!ZgY0sL)}iqX=ZedMBxq zK;EKegux!8(^2Ye283c5nrJ+uAlgkoQK_TUXc#pZ(qS@U8e(lh>OBKMK_ZYqOqwHV zV3hDv^wm6MC#5|!nVM1WQ&VbX9#hdjQKa?T!Z=YC~p@qSfpf${k9d z(-L9~jL|hk#KzLvY@K@Z-rH$jys+vhEjTdGu-w${UOcP84f-S0ga{*oUWTb^BYL@k z6}`ceid4{w37|n@n70f`E&CR$D4@4Z2?Yi%^XgjRv|~n=P1k;8)#gpKOlcyqpf-$1 z42Fipgk`jVs?bWoA+SxMAkKCg$dFvH1qK2{j1n?h6fp}Si=~+0LMCwGR#X`G15j4Y z*r9T41hJY7EJ3kgijx31wg9ZTA;2KT%RU&)?HG(Y^#;^2!9qzDG=WhjfFU#`aE#E4 z>zEE*N-JoiLWbGmA^_C$a7z#{o)vF!%Hh@?h?EG)C5(uW^`zs3bL0a!SpfvJ3}waj zSkM2982(#03x{5P2pgJU&uCA%96IRZFrDj!^2X4QIb z2%^NWGle&Ro-x_Du2bZqHpFrA;Dc`lw)HMyM8*JX!%9GcYE`tzt>qO6mW86i1|ow2 zv|6>H3XfjYG>yM#u+`u#O4 z*5^tNFJjt*TGB`a6x_jrtToXw10o=hDOdvMT3F@?C8Q9*Oc(?-Byq}iP9jholA!`Y zXC_xdfzq&+Siw+vtu9HS0kR)z7@1K53kI0sp#}Z40Szf+F|ch7gJ9YlY;PC?X2_UI z5tH z8%Y)cWDTyJ^35f41;n^Y^8)C&ccOt(4SEQ4KaUkimjW zvjYY&>8XA}3LsE}6Hn1}dGPz~YRh3QD+G;$1tSa3SlH8JS@6>&v5<@9S9EDOnhho) zv`_qljG@3W-Ga;s7G;SOSq2S>2a3T65JVWmS8Z$<^eJTC2XzI-#Pf42o^6H-t-B}_i5qC;eJfKq~wA7}eHmt=!qDrzDU40X8!`!QR2CB#NWpucOW zf1~DVEb`|?S-^esoQ@8El#?;$Gco6KJVzR3UfL# zXLo*<$%BYlH=#8i%N2gUFlE<&?n@^HcwL2r;Z@_S7zq5_|;{~r72WBba$yTtbX zjT!69xZi8Tx3@lEKvE3?qg@Nz=a>z(L4}ZTERUvgYWhnaHgbk11wjxfvw&uG6Ffup zhhiMh0UdnZ?xy`04)+Cu&5TFCRrqT-|LG`s;uMkL5srsPq)7siDU89yq^%qZq^Kkc zKO5^zPu&&5pAu!-G^T$stC-nnU-KHCu%W>iDNU9xfbiUa<5Al%(@)`NK5f2D^Uad7 zf&kzG^N0)>M_X5$r0sh~;hjiu9a#FK$bktPR97%?)2O&bByzwof##=MXmK!}8}8v- zy^^u0$P6gXre=Y1zCb#4liPX?&yfrnTPYqKpi1mifoMB5(Xe@CFPLtUdC(XxEjS|r z14UKS$8!@MpJSO)YDCms7a2^2EPEc0CaJ$`J`aV#SFapmwwDonCrkz2kF^f>$ zBf^%Z9UeV|Bmaz?5E3xlEgWKECDu03C;}qc)tHD1ql5XtL&bAu5|qdxR8kMLqT05s z(fK+u1ACvHvesx?DBqDy-S=`zHi?vk1LN;VFFIX@)~oNVMCUHrkPo>U`k5)Ne#4s2 zij|&Olj5>Gf`a;Gg^y_rK4Hrv{^Y=M?JJj?!mPyoTxpv#lL~N9LTx6bU2$w85WkFG zEr&(kdLkQpE6Sv341O!MGK}BP?L%5K>QDKcV>vvD4);FNcN9ffbig2=t| zzzb+>3qeP&m-P;7q{gH$J?A!dNsgvu}0# zch_hnCQl((Ej(XDlxFrRL23D*%}#_GP4g&O!v?~&MO~~0xd1^k?4Bj+s$}QRc-}E% z9f|HTenkqlP%yDf`i^Fo4n7jjO^k>Od*y7mJ3?F=HBBzEug5|nDpR?~T`)unm{iVA zFS?l9r-naWI{n^#vLHn#3&LOw2*AUfHQS?IyVRSawbx1>Re?^C;F7Rx(TvW>%`8gD z+%xS&T6(Fgcs3Wn3`8*1)A6}6;>-a0X-!KWv^_zPcuT;F*rjaQL_zQ>J6j4aA{06A zS<=uYJic{^|TWm>gBk}QUXS~q=aKev6!zE@=I;t43#{vWb)o;0`iaA^`5dy%}vdY&q-Xc+mc%=#$_!L$rJW^kq838JDL1Jk%2=3GpeYy#vcj~~7hjL_dvi}^>DX-ACBA|(Q;?BxJX~4dqMA9S z$yR92l*?8HWyOca^1YQx;SAX|&^6J%udV+R$}?e+89Y|gu%&uq2=;jQl4iNAx0}@L zJYj#T;lR3vxu3rrtMqJKH@)NqTwbOoj#Hb9YVD1r9a{8pWf(wAqZi+QgRSfPZ&k*# z@WxhVLoJfu;_^4vH)!3MVA#N^G=kyy%q|&(`KT(*tft4txUX=gfajV zx(7z}@9G}1T$?bk_mKiN2$oOH59 zm8DB5s!1U;&2GCLr<&r%Za6fbEt3J^S>GpN+YaeQ>6%kBIMen0Xw~_0Fx0F z=Kzi(AlCQ-GYugS6dF*bC)Y~r#b#&4gyRW$$`P#Wx9UriKHUX>^6Ir(-v5~|sac%@ z?>qdRi|^I_o`C>gfmbh+0uZfO^F+*)#&E5nmY7Aw)`b# zIJ1%$JM5+@e)G(W#ib%9fFqZ{M?CZ~FgUZj_KR?zA`2^#01U|6ZS<^EHsJ4b%Q8wy zN^43 zmHjpESsw}xq5>Y1jY+enx;CAbhDKkz;6OKTVDQ(S+Hou5_mvO`oWscJxfC?rh3120 z4V_#{DIhT#LJv=U>>XbJPruTCPv_Eq-N)|ywQhQ!so7cJ*9)ZHGgHv#VmR$S>D))=^ z1nZilJ)Z)JB$>O!nGi_H#MHB5BqmsbpqJdKB2tXXg#evWIsMk}KihKyuZvrf?(HrF 
z0AuUeVQ;w8JIBiG^Bq#Jz@QY2{?#dBHUS}*D-3~l_muFicN-a*Pkra0muoK|KkrAE_+WG|;N6sG08*8o9R8qqj0TCs{;VHS`!toK9 zuWKWbL&hgvHMwIw&Q%|!J$F0!dv zqJ_Zel~D$q*noC<&XuZU(3(A})xBnlHl|S9t2kq(A6Rh#j5L6ascFMVE}z`svF|@% zFUjbu zox_NPKuS&}+j7@c{Qe#8zT_?v!{%-B-K8wmk)xty8D?C&Q=TIOM%SY*0|75OlvI`k z64FB1mZZjjrChJj)+rZuj7T57~a6IcVhTsRLiq1)N-dVOa?vo%t{c$CTs1Gg1pyMn7rw&rUK{e6X$fi70&0gAYcYT~e;j-$c zivYj_7)G0p{?8`2%tZMS(K2xsdMV{jgry|mg&|R;I)K7Z%%pg80x&IS=75yD7fWR? z|7Uu}>m5iOQd6k{jgUr;wjU|+^7n13;5z4m z5KGYZDS|42uW=A6VlEa8cuQWK%K^a!k*x>iZReLRA#I4nIkdBxv(9kW}-*sT3{1Su5>pEpbTWKKobO@h=in&)8Dr0 z61WDUoobdKU`u7BCNjbb6eyQiZVnxX2F#X?Qz1``!RZvgK5vv14SxQA8ff2@t zBxXK>4V*z#7^k|V!U-8w0R`#`Xk};+R1$*Q5-iRNA=$aKw9My{6rhZ?dpxtQ`FU|# z2!`~cCWI2~^rGoWzl7_pci25X>R=UfBe^a33dB=I*uUAKWA8F}EQ(aXzg{*IFcMS2 z-RN^T{dl(appr%BbM(?ho`4X6H3+VnoWY`pGZSFaq9hw0X+Q=rP}Cu!Z-7PeGWu(Z zR=?JHuUTvS9cbT!xOfyPC=%*7H5&qSI5qwV#k>60U+7vY*^d_*M8BFIqXnAiEf%#( zrMo(%0}rnE?zv-j?doYz=QTub9KL1H$Jc?p5V(zC5?e z`&4rE4>;a@kEg4yMb3_WdUamjqHhCVkygV!&FVT&C*6_DGNTiQ^2>+NQpp!@CjRxx z?K(?R_Po>&nvX|iZ5D)YDIdJ=hmNKlJv4K&UKTM=;^vbhS%%hIu}%kzWUL;$sD*FY zVPP}sPJxYLe!q&kS8J=4S5KZ+229LGXz^-9f>|>`4qQ;ztRC4cE<(}-dC!`8-pqtY zFvm<|?(sDmn!6hh*8glGVQIdD88_83A)6;3Qo4>UcDGDpJSLwF4bS@~#UeFMu;P+RSMk7vOVRunI2qpqQkcK4o2gyn~7-K&BTq}GV_P<(C<++OSZX`Bt7OWBP8_VBTbETL& zjJPQzBTlOmUg(s9^ExT3(*)yrY!7J-Rs=i%75>FtG36?TqPS_ zHcOl^`E>nhANTY3dR&>LI=r{)lOj5Qqsx4m@JXEaa+4hDKc!~s4W?TzM#rS=T3~qApaB@nvJ#7+wHT3*$gO6bWU31y!Rp!e^nugT8 zMN5CUL6IC7)y{MW-MyfPOT#wuU(yX1uX3rf+;mUpx|C)ip#zc;nDis0JqD9ANy}ha z(-t=TxNt3?+ELehvg%U1>blV@<6+%DU+3o}tts_b&byz1UYe`Swji2T20WxwJEl-7 zeM)y`(#?K88xmCZ_muy?79CkNSwD?(gTSS>LD*4E*Eq!9L%2Mw$^7Oe(pEXpyf>}| z#UjoTPqN~w!B(y;#VQm)PkDFVCD#l-%b6u(;nx}0Zr>ZJU+p5dq>@=}IT*4Md9y#$ zsXpgtFwmxa%u1}odJCsgSZOY+=JO(YXp^yE%4^ZR`kUXgYh_Vf-HXV7!O+D0ye7`2 z{wZJ?3D)8%oSAk>In!@ykVbluB*!^ki?3JbogZ_%bKbH)F%&3S$g?6heE3qqf$iOuW zk!^y$?JJeT-dwT@h(^7tuP1k;)UH*KkSr!CkS#2=xSABq5V7}V;qNT0vkyDKXcmpb z|6xqte;18V6~u}zqbZKf_e*R%=0Hq$`-%zDz3S#5jUAKJ&*1ZJF>MLJGHbl?B0Px# z4x0iA-uL?Z&->)@4sT1a>F(&36rhMIO!4y9^)`vL&O9cHV!~#!G_Gmp-q-w0?P*b> z5u7|9$&}W8u)CTmq2hP9_?};jzrEB>o;I5X$H%^{7g}$7FXD!rwZO*BWq9N|sO%J* zZUqBEF=SpT7d4 z&nBgAvc2Z-s>uQ1Fa)1Nlj^^Iq3sbm7<8L@8wo%_AltrMkU$0wdn=}oqW~8li$?zI z+v|08Y~v-=YVRjqb0*GS!b1HCT7c%em_>Vo0C{CxZgD|3L<@V#&HxY+U@;^u+W`x5 f7#G>l6pU8vKpOAmz~fTI|KjdQrwS4UWBqf0E-S*C literal 5273 zcmV;K6lUu}T4*^jL0KkKS;;bGvH%rp|NsC0|NsC0|NsC0|NsC0|NsC0fB*mg|NsC0 z|NsC0|Nr0;pEW>G003#WqR`QH?b-kZm}tH2>qoQL9Th+TP9H!$+uj-^KqE#(c^W23 zr-_p&-jwvorqp2?o~CF`wGrxLMA`?UCTTR<34=pPu?X6kG|8qK4^gR^CzQnX1}B6Q zOh=U3OpnxzG|-xQn?&*vk5m$oG)(|9H8C{QHia_?(lpaXOwp-^nvDiA6HI{7lR#t` zng&luX^EiF(V#K_WN6T6X`lcA8UO*H8Z-a~q{5ph6!w(bYH5u?+JH1@XfkL300Yzp zni>J122C0OXaE2JG6sME13&-(Gynhq001Is6$S{z$kPbGA)%uQrkH~ezyxWC!Ujz; z44E1*hC!1E$jOAkF)=j2CMKCM6A;O!1i+aAk&`1J(V##80003n00E!?CIA2c00x3E z044wc00E!?000000000003%EUi3kuAO-vI^Ln3;off{L}MuDNKewrlo%}-Qqs%Xrk zPc=i@pbb4tn<&C*=^77E)6ox5Y2^mQLqI)4)bwgJ0NRZ_qtqG!hy&CbG-rHI9Ri1L zgJ=dAICoCF zRRjS$@Y5pVpq-^uvZ3e_W)d+C{TrAhWJ=9B{Zd9uTHFi$T@T3d{-{w6w%be^V{NwI zmO_O9L^lxTs8(>oa#t_OFziFtVULHuQk4?+qtu)U9^r-@tVP0wacN&QP2)E+-aC6*$zNo2yV6qRoS1GyJ38W6p7RbI6%?(- z;shvS$Qk6NuFr9A)o|abyyIC(f~P!!hoF^R9>1v<*)#-Lr4f> zK?4BzOiHC$S0w9%Q7c+OU=0>~sO#93%?XKI?TZI=OehBJ3=VkPtjXfX+TB<5J=vQ#jW&xQt zDobGU@Du|e(bsc^r9gZ#2rx!;@DR%s8D-?0X_@~{6$vdF!rP^<#|fkv&$s+o3X)#A zfV?KZ^Tpr#!#F-%)Yaqcn=vvD5!4;Y=XVRWEH4Ti8D{nOy{Ej-!6+nSmVv?Od&sos z*Yb)3Xi5F@erZ`6<@OKSajByi^30Y6utBWT0! 
zcm77-GKOm}JIxEZ+AVEB@;4TW#KDY~HIIrwC>KZ88f1wynIlkSIamkGu*!_GoN@Nd zh$R&T_$DIQh zoq`zeSyLF|w|e)?Kr9AnP8<4w{G~LOG?l121!Pcw7!XEQbu+d}f`#)SfFXS`o{vTh z$X?8?Z2XIc-|;y-_1F`y!cP@?c~|t{P$ewX1cnMnEqk1Oz0# zd{zfJ*WbBxBd}rMivezdBq9((l*h1pb&|K#{V75HS7=%7Q(qUV>p8atF#UJEkfw$ngF5FS4H^LcLk_c%K&?-2P~c>7 zM3UGRBE^oQYOhiv#b1}JGW#;)C8{nuzW#Pigb&UNv8NG)h7qS!E z06>W4t;6?}0Q~f-OKP@zGgKM{f_lW3QdcNs0P zcN=uQBBmVf?7zjyQO=&4K)Gx})f!1LiH#&LR3;!tis;ZJn+o$M$SSFH zGl-RSNl8gnNzQakWh#|XGc!S?duEq>a(w?hthL}9zhHyMO$eaU2x5|;)FhB|!GK;( zPZ|gnVw8lzwvv#=4JZeXbcq&HmpMwmS2)ehJq|6l9vAt1Zx85vu75Ko6I+kfhrynJ zp0Y_BMu2PstI@@?e<}Diu8;zD!DQ$u-Zl=<*gQ^%e;*u2cu}oLQ zUo_80&G7kG@L|08S?4Q`8VamA*|M6=oEf*)fnAJDu)qzjRaaOlV>8O-X{U&bXBQWv z5<;Mk6QdzX<$j%A#V(EJCaFa9)=Dj0*5!FdjerU=e)cG`;H; zwn8tw4*qU!a#2&NvMQZA9E#l-6bGuN8!VREbDh`~UJkw9d;7K{fcE%>%2+Ivq-H_* z?8tsxGg{f2N76Wrkpg_ulRJLZX8{GSha(vW3GZD?Fdzh;D~>7opa%p2CQyL_oOH&4 z3M3H$pRwer8?eH7FDR+COd4RqF*yS&JSwPLh6Tk>+xKOXAUP#rX!zHn!k@eJOg7s2 zKqrt$)#6PS1c!SmdH8cLPiox+aAja735vYtBFo2;Bp6dRkq~|%a#7VSCI~}Av13+- z?`E>kC~e@lC4Lc5ab#d>4B|X!{JNZtsuFhjLUP5@jS72|ph&YG1Z@#E*~C&hT2+KK zw20z{lu*QP^4_qxE-fMInZC77^qE~vO!vWd01UZ^I*IkWo88=!f9?nnfds+M@$tW3 zd7VsTIhmV&L(p<}9kSTiY-tt+%tC&FkzX)usN(0}f%dwej0V+!rlFNo&qr z*j{jWQQH6riDP;ySQt|cXLv+NVU4@p3}uy2XOv}UwOw46l}QE<_~!b3^W@w8x%(~m z=b^P!0i>!zvbJG76Ioqk0Xw;0#kefN{d~RHOUmI_Uh|}ciuc74VoF{=N9PA*9S)UY z7kY3+DSiu{_&Av9d$4#Xg^R%7S9NUoRJ5IEZC|?_hfrbcX+_+5b~T6;uUVXLk8or)#uOgVo1D zpn#cH;%N#UlYtJSqh`sVL`Wh6B^K~0K*A6Nx!p;TkPl<{~wr7lKW2T_TM1jwTi{(cJPFc@L&P)7zZ+-KnGx4Gli*$ zTE~oei@jd_Eziv(i^aZDn;W4rK}K3wEzpV;;|0acn5Z*YE(%zI#!Vu!U6cT0z}t2nX!wkb}o`D()4$wfeBg5;e)@j{BR71L!_)K zDk5tQ{vg1qV78uQQ^g{~xun8b^dO2q2HQBtXHV(w;}BsqIC!BirSmc3&GQmsEkV#3*Zm zeU|!DTY(ZNau^M)<8c<|YUCwh!#b8^>W=(4H;iK%V-EKU#3J#?4Bh@a`EK&u^!9-Z z2)n@A1`X&~NJdi1{Rd0;Niwn$$fgpcI#Qc*ptR5`kg6bx#bnY@at+*)DNr-r!Zh)3 zcn1Isgwu1=%ruvl#9UDvCR$5%D~wzd4J{a97Je&l3@QzDk9Am2M(u#fWwn2AQ>2Xp zr?ngI+JVu0rMkJjj>&vf!G6$L zk@CQ3nMaIK2X*OxqG?zQS5(@Ug8;XD#2kNN=1?*N70)4Q0y8KC7p5sp%+-Q{BoURwR#$EzVw+SB;Bj<);_#~qI_bVJq+vj$gb<%Gr#Y`GKW{eQdCkEl5 z$uh3YEmgUix@4SfR;$d)Puk`)%7}r~QZW1%N-+b0)c`6mMn(v%0hk7AVk&g$c!1%# zU<5Bab1i<)+Ai30A}ba1qj6YFn?@sp&}xv3ZLO!Xys;BBPEr`<@MU=A`BLg9Ae$Nq zWGdW$sQPt^EaMPQ?s2!>_U6AkLsc%zS5fXgJnZ58YW5KE-M#+#n3z-kW5zj@7!3`K zsz+1M10Uu*haqWZiUB9eaQF$x=-4GNJ`#Wc3z%O5wX9DTIFca3PiWfX5fcfv?)7<- z04zFVD(k`l5IXu!AJx2MJPv4jb6RV6^>%eEQUkR;HQnyMb4N6R3$LrGucWZ7^sxFx z|G#o6-PB%G{=^6<>nMM<(A=%RPyFb=OKhp)*}d-auv|@F9Y}=! zkUZdJ9_XsQ7+K|AZH!G0KIU(wtl8UKfK*@d-6r12pL+|$@c8Wo@!GrD|92S66!Q+R z&Qq0z=NpcPm+rXNL*+B{4wr}nnx@3gCR~hIA^?dKb~edn>fyOuw7z8{OSYMQqlc2z z=rk(dmw1$};bEUuD*UccA}Z!;BEa5|Fkl&(l)kp#tF69Im=X;+=@;qpzdiuM3Qkh8 zFF7>tv!kVns=C$nxYC?=**%_7s%`KQaolHKKE%!+wE=MQ9}lwb&EtXFf_&!uD@LiL zizzByqOCy>#Ei`05?Q{n=rVW;NV(@*?7B$IpiTntGgwmO5g@q|CU<1Fr zW0{1fo{um-Vnoi}q%ojFRgR(!tfFnX9Rqts8qI}X!vik&ns#dznCXuh_H)u!h=K%8 zC_n>T>Mq~_3uDrzSg__Y)oX|lO+$@;k;lmgpnC&HWPnSIiZFu9qC}7^0ML-Wv4DmC fVbE>BQ)1?^0CT&~Q{HR;C)m4^DZ+$?Ns}#*E-9)n From f3ce47f8918527e82d48d716b5a5f44a1608b6f6 Mon Sep 17 00:00:00 2001 From: Bailey Berro Date: Mon, 25 Feb 2019 18:22:17 -0800 Subject: [PATCH 004/624] update_engine: Fix bug in rollback tests This change fixes a bug identified in the asan tests where the value of rollback_allowed_milestones int he fake request_params was not intialized properly in a testcase, causing a failure. 
BUG=chromium:935140 TEST=update engine unittests w/ asan profile Change-Id: Iabf5ba126f6155caa679f43b29472c353e6d308f Reviewed-on: https://chromium-review.googlesource.com/1488192 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Bailey Berro Reviewed-by: Amin Hassani Reviewed-by: Zentaro Kavanagh --- omaha_request_action_unittest.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 66fc6fe9..a642b5a8 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -3187,6 +3187,7 @@ TEST_F(OmahaRequestActionTest, PastRollbackVersionsNoEntries) { TEST_F(OmahaRequestActionTest, PastRollbackVersionsValidEntries) { OmahaResponse response; + request_params_.set_rollback_allowed_milestones(4); fake_update_response_.rollback = true; fake_update_response_.rollback_allowed_milestones = 4; fake_update_response_.rollback_firmware_version = "4.3"; From 7c41bbfa118140f0a48335bea367708ca071d2ae Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 13 Feb 2019 10:50:31 -0800 Subject: [PATCH 005/624] update_engine: Change Omaha request updater version format - Remove the "version" attribute of - Change the value of the "updaterversion" attribute of to be the pure version number (e.g. change "ChromeOSUpdateEngine-1.0.0.0" to "1.0.0.0") - Add the "updater" attribute to , the value of which is the name of the update client (e.g. "ChromeOSUpdateEngine") BUG=chromium:928798 TEST=unittests TEST=cros flashed and the omaha request was: Change-Id: I9b1d889fabb539ee64ebe2e4417592a6355dd940 Reviewed-on: https://chromium-review.googlesource.com/1470839 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Amin Hassani Reviewed-by: Amin Hassani --- omaha_request_action.cc | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index f1678eef..deb294a1 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -460,22 +460,15 @@ string GetRequestXml(const OmahaEvent* event, system_state); } - string install_source = base::StringPrintf( - "installsource=\"%s\" ", - (params->interactive() ? "ondemandupdate" : "scheduler")); - - string updater_version = XmlEncodeWithDefault( - base::StringPrintf( - "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion), - ""); - string request_xml = + string request_xml = base::StringPrintf( "\n" - "\n") + - os_xml + app_xml + "\n"; + "\n%s%s\n", + constants::kOmahaUpdaterID, + kOmahaUpdaterVersion, + params->interactive() ? "ondemandupdate" : "scheduler", + os_xml.c_str(), + app_xml.c_str()); return request_xml; } From 9b66aa677f82c711b661d2f5dde49ff806514c5f Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 14 Mar 2019 14:13:30 -0700 Subject: [PATCH 006/624] update_engine: Use dlcservice.proto instead of update_engine.proto system_api/dbus/update_engine/update_engine.proto and system_api/dbus/dlcservice/dlcservice.proto are basically the same thing. They providesf a mechanism to transfer information about DLCs from dlcservice to update_engine and vice verse. So having only one copy of it makes it easier to maintain and extend in the future. 
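As a sketch of the client side after this change (assuming <string> and the
generated dlcservice proto header are included; the DLC id below is
hypothetical):

  // Build and serialize a DLC install request with the dlcservice proto.
  std::string MakeInstallRequest(const std::string& omaha_url) {
    dlcservice::DlcModuleList dlc_list;
    dlc_list.set_omaha_url(omaha_url);
    dlc_list.add_dlc_module_infos()->set_dlc_id("dummy-dlc");
    std::string request;
    dlc_list.SerializeToString(&request);
    return request;
  }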
BUG=chromium:898340 TEST=precq CQ-DEPEND=CL:1524873 Change-Id: Ieacea209d289c1bac86e5daa2392b3e3e6908124 Reviewed-on: https://chromium-review.googlesource.com/1525022 Commit-Ready: Amin Hassani Tested-by: Amin Hassani Reviewed-by: Nicolas Norvez Reviewed-by: Xiaochu Liu --- client_library/client_dbus.cc | 15 ++++++++------- dbus_service.cc | 15 +++++++-------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 7ca63070..3ffb0886 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -19,8 +19,8 @@ #include #include +#include #include -#include #include "update_engine/update_status_utils.h" @@ -57,14 +57,15 @@ bool DBusUpdateEngineClient::AttemptUpdate(const string& in_app_version, nullptr); } -bool DBusUpdateEngineClient::AttemptInstall( - const string& omaha_url, const vector& dlc_module_ids) { +bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url, + const vector& dlc_ids) { // Convert parameters into protobuf. - chromeos_update_engine::DlcParameters dlc_parameters; + dlcservice::DlcModuleList dlc_parameters; dlc_parameters.set_omaha_url(omaha_url); - for (const auto& dlc_module_id : dlc_module_ids) { - chromeos_update_engine::DlcInfo* dlc_info = dlc_parameters.add_dlc_infos(); - dlc_info->set_dlc_id(dlc_module_id); + for (const auto& dlc_id : dlc_ids) { + dlcservice::DlcModuleInfo* dlc_module_info = + dlc_parameters.add_dlc_module_infos(); + dlc_module_info->set_dlc_id(dlc_id); } string dlc_request; if (dlc_parameters.SerializeToString(&dlc_request)) { diff --git a/dbus_service.cc b/dbus_service.cc index 72960532..2a5662f3 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -19,8 +19,8 @@ #include #include +#include #include -#include #include "update_engine/dbus_connection.h" #include "update_engine/update_status_utils.h" @@ -65,24 +65,23 @@ bool DBusUpdateEngineService::AttemptUpdateWithFlags( bool DBusUpdateEngineService::AttemptInstall(ErrorPtr* error, const string& dlc_request) { // Parse the raw parameters into protobuf. - DlcParameters dlc_parameters; + dlcservice::DlcModuleList dlc_parameters; if (!dlc_parameters.ParseFromString(dlc_request)) { *error = brillo::Error::Create( FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid."); return false; } // Extract fields from the protobuf. - vector dlc_module_ids; - for (const auto& dlc_info : dlc_parameters.dlc_infos()) { - if (dlc_info.dlc_id().empty()) { + vector dlc_ids; + for (const auto& dlc_module_info : dlc_parameters.dlc_module_infos()) { + if (dlc_module_info.dlc_id().empty()) { *error = brillo::Error::Create( FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid."); return false; } - dlc_module_ids.push_back(dlc_info.dlc_id()); + dlc_ids.push_back(dlc_module_info.dlc_id()); } - return common_->AttemptInstall( - error, dlc_parameters.omaha_url(), dlc_module_ids); + return common_->AttemptInstall(error, dlc_parameters.omaha_url(), dlc_ids); } bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error, From a7add991ca54d16462c189e05deba08615123786 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 18 Mar 2019 11:36:48 -0700 Subject: [PATCH 007/624] update_engine: Remove --install and --dlc_module_ids flags --install flag was added to install DLCs, but now dlcservice can actually send D-Bus signal to update_engine for installing DLCs and we already have a dlcservice_util binary that can help with that. So remove these two flags so no one else start using them. 
These flags are not actively being used anywhere. BUG=none TEST=precq Change-Id: Ifa76c45336581e0dc97a27d6d577692a74853c94 Reviewed-on: https://chromium-review.googlesource.com/1528929 Commit-Ready: Amin Hassani Tested-by: Amin Hassani Reviewed-by: Nicolas Norvez Reviewed-by: Xiaochu Liu --- update_engine_client.cc | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/update_engine_client.cc b/update_engine_client.cc index 7446041c..d1b22678 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -321,8 +321,6 @@ int UpdateEngineClient::ProcessFlags() { "Show the previous OS version used before the update reboot."); DEFINE_bool(last_attempt_error, false, "Show the last attempt error."); DEFINE_bool(eol_status, false, "Show the current end-of-life status."); - DEFINE_bool(install, false, "Requests an install."); - DEFINE_string(dlc_module_ids, "", "colon-separated list of DLC IDs."); // Boilerplate init commands. base::CommandLine::Init(argc_, argv_); @@ -507,30 +505,6 @@ int UpdateEngineClient::ProcessFlags() { } } - if (FLAGS_install) { - // Parse DLC module IDs. - vector dlc_module_ids; - if (!FLAGS_dlc_module_ids.empty()) { - dlc_module_ids = base::SplitString(FLAGS_dlc_module_ids, - ":", - base::TRIM_WHITESPACE, - base::SPLIT_WANT_ALL); - } - if (dlc_module_ids.empty()) { - LOG(ERROR) << "dlc_module_ids is empty:" << FLAGS_dlc_module_ids; - return 1; - } - if (!client_->AttemptInstall(FLAGS_omaha_url, dlc_module_ids)) { - LOG(ERROR) << "AttemptInstall failed."; - return 1; - } - return 0; - } else if (!FLAGS_dlc_module_ids.empty()) { - LOG(ERROR) << "dlc_module_ids is not empty while install is not set:" - << FLAGS_dlc_module_ids; - return 1; - } - // Initiate an update check, if necessary. if (do_update_request) { LOG_IF(WARNING, FLAGS_reboot) << "-reboot flag ignored."; @@ -539,7 +513,7 @@ int UpdateEngineClient::ProcessFlags() { app_version = "ForcedUpdate"; LOG(INFO) << "Forcing an update by setting app_version to ForcedUpdate."; } - LOG(INFO) << "Initiating update check and install."; + LOG(INFO) << "Initiating update check."; if (!client_->AttemptUpdate( app_version, FLAGS_omaha_url, FLAGS_interactive)) { LOG(ERROR) << "Error checking for update."; From 6cf8e675a37c3b61cea0d313f8006068a1b3190a Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Thu, 14 Mar 2019 16:15:42 -0700 Subject: [PATCH 008/624] update_payload: deprecate unused flags from paycheck.py Some temporary workaround are placed to hack the parameters to some default value that the rest of the code is happy with. This CL removes them. part_sizes should be default None (it was default [None, None]). when part_sizes is None, part sizes are not checked (same behavior as [None, None]). src_part_paths should be default None (it was default [None, None]). when src_part_paths is None, we set args.assert_type=_TYPE_FULL (same behavior as [None, None]). dst_part_paths (out_dst_part_paths) should be default None (it was default [None, None]). when dst_part_paths (and out_dst_part_paths) is None, dargs is not set (same behavior as [None, None]). out_dst_part_paths is rarely used and we often chose to use dst_part_path. Old flags (deprecated) are removed as well. 
BUG=chromium:926045 TEST=test_paycheck.sh chromeos_11844.0.0_kevin-arcnext_canary-channel_full_kevin-mp.bin-6f7f58b3c9a1a84ea15ab67f84cd5387.signed chromeos_11844.0.0-11869.0.0_kevin-arcnext_canary-channel_delta_kevin-mp.bin-fc6014025415e0b5e780c0739a3b0461.signed chromeos_11869.0.0_kevin-arcnext_canary-channel_full_kevin-mp.bin-1be744e0723534e02084c762dea316c0.signed Change-Id: Ifdf872ddfa03d5759a8ee4021e296e4dc4571d9c Reviewed-on: https://chromium-review.googlesource.com/1525289 Commit-Ready: Xiaochu Liu Tested-by: Xiaochu Liu Reviewed-by: Nicolas Norvez --- scripts/paycheck.py | 92 +++++++++++++++---------------- scripts/update_payload/checker.py | 4 +- 2 files changed, 45 insertions(+), 51 deletions(-) diff --git a/scripts/paycheck.py b/scripts/paycheck.py index 9d617787..875b00f7 100755 --- a/scripts/paycheck.py +++ b/scripts/paycheck.py @@ -38,6 +38,29 @@ _TYPE_FULL = 'full' _TYPE_DELTA = 'delta' +def CheckApplyPayload(args): + """Whether to check the result after applying the payload. + + Args: + args: Parsed command arguments (the return value of + ArgumentParser.parse_args). + + Returns: + Boolean value whether to check. + """ + return args.dst_part_paths is not None + +def ApplyPayload(args): + """Whether to apply the payload. + + Args: + args: Parsed command arguments (the return value of + ArgumentParser.parse_args). + + Returns: + Boolean value whether to apply the payload. + """ + return CheckApplyPayload(args) or args.out_dst_part_paths is not None def ParseArguments(argv): """Parse and validate command-line arguments. @@ -49,9 +72,9 @@ def ParseArguments(argv): Returns the arguments returned by the argument parser. """ parser = argparse.ArgumentParser( - description=('Applies a Chrome OS update PAYLOAD to src_kern and ' - 'src_root emitting dst_kern and dst_root, respectively. ' - 'src_kern and src_root are only needed for delta payloads. ' + description=('Applies a Chrome OS update PAYLOAD to src_part_paths' + 'emitting dst_part_paths, respectively. ' + 'src_part_paths are only needed for delta payloads. 
' 'When no partitions are provided, verifies the payload ' 'integrity.'), epilog=('Note: a payload may verify correctly but fail to apply, and ' @@ -93,13 +116,6 @@ def ParseArguments(argv): check_args.add_argument('-s', '--metadata-size', metavar='NUM', default=0, help='the metadata size to verify with the one in' ' payload') - # TODO(tbrindus): deprecated in favour of --part_sizes - check_args.add_argument('-p', '--root-part-size', metavar='NUM', - default=0, type=int, - help='override rootfs partition size auto-inference') - check_args.add_argument('-P', '--kern-part-size', metavar='NUM', - default=0, type=int, - help='override kernel partition size auto-inference') check_args.add_argument('--part_sizes', metavar='NUM', nargs='+', type=int, help='override partition size auto-inference') @@ -113,21 +129,6 @@ def ParseArguments(argv): help='use the specified bspatch binary') apply_args.add_argument('--puffpatch-path', metavar='FILE', help='use the specified puffpatch binary') - # TODO(tbrindus): deprecated in favour of --dst_part_paths - apply_args.add_argument('--dst_kern', metavar='FILE', - help='destination kernel partition file') - apply_args.add_argument('--dst_root', metavar='FILE', - help='destination root partition file') - # TODO(tbrindus): deprecated in favour of --src_part_paths - apply_args.add_argument('--src_kern', metavar='FILE', - help='source kernel partition file') - apply_args.add_argument('--src_root', metavar='FILE', - help='source root partition file') - # TODO(tbrindus): deprecated in favour of --out_dst_part_paths - apply_args.add_argument('--out_dst_kern', metavar='FILE', - help='created destination kernel partition file') - apply_args.add_argument('--out_dst_root', metavar='FILE', - help='created destination root partition file') apply_args.add_argument('--src_part_paths', metavar='FILE', nargs='+', help='source partitition files') @@ -143,36 +144,28 @@ def ParseArguments(argv): # Parse command-line arguments. args = parser.parse_args(argv) - # TODO(tbrindus): temporary workaround to keep old-style flags from breaking - # without having to handle both types in our code. Remove after flag usage is - # removed from calling scripts. - args.part_names = args.part_names or [common.KERNEL, common.ROOTFS] - args.part_sizes = args.part_sizes or [args.kern_part_size, - args.root_part_size] - args.src_part_paths = args.src_part_paths or [args.src_kern, args.src_root] - args.dst_part_paths = args.dst_part_paths or [args.dst_kern, args.dst_root] - args.out_dst_part_paths = args.out_dst_part_paths or [args.out_dst_kern, - args.out_dst_root] - - # Make sure we don't have new dependencies on old flags by deleting them from - # the namespace here. - for old in ['kern_part_size', 'root_part_size', 'src_kern', 'src_root', - 'dst_kern', 'dst_root', 'out_dst_kern', 'out_dst_root']: - delattr(args, old) - # There are several options that imply --check. args.check = (args.check or args.report or args.assert_type or args.block_size or args.allow_unhashed or args.disabled_tests or args.meta_sig or args.key or - any(args.part_sizes) or args.metadata_size) + args.part_sizes is not None or args.metadata_size) + # Makes sure the following arguments have the same length as |part_names| if + # set. for arg in ['part_sizes', 'src_part_paths', 'dst_part_paths', 'out_dst_part_paths']: + if getattr(args, arg) is None: + # Parameter is not set. 
+ continue if len(args.part_names) != len(getattr(args, arg, [])): parser.error('partitions in --%s do not match --part_names' % arg) - if all(args.dst_part_paths) or all(args.out_dst_part_paths): - if all(args.src_part_paths): + def _IsSrcPartPathsProvided(args): + return args.src_part_paths is not None + + # Makes sure parameters are coherent with payload type. + if ApplyPayload(args): + if _IsSrcPartPathsProvided(args): if args.assert_type == _TYPE_FULL: parser.error('%s payload does not accept source partition arguments' % _TYPE_FULL) @@ -230,7 +223,8 @@ def main(argv): report_file = open(args.report, 'w') do_close_report_file = True - part_sizes = dict(zip(args.part_names, args.part_sizes)) + part_sizes = (args.part_sizes and + dict(zip(args.part_names, args.part_sizes))) metadata_sig_file = args.meta_sig and open(args.meta_sig) payload.Check( pubkey_file_name=args.key, @@ -249,7 +243,7 @@ def main(argv): report_file.close() # Apply payload. - if all(args.dst_part_paths) or all(args.out_dst_part_paths): + if ApplyPayload(args): dargs = {'bsdiff_in_place': not args.extract_bsdiff} if args.bspatch_path: dargs['bspatch_path'] = args.bspatch_path @@ -260,7 +254,7 @@ def main(argv): out_dst_parts = {} file_handles = [] - if all(args.out_dst_part_paths): + if args.out_dst_part_paths is not None: for name, path in zip(args.part_names, args.out_dst_part_paths): handle = open(path, 'w+') file_handles.append(handle) @@ -275,7 +269,7 @@ def main(argv): # If destination kernel and rootfs partitions are not given, then this # just becomes an apply operation with no check. - if all(args.dst_part_paths): + if CheckApplyPayload(args): # Prior to comparing, add the unused space past the filesystem # boundary in the new target partitions to become the same size as # the given partitions. This will truncate to larger size. diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py index 6d17fbe5..e4fec2d9 100644 --- a/scripts/update_payload/checker.py +++ b/scripts/update_payload/checker.py @@ -609,7 +609,7 @@ def _CheckManifest(self, report, part_sizes=None): """ self.major_version = self.payload.header.version - part_sizes = collections.defaultdict(int, part_sizes) + part_sizes = part_sizes or collections.defaultdict(int) manifest = self.payload.manifest report.AddSection('manifest') @@ -1372,7 +1372,7 @@ def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, new_fs_usable_size = self.new_fs_sizes[part] old_fs_usable_size = self.old_fs_sizes[part] - if part_sizes.get(part, None): + if part_sizes is not None and part_sizes.get(part, None): new_fs_usable_size = old_fs_usable_size = part_sizes[part] # Infer the usable partition size when validating rootfs operations: # - If rootfs partition size was provided, use that. From 73733a0bd4162229c194dbd365b9d4c4e768ec2c Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 18 Mar 2019 14:20:46 -0700 Subject: [PATCH 009/624] update_engine: Add support for DLC packages. The update_engine can handle downloading multipe apckages per an App ID. Currently each DLC is tied to an AppID and currently we have only one image per DLC. This basically restricts our ability to support multiple images per DLC in the future and if not fixed now, it would be near impossible to add it in the future. Currently, in order to find the location of a DLC module, the location (dlc-id) is inferred from the paritition name structured like dlc_. 
In order to handled multiple packages for each DLC, we can add another argument dlc-package in the partition name which allows us to identify different packages. The new format for the partition name is as follows: dlc// BUG=chromium:908994 TEST=unittest CQ-DEPEND=CL:1532852, CL:1532770, CL:1532851, CL:1531833, CL:1531834 Change-Id: Ie9d03e23b5a44a963ab9a088e66f3d6bbbb9d664 Reviewed-on: https://chromium-review.googlesource.com/1532771 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Amin Hassani Reviewed-by: Xiaochu Liu --- boot_control_chromeos.cc | 44 ++++++++++++++++++++++++------- boot_control_chromeos.h | 8 ++++++ boot_control_chromeos_unittest.cc | 20 ++++++++++++++ 3 files changed, 62 insertions(+), 10 deletions(-) diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index b390f61e..3f1eac44 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -19,10 +19,12 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -36,6 +38,7 @@ extern "C" { #include "update_engine/common/utils.h" using std::string; +using std::vector; namespace { @@ -44,7 +47,7 @@ const char* kChromeOSPartitionNameRoot = "root"; const char* kAndroidPartitionNameKernel = "boot"; const char* kAndroidPartitionNameRoot = "system"; -const char kPartitionNamePrefixDlc[] = "dlc_"; +const char kPartitionNamePrefixDlc[] = "dlc"; const char kPartitionNameDlcA[] = "dlc_a"; const char kPartitionNameDlcB[] = "dlc_b"; const char kPartitionNameDlcImage[] = "dlc.img"; @@ -145,6 +148,31 @@ BootControlInterface::Slot BootControlChromeOS::GetCurrentSlot() const { return current_slot_; } +bool BootControlChromeOS::ParseDlcPartitionName( + const std::string partition_name, + std::string* dlc_id, + std::string* dlc_package) const { + CHECK_NE(dlc_id, nullptr); + CHECK_NE(dlc_package, nullptr); + + vector tokens = base::SplitString( + partition_name, "/", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL); + if (tokens.size() != 3 || tokens[0] != kPartitionNamePrefixDlc) { + LOG(ERROR) << "DLC partition name (" << partition_name + << ") is not well formatted."; + return false; + } + if (tokens[1].empty() || tokens[2].empty()) { + LOG(ERROR) << " partition name does not contain valid DLC ID (" << tokens[1] + << ") or package (" << tokens[2] << ")"; + return false; + } + + *dlc_id = tokens[1]; + *dlc_package = tokens[2]; + return true; +} + bool BootControlChromeOS::GetPartitionDevice(const string& partition_name, unsigned int slot, string* device) const { @@ -152,17 +180,13 @@ bool BootControlChromeOS::GetPartitionDevice(const string& partition_name, if (base::StartsWith(partition_name, kPartitionNamePrefixDlc, base::CompareCase::SENSITIVE)) { - // Extract DLC module ID from partition_name (DLC module ID is the string - // after |kPartitionNamePrefixDlc| in partition_name). - const auto dlc_module_id = - partition_name.substr(strlen(kPartitionNamePrefixDlc)); - if (dlc_module_id.empty()) { - LOG(ERROR) << " partition name does not contain DLC module ID:" - << partition_name; + string dlc_id, dlc_package; + if (!ParseDlcPartitionName(partition_name, &dlc_id, &dlc_package)) return false; - } + *device = base::FilePath(imageloader::kDlcImageRootpath) - .Append(dlc_module_id) + .Append(dlc_id) + .Append(dlc_package) .Append(slot == 0 ? 
kPartitionNameDlcA : kPartitionNameDlcB) .Append(kPartitionNameDlcImage) .value(); diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index f3682e9f..109197fe 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -59,6 +59,7 @@ class BootControlChromeOS : public BootControlInterface { friend class BootControlChromeOSTest; FRIEND_TEST(BootControlChromeOSTest, SysfsBlockDeviceTest); FRIEND_TEST(BootControlChromeOSTest, GetPartitionNumberTest); + FRIEND_TEST(BootControlChromeOSTest, ParseDlcPartitionNameTest); // Returns the sysfs block device for a root block device. For example, // SysfsBlockDevice("/dev/sda") returns "/sys/block/sda". Returns an empty @@ -74,6 +75,13 @@ class BootControlChromeOS : public BootControlInterface { int GetPartitionNumber(const std::string partition_name, BootControlInterface::Slot slot) const; + // Extracts DLC module ID and package ID from partition name. The structure of + // the partition name is dlc//. For example: + // dlc/dummy-dlc/dummy-package + bool ParseDlcPartitionName(const std::string partition_name, + std::string* dlc_id, + std::string* dlc_package) const; + // Cached values for GetNumSlots() and GetCurrentSlot(). BootControlInterface::Slot num_slots_{1}; BootControlInterface::Slot current_slot_{BootControlInterface::kInvalidSlot}; diff --git a/boot_control_chromeos_unittest.cc b/boot_control_chromeos_unittest.cc index 6a600093..1c40dcec 100644 --- a/boot_control_chromeos_unittest.cc +++ b/boot_control_chromeos_unittest.cc @@ -18,6 +18,8 @@ #include +using std::string; + namespace chromeos_update_engine { class BootControlChromeOSTest : public ::testing::Test { @@ -67,4 +69,22 @@ TEST_F(BootControlChromeOSTest, GetPartitionNumberTest) { EXPECT_EQ(-1, bootctl_.GetPartitionNumber("A little panda", 0)); } +TEST_F(BootControlChromeOSTest, ParseDlcPartitionNameTest) { + string id, package; + + EXPECT_TRUE(bootctl_.ParseDlcPartitionName("dlc/id/package", &id, &package)); + EXPECT_EQ(id, "id"); + EXPECT_EQ(package, "package"); + + EXPECT_FALSE( + bootctl_.ParseDlcPartitionName("dlc-foo/id/package", &id, &package)); + EXPECT_FALSE( + bootctl_.ParseDlcPartitionName("dlc-foo/id/package/", &id, &package)); + EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc/id", &id, &package)); + EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc/id/", &id, &package)); + EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc//package", &id, &package)); + EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc", &id, &package)); + EXPECT_FALSE(bootctl_.ParseDlcPartitionName("foo", &id, &package)); +} + } // namespace chromeos_update_engine From 3dbe64e9f2e3b1ef9b3eb2b1e7a42e4cb4af1b89 Mon Sep 17 00:00:00 2001 From: xunchang Date: Mon, 8 Apr 2019 23:02:28 -0700 Subject: [PATCH 010/624] Support signature size of 512 bytes The signature size will be 512 bytes for the payload signed with 4096 bits RSA key. 
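Put differently, a raw RSA signature is as long as the key modulus, so the two accepted sizes map one-to-one onto the supported key lengths. A minimal sketch of that relationship (illustration only, not code from this CL):

// 2048-bit RSA -> 256-byte signature, 4096-bit RSA -> 512-byte signature.
constexpr int SignatureSizeBytes(int rsa_key_bits) {
  return rsa_key_bits / 8;
}
static_assert(SignatureSizeBytes(2048) == 256, "2048-bit keys use 256-byte signatures");
static_assert(SignatureSizeBytes(4096) == 512, "4096-bit keys use 512-byte signatures");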
Bug: 129163830 Test: generate and verify an OTA Change-Id: I18710218e4a3a030e257c594f416831cbfa8c041 --- payload_generator/generate_delta_main.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index de0a0918..3cb891f0 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -63,8 +63,8 @@ void ParseSignatureSizes(const string& signature_sizes_flag, bool parsing_successful = base::StringToInt(str, &size); LOG_IF(FATAL, !parsing_successful) << "Invalid signature size: " << str; - LOG_IF(FATAL, size != (2048 / 8)) - << "Only signature sizes of 256 bytes are supported."; + LOG_IF(FATAL, size != 256 && size != 512) + << "Only signature sizes of 256 or 512 bytes are supported."; signature_sizes->push_back(size); } From 1e1c86cd640201795483b641caa8d98ecad04598 Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Thu, 18 Apr 2019 10:48:32 -0700 Subject: [PATCH 011/624] update_engine requires update_engine_payload_key. This ensures update-payload-key.pub.pem is installed whenever update_engine is added to PRODUCT_PACKAGES. Bug: 130516531 Test: Build aosp_arm64-userdebug w/o setting AB_OTA_UPDATER. Check that /system/etc/update_engine/update-payload-key.pub.pem is available. Test: Build aosp_taimen-userdebug. Check that /system/etc/update_engine/update-payload-key.pub.pem is available in the built system and recovery images. Change-Id: Iea282025f2ba1dd3c46c764c13f768b84553b59e --- Android.bp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Android.bp b/Android.bp index 8b1730b9..ce43bea5 100644 --- a/Android.bp +++ b/Android.bp @@ -293,7 +293,10 @@ cc_binary { ], static_libs: ["libupdate_engine_android"], - required: ["cacerts_google"], + required: [ + "cacerts_google", + "update_engine_payload_key", + ], srcs: ["main.cc"], init_rc: ["update_engine.rc"], @@ -366,7 +369,10 @@ cc_binary { }, }, - required: ["android.hardware.boot@1.0-impl-wrapper.recovery"], + required: [ + "android.hardware.boot@1.0-impl-wrapper.recovery", + "update_engine_payload_key.recovery", + ], } // libupdate_engine_client (type: shared_library) From 7fca28632aaac41c8a2f2f29b519ac8a7387e65c Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 28 Mar 2019 16:09:22 -0700 Subject: [PATCH 012/624] update_engine: Move Omaha xml request generation into its own file omaha_request_action.cc is becoming large and unmanagable. This CL moves the code related to the XML request building process into its own file so it can be managed properly. In the future we can clean it up and use more proper XML builders like tinyxml2. There is no semantic change in this. It just moves that part of the code into another file. 
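As a rough illustration of the tinyxml2 direction mentioned above (this CL does not do this; attribute names mirror the request format from the earlier updater-version change, and the protocol value "3.0" is an assumption):

#include <string>

#include <tinyxml2.h>

std::string BuildRequestSkeleton(bool interactive) {
  tinyxml2::XMLDocument doc;
  tinyxml2::XMLElement* request = doc.NewElement("request");
  request->SetAttribute("protocol", "3.0");  // assumed Omaha protocol version
  request->SetAttribute("updater", "ChromeOSUpdateEngine");   // kOmahaUpdaterID
  request->SetAttribute("updaterversion", "0.1.0.0");         // kOmahaUpdaterVersion
  request->SetAttribute("installsource",
                        interactive ? "ondemandupdate" : "scheduler");
  doc.InsertEndChild(request);
  // The <os> element and per-app <app>/<updatecheck> children would be
  // appended here before printing.
  tinyxml2::XMLPrinter printer;
  doc.Print(&printer);
  return printer.CStr();
}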
BUG=none TEST=unittest Change-Id: If774d86f6b29dd17963bec94bb6e91e2f4109a12 Reviewed-on: https://chromium-review.googlesource.com/1544892 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Amin Hassani Reviewed-by: Sen Jiang --- common/constants.cc | 2 + common/constants.h | 2 + omaha_request_action.cc | 406 +------------------------ omaha_request_action.h | 52 +--- omaha_request_action_unittest.cc | 22 +- omaha_request_builder_xml.cc | 413 ++++++++++++++++++++++++++ omaha_request_builder_xml.h | 161 ++++++++++ omaha_request_builder_xml_unittest.cc | 50 ++++ update_attempter.h | 1 + update_engine.gyp | 4 +- 10 files changed, 637 insertions(+), 476 deletions(-) create mode 100644 omaha_request_builder_xml.cc create mode 100644 omaha_request_builder_xml.h create mode 100644 omaha_request_builder_xml_unittest.cc diff --git a/common/constants.cc b/common/constants.cc index 310f1b2b..5ab96b0f 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -124,4 +124,6 @@ const char kPayloadPropertySwitchSlotOnReboot[] = "SWITCH_SLOT_ON_REBOOT"; // The default is 1 (always run post install). const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL"; +const char kOmahaUpdaterVersion[] = "0.1.0.0"; + } // namespace chromeos_update_engine diff --git a/common/constants.h b/common/constants.h index d5a8ae35..9b4623f5 100644 --- a/common/constants.h +++ b/common/constants.h @@ -108,6 +108,8 @@ extern const char kPayloadPropertyNetworkId[]; extern const char kPayloadPropertySwitchSlotOnReboot[]; extern const char kPayloadPropertyRunPostInstall[]; +extern const char kOmahaUpdaterVersion[]; + // A download source is any combination of protocol and server (that's of // interest to us when looking at UMA metrics) using which we may download // the payload. diff --git a/omaha_request_action.cc b/omaha_request_action.cc index deb294a1..5b69ec87 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -48,6 +48,7 @@ #include "update_engine/connection_manager_interface.h" #include "update_engine/metrics_reporter_interface.h" #include "update_engine/metrics_utils.h" +#include "update_engine/omaha_request_builder_xml.h" #include "update_engine/omaha_request_params.h" #include "update_engine/p2p_manager.h" #include "update_engine/payload_state_interface.h" @@ -106,8 +107,6 @@ constexpr char kAttrStatus[] = "status"; constexpr char kValPostInstall[] = "postinstall"; constexpr char kValNoUpdate[] = "noupdate"; -constexpr char kOmahaUpdaterVersion[] = "0.1.0.0"; - // X-Goog-Update headers. constexpr char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity"; constexpr char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId"; @@ -119,362 +118,6 @@ constexpr char kAttrRollback[] = "rollback"; constexpr char kAttrFirmwareVersion[] = "firmware_version"; constexpr char kAttrKernelVersion[] = "kernel_version"; -namespace { - -// Returns an XML ping element attribute assignment with attribute -// |name| and value |ping_days| if |ping_days| has a value that needs -// to be sent, or an empty string otherwise. -string GetPingAttribute(const string& name, int ping_days) { - if (ping_days > 0 || ping_days == OmahaRequestAction::kNeverPinged) - return base::StringPrintf(" %s=\"%d\"", name.c_str(), ping_days); - return ""; -} - -// Returns an XML ping element if any of the elapsed days need to be -// sent, or an empty string otherwise. 
-string GetPingXml(int ping_active_days, int ping_roll_call_days) { - string ping_active = GetPingAttribute("a", ping_active_days); - string ping_roll_call = GetPingAttribute("r", ping_roll_call_days); - if (!ping_active.empty() || !ping_roll_call.empty()) { - return base::StringPrintf(" \n", - ping_active.c_str(), - ping_roll_call.c_str()); - } - return ""; -} - -// Returns an XML that goes into the body of the element of the Omaha -// request based on the given parameters. -string GetAppBody(const OmahaEvent* event, - OmahaRequestParams* params, - bool ping_only, - bool include_ping, - bool skip_updatecheck, - int ping_active_days, - int ping_roll_call_days, - PrefsInterface* prefs) { - string app_body; - if (event == nullptr) { - if (include_ping) - app_body = GetPingXml(ping_active_days, ping_roll_call_days); - if (!ping_only) { - if (!skip_updatecheck) { - app_body += " target_version_prefix().empty()) { - app_body += base::StringPrintf( - " targetversionprefix=\"%s\"", - XmlEncodeWithDefault(params->target_version_prefix(), "") - .c_str()); - // Rollback requires target_version_prefix set. - if (params->rollback_allowed()) { - app_body += " rollback_allowed=\"true\""; - } - } - app_body += ">\n"; - } - - // If this is the first update check after a reboot following a previous - // update, generate an event containing the previous version number. If - // the previous version preference file doesn't exist the event is still - // generated with a previous version of 0.0.0.0 -- this is relevant for - // older clients or new installs. The previous version event is not sent - // for ping-only requests because they come before the client has - // rebooted. The previous version event is also not sent if it was already - // sent for this new version with a previous updatecheck. - string prev_version; - if (!prefs->GetString(kPrefsPreviousVersion, &prev_version)) { - prev_version = "0.0.0.0"; - } - // We only store a non-empty previous version value after a successful - // update in the previous boot. After reporting it back to the server, - // we clear the previous version value so it doesn't get reported again. - if (!prev_version.empty()) { - app_body += base::StringPrintf( - " \n", - OmahaEvent::kTypeRebootedAfterUpdate, - OmahaEvent::kResultSuccess, - XmlEncodeWithDefault(prev_version, "0.0.0.0").c_str()); - LOG_IF(WARNING, !prefs->SetString(kPrefsPreviousVersion, "")) - << "Unable to reset the previous version."; - } - } - } else { - // The error code is an optional attribute so append it only if the result - // is not success. - string error_code; - if (event->result != OmahaEvent::kResultSuccess) { - error_code = base::StringPrintf(" errorcode=\"%d\"", - static_cast(event->error_code)); - } - app_body = base::StringPrintf( - " \n", - event->type, - event->result, - error_code.c_str()); - } - - return app_body; -} - -// Returns the cohort* argument to include in the tag for the passed -// |arg_name| and |prefs_key|, if any. The return value is suitable to -// concatenate to the list of arguments and includes a space at the end. -string GetCohortArgXml(PrefsInterface* prefs, - const string arg_name, - const string prefs_key) { - // There's nothing wrong with not having a given cohort setting, so we check - // existence first to avoid the warning log message. 
- if (!prefs->Exists(prefs_key)) - return ""; - string cohort_value; - if (!prefs->GetString(prefs_key, &cohort_value) || cohort_value.empty()) - return ""; - // This is a sanity check to avoid sending a huge XML file back to Ohama due - // to a compromised stateful partition making the update check fail in low - // network environments envent after a reboot. - if (cohort_value.size() > 1024) { - LOG(WARNING) << "The omaha cohort setting " << arg_name - << " has a too big value, which must be an error or an " - "attacker trying to inhibit updates."; - return ""; - } - - string escaped_xml_value; - if (!XmlEncode(cohort_value, &escaped_xml_value)) { - LOG(WARNING) << "The omaha cohort setting " << arg_name - << " is ASCII-7 invalid, ignoring it."; - return ""; - } - - return base::StringPrintf( - "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str()); -} - -struct OmahaAppData { - string id; - string version; - string product_components; -}; - -bool IsValidComponentID(const string& id) { - for (char c : id) { - if (!isalnum(c) && c != '-' && c != '_' && c != '.') - return false; - } - return true; -} - -// Returns an XML that corresponds to the entire node of the Omaha -// request based on the given parameters. -string GetAppXml(const OmahaEvent* event, - OmahaRequestParams* params, - const OmahaAppData& app_data, - bool ping_only, - bool include_ping, - bool skip_updatecheck, - int ping_active_days, - int ping_roll_call_days, - int install_date_in_days, - SystemState* system_state) { - string app_body = GetAppBody(event, - params, - ping_only, - include_ping, - skip_updatecheck, - ping_active_days, - ping_roll_call_days, - system_state->prefs()); - string app_versions; - - // If we are downgrading to a more stable channel and we are allowed to do - // powerwash, then pass 0.0.0.0 as the version. This is needed to get the - // highest-versioned payload on the destination channel. - if (params->ShouldPowerwash()) { - LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash " - << "on downgrading to the version in the more stable channel"; - app_versions = "version=\"0.0.0.0\" from_version=\"" + - XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; - } else { - app_versions = "version=\"" + - XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; - } - - string download_channel = params->download_channel(); - string app_channels = - "track=\"" + XmlEncodeWithDefault(download_channel, "") + "\" "; - if (params->current_channel() != download_channel) { - app_channels += "from_track=\"" + - XmlEncodeWithDefault(params->current_channel(), "") + "\" "; - } - - string delta_okay_str = params->delta_okay() ? "true" : "false"; - - // If install_date_days is not set (e.g. its value is -1 ), don't - // include the attribute. 
- string install_date_in_days_str = ""; - if (install_date_in_days >= 0) { - install_date_in_days_str = - base::StringPrintf("installdate=\"%d\" ", install_date_in_days); - } - - string app_cohort_args; - app_cohort_args += - GetCohortArgXml(system_state->prefs(), "cohort", kPrefsOmahaCohort); - app_cohort_args += GetCohortArgXml( - system_state->prefs(), "cohorthint", kPrefsOmahaCohortHint); - app_cohort_args += GetCohortArgXml( - system_state->prefs(), "cohortname", kPrefsOmahaCohortName); - - string fingerprint_arg; - if (!params->os_build_fingerprint().empty()) { - fingerprint_arg = "fingerprint=\"" + - XmlEncodeWithDefault(params->os_build_fingerprint(), "") + - "\" "; - } - - string buildtype_arg; - if (!params->os_build_type().empty()) { - buildtype_arg = "os_build_type=\"" + - XmlEncodeWithDefault(params->os_build_type(), "") + "\" "; - } - - string product_components_args; - if (!params->ShouldPowerwash() && !app_data.product_components.empty()) { - brillo::KeyValueStore store; - if (store.LoadFromString(app_data.product_components)) { - for (const string& key : store.GetKeys()) { - if (!IsValidComponentID(key)) { - LOG(ERROR) << "Invalid component id: " << key; - continue; - } - string version; - if (!store.GetString(key, &version)) { - LOG(ERROR) << "Failed to get version for " << key - << " in product_components."; - continue; - } - product_components_args += - base::StringPrintf("_%s.version=\"%s\" ", - key.c_str(), - XmlEncodeWithDefault(version, "").c_str()); - } - } else { - LOG(ERROR) << "Failed to parse product_components:\n" - << app_data.product_components; - } - } - - // clang-format off - string app_xml = " app_lang(), "en-US") + "\" " + - "board=\"" + XmlEncodeWithDefault(params->os_board(), "") + "\" " + - "hardware_class=\"" + XmlEncodeWithDefault(params->hwid(), "") + "\" " + - "delta_okay=\"" + delta_okay_str + "\" " - "fw_version=\"" + XmlEncodeWithDefault(params->fw_version(), "") + "\" " + - "ec_version=\"" + XmlEncodeWithDefault(params->ec_version(), "") + "\" " + - install_date_in_days_str + - ">\n" + - app_body + - " \n"; - // clang-format on - return app_xml; -} - -// Returns an XML that corresponds to the entire node of the Omaha -// request based on the given parameters. -string GetOsXml(OmahaRequestParams* params) { - string os_xml = - " os_version(), "") + "\" " + "platform=\"" + - XmlEncodeWithDefault(params->os_platform(), "") + "\" " + "sp=\"" + - XmlEncodeWithDefault(params->os_sp(), "") + - "\">" - "\n"; - return os_xml; -} - -// Returns an XML that corresponds to the entire Omaha request based on the -// given parameters. -string GetRequestXml(const OmahaEvent* event, - OmahaRequestParams* params, - bool ping_only, - bool include_ping, - int ping_active_days, - int ping_roll_call_days, - int install_date_in_days, - SystemState* system_state) { - string os_xml = GetOsXml(params); - OmahaAppData product_app = { - .id = params->GetAppId(), - .version = params->app_version(), - .product_components = params->product_components()}; - // Skips updatecheck for platform app in case of an install operation. 
- string app_xml = GetAppXml(event, - params, - product_app, - ping_only, - include_ping, - params->is_install(), /* skip_updatecheck */ - ping_active_days, - ping_roll_call_days, - install_date_in_days, - system_state); - if (!params->system_app_id().empty()) { - OmahaAppData system_app = {.id = params->system_app_id(), - .version = params->system_version()}; - app_xml += GetAppXml(event, - params, - system_app, - ping_only, - include_ping, - false, /* skip_updatecheck */ - ping_active_days, - ping_roll_call_days, - install_date_in_days, - system_state); - } - // Create APP ID according to |dlc_module_id| (sticking the current AppID to - // the DLC module ID with an underscode). - for (const auto& dlc_module_id : params->dlc_module_ids()) { - OmahaAppData dlc_module_app = { - .id = params->GetAppId() + "_" + dlc_module_id, - .version = params->app_version()}; - app_xml += GetAppXml(event, - params, - dlc_module_app, - ping_only, - include_ping, - false, /* skip_updatecheck */ - ping_active_days, - ping_roll_call_days, - install_date_in_days, - system_state); - } - - string request_xml = base::StringPrintf( - "\n" - "\n%s%s\n", - constants::kOmahaUpdaterID, - kOmahaUpdaterVersion, - params->interactive() ? "ondemandupdate" : "scheduler", - os_xml.c_str(), - app_xml.c_str()); - - return request_xml; -} - -} // namespace - // Struct used for holding data obtained when parsing the XML. struct OmahaParserData { explicit OmahaParserData(XML_Parser _xml_parser) : xml_parser(_xml_parser) {} @@ -638,49 +281,6 @@ void ParserHandlerEntityDecl(void* user_data, } // namespace -bool XmlEncode(const string& input, string* output) { - if (std::find_if(input.begin(), input.end(), [](const char c) { - return c & 0x80; - }) != input.end()) { - LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:"; - utils::HexDumpString(input); - return false; - } - output->clear(); - // We need at least input.size() space in the output, but the code below will - // handle it if we need more. 
- output->reserve(input.size()); - for (char c : input) { - switch (c) { - case '\"': - output->append("""); - break; - case '\'': - output->append("'"); - break; - case '&': - output->append("&"); - break; - case '<': - output->append("<"); - break; - case '>': - output->append(">"); - break; - default: - output->push_back(c); - } - } - return true; -} - -string XmlEncodeWithDefault(const string& input, const string& default_value) { - string output; - if (XmlEncode(input, &output)) - return output; - return default_value; -} - OmahaRequestAction::OmahaRequestAction( SystemState* system_state, OmahaEvent* event, @@ -732,8 +332,8 @@ void OmahaRequestAction::InitPingDays() { } bool OmahaRequestAction::ShouldPing() const { - if (ping_active_days_ == OmahaRequestAction::kNeverPinged && - ping_roll_call_days_ == OmahaRequestAction::kNeverPinged) { + if (ping_active_days_ == kNeverPinged && + ping_roll_call_days_ == kNeverPinged) { int powerwash_count = system_state_->hardware()->GetPowerwashCount(); if (powerwash_count > 0) { LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because " diff --git a/omaha_request_action.h b/omaha_request_action.h index 8db5fb9b..8e81af96 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -33,6 +33,7 @@ #include "update_engine/common/action.h" #include "update_engine/common/http_fetcher.h" +#include "update_engine/omaha_request_builder_xml.h" #include "update_engine/omaha_response.h" #include "update_engine/system_state.h" @@ -45,56 +46,6 @@ class PolicyProvider; namespace chromeos_update_engine { -// Encodes XML entities in a given string. Input must be ASCII-7 valid. If -// the input is invalid, the default value is used instead. -std::string XmlEncodeWithDefault(const std::string& input, - const std::string& default_value); - -// Escapes text so it can be included as character data and attribute -// values. The |input| string must be valid ASCII-7, no UTF-8 supported. -// Returns whether the |input| was valid and escaped properly in |output|. -bool XmlEncode(const std::string& input, std::string* output); - -// This struct encapsulates the Omaha event information. For a -// complete list of defined event types and results, see -// http://code.google.com/p/omaha/wiki/ServerProtocol#event -struct OmahaEvent { - // The Type values correspond to EVENT_TYPE values of Omaha. - enum Type { - kTypeUnknown = 0, - kTypeDownloadComplete = 1, - kTypeInstallComplete = 2, - kTypeUpdateComplete = 3, - kTypeUpdateDownloadStarted = 13, - kTypeUpdateDownloadFinished = 14, - // Chromium OS reserved type sent after the first reboot following an update - // completed. - kTypeRebootedAfterUpdate = 54, - }; - - // The Result values correspond to EVENT_RESULT values of Omaha. - enum Result { - kResultError = 0, - kResultSuccess = 1, - kResultUpdateDeferred = 9, // When we ignore/defer updates due to policy. 
- }; - - OmahaEvent() - : type(kTypeUnknown), - result(kResultError), - error_code(ErrorCode::kError) {} - explicit OmahaEvent(Type in_type) - : type(in_type), - result(kResultSuccess), - error_code(ErrorCode::kSuccess) {} - OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code) - : type(in_type), result(in_result), error_code(in_error_code) {} - - Type type; - Result result; - ErrorCode error_code; -}; - class NoneType; class OmahaRequestAction; class OmahaRequestParams; @@ -116,7 +67,6 @@ class ActionTraits { class OmahaRequestAction : public Action, public HttpFetcherDelegate { public: - static const int kNeverPinged = -1; static const int kPingTimeJump = -2; // We choose this value of 10 as a heuristic for a work day in trying // each URL, assuming we check roughly every 45 mins. This is a good time to diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index a642b5a8..e7c610fb 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -51,6 +51,7 @@ #include "update_engine/metrics_reporter_interface.h" #include "update_engine/mock_connection_manager.h" #include "update_engine/mock_payload_state.h" +#include "update_engine/omaha_request_builder_xml.h" #include "update_engine/omaha_request_params.h" #include "update_engine/update_manager/rollback_prefs.h" @@ -1755,27 +1756,6 @@ TEST_F(OmahaRequestActionTest, TerminateTransferTest) { EXPECT_FALSE(loop.PendingTasks()); } -TEST_F(OmahaRequestActionTest, XmlEncodeTest) { - string output; - EXPECT_TRUE(XmlEncode("ab", &output)); - EXPECT_EQ("ab", output); - EXPECT_TRUE(XmlEncode("a\"\'\\", &output)); - EXPECT_EQ("<&>"'\\", output); - EXPECT_TRUE(XmlEncode("<&>", &output)); - EXPECT_EQ("&lt;&amp;&gt;", output); - // Check that unterminated UTF-8 strings are handled properly. - EXPECT_FALSE(XmlEncode("\xc2", &output)); - // Fail with invalid ASCII-7 chars. - EXPECT_FALSE(XmlEncode("This is an 'n' with a tilde: \xc3\xb1", &output)); -} - -TEST_F(OmahaRequestActionTest, XmlEncodeWithDefaultTest) { - EXPECT_EQ("<&>", XmlEncodeWithDefault("<&>", "something else")); - EXPECT_EQ("", XmlEncodeWithDefault("\xc2", "")); -} - TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { brillo::Blob post_data; diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc new file mode 100644 index 00000000..899f17ff --- /dev/null +++ b/omaha_request_builder_xml.cc @@ -0,0 +1,413 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/omaha_request_builder_xml.h" + +#include + +#include + +#include +#include +#include +#include +#include + +#include "update_engine/common/constants.h" +#include "update_engine/common/prefs_interface.h" +#include "update_engine/common/utils.h" +#include "update_engine/omaha_request_params.h" + +using std::string; + +namespace chromeos_update_engine { + +const int kNeverPinged = -1; + +bool XmlEncode(const string& input, string* output) { + if (std::find_if(input.begin(), input.end(), [](const char c) { + return c & 0x80; + }) != input.end()) { + LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:"; + utils::HexDumpString(input); + return false; + } + output->clear(); + // We need at least input.size() space in the output, but the code below will + // handle it if we need more. + output->reserve(input.size()); + for (char c : input) { + switch (c) { + case '\"': + output->append("""); + break; + case '\'': + output->append("'"); + break; + case '&': + output->append("&"); + break; + case '<': + output->append("<"); + break; + case '>': + output->append(">"); + break; + default: + output->push_back(c); + } + } + return true; +} + +string XmlEncodeWithDefault(const string& input, const string& default_value) { + string output; + if (XmlEncode(input, &output)) + return output; + return default_value; +} + +string GetPingAttribute(const string& name, int ping_days) { + if (ping_days > 0 || ping_days == kNeverPinged) + return base::StringPrintf(" %s=\"%d\"", name.c_str(), ping_days); + return ""; +} + +string GetPingXml(int ping_active_days, int ping_roll_call_days) { + string ping_active = GetPingAttribute("a", ping_active_days); + string ping_roll_call = GetPingAttribute("r", ping_roll_call_days); + if (!ping_active.empty() || !ping_roll_call.empty()) { + return base::StringPrintf(" \n", + ping_active.c_str(), + ping_roll_call.c_str()); + } + return ""; +} + +string GetAppBody(const OmahaEvent* event, + OmahaRequestParams* params, + bool ping_only, + bool include_ping, + bool skip_updatecheck, + int ping_active_days, + int ping_roll_call_days, + PrefsInterface* prefs) { + string app_body; + if (event == nullptr) { + if (include_ping) + app_body = GetPingXml(ping_active_days, ping_roll_call_days); + if (!ping_only) { + if (!skip_updatecheck) { + app_body += " target_version_prefix().empty()) { + app_body += base::StringPrintf( + " targetversionprefix=\"%s\"", + XmlEncodeWithDefault(params->target_version_prefix(), "") + .c_str()); + // Rollback requires target_version_prefix set. + if (params->rollback_allowed()) { + app_body += " rollback_allowed=\"true\""; + } + } + app_body += ">\n"; + } + + // If this is the first update check after a reboot following a previous + // update, generate an event containing the previous version number. If + // the previous version preference file doesn't exist the event is still + // generated with a previous version of 0.0.0.0 -- this is relevant for + // older clients or new installs. The previous version event is not sent + // for ping-only requests because they come before the client has + // rebooted. The previous version event is also not sent if it was already + // sent for this new version with a previous updatecheck. + string prev_version; + if (!prefs->GetString(kPrefsPreviousVersion, &prev_version)) { + prev_version = "0.0.0.0"; + } + // We only store a non-empty previous version value after a successful + // update in the previous boot. 
After reporting it back to the server, + // we clear the previous version value so it doesn't get reported again. + if (!prev_version.empty()) { + app_body += base::StringPrintf( + " \n", + OmahaEvent::kTypeRebootedAfterUpdate, + OmahaEvent::kResultSuccess, + XmlEncodeWithDefault(prev_version, "0.0.0.0").c_str()); + LOG_IF(WARNING, !prefs->SetString(kPrefsPreviousVersion, "")) + << "Unable to reset the previous version."; + } + } + } else { + // The error code is an optional attribute so append it only if the result + // is not success. + string error_code; + if (event->result != OmahaEvent::kResultSuccess) { + error_code = base::StringPrintf(" errorcode=\"%d\"", + static_cast(event->error_code)); + } + app_body = base::StringPrintf( + " \n", + event->type, + event->result, + error_code.c_str()); + } + + return app_body; +} + +string GetCohortArgXml(PrefsInterface* prefs, + const string arg_name, + const string prefs_key) { + // There's nothing wrong with not having a given cohort setting, so we check + // existence first to avoid the warning log message. + if (!prefs->Exists(prefs_key)) + return ""; + string cohort_value; + if (!prefs->GetString(prefs_key, &cohort_value) || cohort_value.empty()) + return ""; + // This is a sanity check to avoid sending a huge XML file back to Ohama due + // to a compromised stateful partition making the update check fail in low + // network environments envent after a reboot. + if (cohort_value.size() > 1024) { + LOG(WARNING) << "The omaha cohort setting " << arg_name + << " has a too big value, which must be an error or an " + "attacker trying to inhibit updates."; + return ""; + } + + string escaped_xml_value; + if (!XmlEncode(cohort_value, &escaped_xml_value)) { + LOG(WARNING) << "The omaha cohort setting " << arg_name + << " is ASCII-7 invalid, ignoring it."; + return ""; + } + + return base::StringPrintf( + "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str()); +} + +bool IsValidComponentID(const string& id) { + for (char c : id) { + if (!isalnum(c) && c != '-' && c != '_' && c != '.') + return false; + } + return true; +} + +string GetAppXml(const OmahaEvent* event, + OmahaRequestParams* params, + const OmahaAppData& app_data, + bool ping_only, + bool include_ping, + bool skip_updatecheck, + int ping_active_days, + int ping_roll_call_days, + int install_date_in_days, + SystemState* system_state) { + string app_body = GetAppBody(event, + params, + ping_only, + include_ping, + skip_updatecheck, + ping_active_days, + ping_roll_call_days, + system_state->prefs()); + string app_versions; + + // If we are downgrading to a more stable channel and we are allowed to do + // powerwash, then pass 0.0.0.0 as the version. This is needed to get the + // highest-versioned payload on the destination channel. + if (params->ShouldPowerwash()) { + LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash " + << "on downgrading to the version in the more stable channel"; + app_versions = "version=\"0.0.0.0\" from_version=\"" + + XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; + } else { + app_versions = "version=\"" + + XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; + } + + string download_channel = params->download_channel(); + string app_channels = + "track=\"" + XmlEncodeWithDefault(download_channel, "") + "\" "; + if (params->current_channel() != download_channel) { + app_channels += "from_track=\"" + + XmlEncodeWithDefault(params->current_channel(), "") + "\" "; + } + + string delta_okay_str = params->delta_okay() ? 
"true" : "false"; + + // If install_date_days is not set (e.g. its value is -1 ), don't + // include the attribute. + string install_date_in_days_str = ""; + if (install_date_in_days >= 0) { + install_date_in_days_str = + base::StringPrintf("installdate=\"%d\" ", install_date_in_days); + } + + string app_cohort_args; + app_cohort_args += + GetCohortArgXml(system_state->prefs(), "cohort", kPrefsOmahaCohort); + app_cohort_args += GetCohortArgXml( + system_state->prefs(), "cohorthint", kPrefsOmahaCohortHint); + app_cohort_args += GetCohortArgXml( + system_state->prefs(), "cohortname", kPrefsOmahaCohortName); + + string fingerprint_arg; + if (!params->os_build_fingerprint().empty()) { + fingerprint_arg = "fingerprint=\"" + + XmlEncodeWithDefault(params->os_build_fingerprint(), "") + + "\" "; + } + + string buildtype_arg; + if (!params->os_build_type().empty()) { + buildtype_arg = "os_build_type=\"" + + XmlEncodeWithDefault(params->os_build_type(), "") + "\" "; + } + + string product_components_args; + if (!params->ShouldPowerwash() && !app_data.product_components.empty()) { + brillo::KeyValueStore store; + if (store.LoadFromString(app_data.product_components)) { + for (const string& key : store.GetKeys()) { + if (!IsValidComponentID(key)) { + LOG(ERROR) << "Invalid component id: " << key; + continue; + } + string version; + if (!store.GetString(key, &version)) { + LOG(ERROR) << "Failed to get version for " << key + << " in product_components."; + continue; + } + product_components_args += + base::StringPrintf("_%s.version=\"%s\" ", + key.c_str(), + XmlEncodeWithDefault(version, "").c_str()); + } + } else { + LOG(ERROR) << "Failed to parse product_components:\n" + << app_data.product_components; + } + } + + // clang-format off + string app_xml = " app_lang(), "en-US") + "\" " + + "board=\"" + XmlEncodeWithDefault(params->os_board(), "") + "\" " + + "hardware_class=\"" + XmlEncodeWithDefault(params->hwid(), "") + "\" " + + "delta_okay=\"" + delta_okay_str + "\" " + "fw_version=\"" + XmlEncodeWithDefault(params->fw_version(), "") + "\" " + + "ec_version=\"" + XmlEncodeWithDefault(params->ec_version(), "") + "\" " + + install_date_in_days_str + + ">\n" + + app_body + + " \n"; + // clang-format on + return app_xml; +} + +string GetOsXml(OmahaRequestParams* params) { + string os_xml = + " os_version(), "") + "\" " + "platform=\"" + + XmlEncodeWithDefault(params->os_platform(), "") + "\" " + "sp=\"" + + XmlEncodeWithDefault(params->os_sp(), "") + + "\">" + "\n"; + return os_xml; +} + +string GetRequestXml(const OmahaEvent* event, + OmahaRequestParams* params, + bool ping_only, + bool include_ping, + int ping_active_days, + int ping_roll_call_days, + int install_date_in_days, + SystemState* system_state) { + string os_xml = GetOsXml(params); + OmahaAppData product_app = { + .id = params->GetAppId(), + .version = params->app_version(), + .product_components = params->product_components()}; + // Skips updatecheck for platform app in case of an install operation. 
+ string app_xml = GetAppXml(event, + params, + product_app, + ping_only, + include_ping, + params->is_install(), /* skip_updatecheck */ + ping_active_days, + ping_roll_call_days, + install_date_in_days, + system_state); + if (!params->system_app_id().empty()) { + OmahaAppData system_app = {.id = params->system_app_id(), + .version = params->system_version()}; + app_xml += GetAppXml(event, + params, + system_app, + ping_only, + include_ping, + false, /* skip_updatecheck */ + ping_active_days, + ping_roll_call_days, + install_date_in_days, + system_state); + } + // Create APP ID according to |dlc_module_id| (sticking the current AppID to + // the DLC module ID with an underscode). + for (const auto& dlc_module_id : params->dlc_module_ids()) { + OmahaAppData dlc_module_app = { + .id = params->GetAppId() + "_" + dlc_module_id, + .version = params->app_version()}; + app_xml += GetAppXml(event, + params, + dlc_module_app, + ping_only, + include_ping, + false, /* skip_updatecheck */ + ping_active_days, + ping_roll_call_days, + install_date_in_days, + system_state); + } + + string request_xml = base::StringPrintf( + "\n" + "\n%s%s\n", + constants::kOmahaUpdaterID, + kOmahaUpdaterVersion, + params->interactive() ? "ondemandupdate" : "scheduler", + os_xml.c_str(), + app_xml.c_str()); + + return request_xml; +} + +} // namespace chromeos_update_engine diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h new file mode 100644 index 00000000..011c5929 --- /dev/null +++ b/omaha_request_builder_xml.h @@ -0,0 +1,161 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_ +#define UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_ + +#include +#include +#include + +#include +#include +#include +#include + +#include // for FRIEND_TEST + +#include +#include + +#include "update_engine/common/action.h" +#include "update_engine/common/http_fetcher.h" +#include "update_engine/omaha_response.h" +#include "update_engine/system_state.h" + +// TODO(ahassani): Make the xml builder into a class of its own so we don't have +// to pass all these parameters around. + +namespace chromeos_update_engine { + +extern const int kNeverPinged; + +// This struct encapsulates the Omaha event information. For a +// complete list of defined event types and results, see +// http://code.google.com/p/omaha/wiki/ServerProtocol#event +struct OmahaEvent { + // The Type values correspond to EVENT_TYPE values of Omaha. + enum Type { + kTypeUnknown = 0, + kTypeDownloadComplete = 1, + kTypeInstallComplete = 2, + kTypeUpdateComplete = 3, + kTypeUpdateDownloadStarted = 13, + kTypeUpdateDownloadFinished = 14, + // Chromium OS reserved type sent after the first reboot following an update + // completed. + kTypeRebootedAfterUpdate = 54, + }; + + // The Result values correspond to EVENT_RESULT values of Omaha. 
+ enum Result { + kResultError = 0, + kResultSuccess = 1, + kResultUpdateDeferred = 9, // When we ignore/defer updates due to policy. + }; + + OmahaEvent() + : type(kTypeUnknown), + result(kResultError), + error_code(ErrorCode::kError) {} + explicit OmahaEvent(Type in_type) + : type(in_type), + result(kResultSuccess), + error_code(ErrorCode::kSuccess) {} + OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code) + : type(in_type), result(in_result), error_code(in_error_code) {} + + Type type; + Result result; + ErrorCode error_code; +}; + +struct OmahaAppData { + std::string id; + std::string version; + std::string product_components; +}; + +// Encodes XML entities in a given string. Input must be ASCII-7 valid. If +// the input is invalid, the default value is used instead. +std::string XmlEncodeWithDefault(const std::string& input, + const std::string& default_value); + +// Escapes text so it can be included as character data and attribute +// values. The |input| string must be valid ASCII-7, no UTF-8 supported. +// Returns whether the |input| was valid and escaped properly in |output|. +bool XmlEncode(const std::string& input, std::string* output); + +// Returns an XML ping element attribute assignment with attribute +// |name| and value |ping_days| if |ping_days| has a value that needs +// to be sent, or an empty string otherwise. +std::string GetPingAttribute(const std::string& name, int ping_days); + +// Returns an XML ping element if any of the elapsed days need to be +// sent, or an empty string otherwise. +std::string GetPingXml(int ping_active_days, int ping_roll_call_days); + +// Returns an XML that goes into the body of the element of the Omaha +// request based on the given parameters. +std::string GetAppBody(const OmahaEvent* event, + OmahaRequestParams* params, + bool ping_only, + bool include_ping, + bool skip_updatecheck, + int ping_active_days, + int ping_roll_call_days, + PrefsInterface* prefs); + +// Returns the cohort* argument to include in the tag for the passed +// |arg_name| and |prefs_key|, if any. The return value is suitable to +// concatenate to the list of arguments and includes a space at the end. +std::string GetCohortArgXml(PrefsInterface* prefs, + const std::string arg_name, + const std::string prefs_key); + +bool IsValidComponentID(const std::string& id); + +// Returns an XML that corresponds to the entire node of the Omaha +// request based on the given parameters. +std::string GetAppXml(const OmahaEvent* event, + OmahaRequestParams* params, + const OmahaAppData& app_data, + bool ping_only, + bool include_ping, + bool skip_updatecheck, + int ping_active_days, + int ping_roll_call_days, + int install_date_in_days, + SystemState* system_state); + +// Returns an XML that corresponds to the entire node of the Omaha +// request based on the given parameters. +std::string GetOsXml(OmahaRequestParams* params); + +// Returns an XML that corresponds to the entire Omaha request based on the +// given parameters. 
+std::string GetRequestXml(const OmahaEvent* event,
+ OmahaRequestParams* params,
+ bool ping_only,
+ bool include_ping,
+ int ping_active_days,
+ int ping_roll_call_days,
+ int install_date_in_days,
+ SystemState* system_state);
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_
diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc
new file mode 100644
index 00000000..3293c44d
--- /dev/null
+++ b/omaha_request_builder_xml_unittest.cc
@@ -0,0 +1,50 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/omaha_request_builder_xml.h"
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+class OmahaRequestBuilderXmlTest : public ::testing::Test {};
+
+TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeTest) {
+ string output;
+ EXPECT_TRUE(XmlEncode("ab", &output));
+ EXPECT_EQ("ab", output);
+ EXPECT_TRUE(XmlEncode("a<b", &output));
+ EXPECT_EQ("a&lt;b", output);
+ EXPECT_TRUE(XmlEncode("<&>\"\'\\", &output));
+ EXPECT_EQ("&lt;&amp;&gt;&quot;&apos;\\", output);
+ EXPECT_TRUE(XmlEncode("&lt;&amp;&gt;", &output));
+ EXPECT_EQ("&amp;lt;&amp;amp;&amp;gt;", output);
+ // Check that unterminated UTF-8 strings are handled properly.
+ EXPECT_FALSE(XmlEncode("\xc2", &output));
+ // Fail with invalid ASCII-7 chars.
+ EXPECT_FALSE(XmlEncode("This is an 'n' with a tilde: \xc3\xb1", &output)); +} + +TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) { + EXPECT_EQ("<&>", XmlEncodeWithDefault("<&>", "something else")); + EXPECT_EQ("", XmlEncodeWithDefault("\xc2", "")); +} + +} // namespace chromeos_update_engine diff --git a/update_attempter.h b/update_attempter.h index c106001a..6c25eb2e 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -37,6 +37,7 @@ #include "update_engine/common/action_processor.h" #include "update_engine/common/cpu_limiter.h" #include "update_engine/common/proxy_resolver.h" +#include "update_engine/omaha_request_builder_xml.h" #include "update_engine/omaha_request_params.h" #include "update_engine/omaha_response_handler_action.h" #include "update_engine/payload_consumer/download_action.h" diff --git a/update_engine.gyp b/update_engine.gyp index 754b314f..b7ccae8c 100644 --- a/update_engine.gyp +++ b/update_engine.gyp @@ -269,6 +269,7 @@ 'metrics_reporter_omaha.cc', 'metrics_utils.cc', 'omaha_request_action.cc', + 'omaha_request_builder_xml.cc', 'omaha_request_params.cc', 'omaha_response_handler_action.cc', 'omaha_utils.cc', @@ -424,8 +425,8 @@ 'payload_generator/inplace_generator.cc', 'payload_generator/mapfile_filesystem.cc', 'payload_generator/payload_file.cc', - 'payload_generator/payload_generation_config_chromeos.cc', 'payload_generator/payload_generation_config.cc', + 'payload_generator/payload_generation_config_chromeos.cc', 'payload_generator/payload_signer.cc', 'payload_generator/raw_filesystem.cc', 'payload_generator/squashfs_filesystem.cc', @@ -564,6 +565,7 @@ 'metrics_reporter_omaha_unittest.cc', 'metrics_utils_unittest.cc', 'omaha_request_action_unittest.cc', + 'omaha_request_builder_xml_unittest.cc', 'omaha_request_params_unittest.cc', 'omaha_response_handler_action_unittest.cc', 'omaha_utils_unittest.cc', From 41ac04b6e8bc9e30d8436171f3eb54e3958e8cd6 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 29 Mar 2019 11:31:03 -0700 Subject: [PATCH 013/624] update_engine: Simplify unittests in omaha_request_action_unittest.cc This file has a ton of unittests that call the function TestUpdateCheck() with specific parameters. The problem is if we want to add a new parameter to the function, we either have to add the parameter to all call sites (around 100 one of them, which is cumbersome) or add a new function with default parameters (which will get ugly). So instead just create a new structure |TestUpdateCheckParams| with default values to use instead of passing parameters to the function itself. So: - Removed one version of TestUpdateCheck(). - Removed TestRollbackCheck() and replaced with TestUpdateCheck(). - Also modified the output parameters of the TestUpdateCheck() to be in the class itself so unittests can just look them up. 
BUG=none
TEST=unittest

Change-Id: I96b40bffb74d15ae4567652b0153179496b6a200
Reviewed-on: https://chromium-review.googlesource.com/1544865
Commit-Ready: ChromeOS CL Exonerator Bot
Tested-by: Amin Hassani
Reviewed-by: Xiaochu Liu
Reviewed-by: Sen Jiang
---
 omaha_request_action_unittest.cc | 1721 +++++++++++-------------------
 1 file changed, 627 insertions(+), 1094 deletions(-)

diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index e7c610fb..ec083335 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -316,6 +316,19 @@ class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
 std::unique_ptr<OmahaResponse> omaha_response_;
 };
 
+struct TestUpdateCheckParams {
+ string http_response;
+ int fail_http_response_code;
+ bool ping_only;
+ bool is_consumer_device;
+ int rollback_allowed_milestones;
+ bool is_policy_loaded;
+ ErrorCode expected_code;
+ metrics::CheckResult expected_check_result;
+ metrics::CheckReaction expected_check_reaction;
+ metrics::DownloadErrorCode expected_download_error_code;
+};
+
 class OmahaRequestActionTest : public ::testing::Test {
 protected:
 void SetUp() override {
@@ -340,56 +353,39 @@ class OmahaRequestActionTest : public ::testing::Test {
 fake_system_state_.set_request_params(&request_params_);
 fake_system_state_.set_prefs(&fake_prefs_);
+
+ // Setting the default update check params. Look up |TestUpdateCheck()|.
+ tuc_params_ = {
+ .http_response = "",
+ .fail_http_response_code = -1,
+ .ping_only = false,
+ .is_consumer_device = true,
+ .rollback_allowed_milestones = 0,
+ .is_policy_loaded = false,
+ .expected_code = ErrorCode::kSuccess,
+ .expected_check_result = metrics::CheckResult::kUpdateAvailable,
+ .expected_check_reaction = metrics::CheckReaction::kUpdating,
+ .expected_download_error_code = metrics::DownloadErrorCode::kUnset,
+ };
 }
 
- // Returns true iff an output response was obtained from the
- // OmahaRequestAction. |prefs| may be null, in which case a local MockPrefs
- // is used. |payload_state| may be null, in which case a local mock is used.
- // |p2p_manager| may be null, in which case a local mock is used.
- // |connection_manager| may be null, in which case a local mock is used.
- // out_response may be null. If |fail_http_response_code| is non-negative,
- // the transfer will fail with that code. |ping_only| is passed through to the
- // OmahaRequestAction constructor. out_post_data may be null; if non-null, the
- // post-data received by the mock HttpFetcher is returned.
+ // This function uses the parameters in |tuc_params_| to do an update check.
+ // It will fill out |post_str| with the result data and |response| with the
+ // parsed |OmahaResponse|. Returns true iff an output response was obtained
+ // from the |OmahaRequestAction|. If |fail_http_response_code| is
+ // non-negative, the transfer will fail with that code. |ping_only| is passed
+ // through to the |OmahaRequestAction| constructor.
 //
 // The |expected_check_result|, |expected_check_reaction| and
- // |expected_error_code| parameters are for checking expectations
- // about reporting UpdateEngine.Check.{Result,Reaction,DownloadError}
- // UMA statistics. Use the appropriate ::kUnset value to specify that
- // the given metric should not be reported.
- bool TestUpdateCheck(const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data);
-
- // Overload of TestUpdateCheck that does not supply |is_consumer_device| or
- // |rollback_allowed_milestones| which are only required for rollback tests.
- bool TestUpdateCheck(const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data);
-
- void TestRollbackCheck(bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- OmahaResponse* out_response);
-
- void TestEvent(OmahaEvent* event,
- const string& http_response,
- brillo::Blob* out_post_data);
+ // |expected_error_code| parameters are for checking expectations about
+ // reporting UpdateEngine.Check.{Result,Reaction,DownloadError} UMA
+ // statistics. Use the appropriate ::kUnset value to specify that the given
+ // metric should not be reported.
+ bool TestUpdateCheck();
+
+ // Tests events using |event| and |http_response|. It will fill out
+ // |post_str| with the result data.
+ void TestEvent(OmahaEvent* event, const string& http_response);

 // Runs and checks a ping test. |ping_only| indicates whether it should send
 // only a ping or also an updatecheck.
@@ -421,54 +417,52 @@ class OmahaRequestActionTest : public ::testing::Test {
 OmahaRequestActionTestProcessorDelegate delegate_;
 bool test_http_fetcher_headers_{false};
+
+ TestUpdateCheckParams tuc_params_;
+
+ // TODO(ahassani): Add trailing _ to these two variables.
+ OmahaResponse response;
+ string post_str;
 };
 
-bool OmahaRequestActionTest::TestUpdateCheck(
- const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data) {
+bool OmahaRequestActionTest::TestUpdateCheck() {
 brillo::FakeMessageLoop loop(nullptr);
 loop.SetAsCurrent();
- auto fetcher = std::make_unique<MockHttpFetcher>(
- http_response.data(), http_response.size(), nullptr);
- if (fail_http_response_code >= 0) {
- fetcher->FailTransfer(fail_http_response_code);
+ auto fetcher =
+ std::make_unique<MockHttpFetcher>(tuc_params_.http_response.data(),
+ tuc_params_.http_response.size(),
+ nullptr);
+ if (tuc_params_.fail_http_response_code >= 0) {
+ fetcher->FailTransfer(tuc_params_.fail_http_response_code);
 }
 // This ensures the tests didn't forget to update fake_system_state_ if they
 // are not using the default request_params_.
EXPECT_EQ(&request_params_, fake_system_state_.request_params()); auto omaha_request_action = std::make_unique( - &fake_system_state_, nullptr, std::move(fetcher), ping_only); + &fake_system_state_, nullptr, std::move(fetcher), tuc_params_.ping_only); auto mock_policy_provider = std::make_unique>(); EXPECT_CALL(*mock_policy_provider, IsConsumerDevice()) - .WillRepeatedly(Return(is_consumer_device)); + .WillRepeatedly(Return(tuc_params_.is_consumer_device)); EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded()) - .WillRepeatedly(Return(is_policy_loaded)); + .WillRepeatedly(Return(tuc_params_.is_policy_loaded)); const policy::MockDevicePolicy device_policy; - const bool get_allowed_milestone_succeeds = rollback_allowed_milestones >= 0; + const bool get_allowed_milestone_succeeds = + tuc_params_.rollback_allowed_milestones >= 0; EXPECT_CALL(device_policy, GetRollbackAllowedMilestones(_)) - .WillRepeatedly(DoAll(SetArgPointee<0>(rollback_allowed_milestones), - Return(get_allowed_milestone_succeeds))); + .WillRepeatedly( + DoAll(SetArgPointee<0>(tuc_params_.rollback_allowed_milestones), + Return(get_allowed_milestone_succeeds))); EXPECT_CALL(*mock_policy_provider, GetDevicePolicy()) .WillRepeatedly(ReturnRef(device_policy)); omaha_request_action->policy_provider_ = std::move(mock_policy_provider); - delegate_.expected_code_ = expected_code; + delegate_.expected_code_ = tuc_params_.expected_code; delegate_.interactive_ = request_params_.interactive(); delegate_.test_http_fetcher_headers_ = test_http_fetcher_headers_; ActionProcessor processor; @@ -484,75 +478,30 @@ bool OmahaRequestActionTest::TestUpdateCheck( ReportUpdateCheckMetrics(_, _, _, _)) .Times(AnyNumber()); - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), - ReportUpdateCheckMetrics(_, - expected_check_result, - expected_check_reaction, - expected_download_error_code)) - .Times(ping_only ? 0 : 1); + EXPECT_CALL( + *fake_system_state_.mock_metrics_reporter(), + ReportUpdateCheckMetrics(_, + tuc_params_.expected_check_result, + tuc_params_.expected_check_reaction, + tuc_params_.expected_download_error_code)) + .Times(tuc_params_.ping_only ? 
0 : 1); loop.PostTask(base::Bind( [](ActionProcessor* processor) { processor->StartProcessing(); }, base::Unretained(&processor))); loop.Run(); EXPECT_FALSE(loop.PendingTasks()); - if (delegate_.omaha_response_ && out_response) - *out_response = *delegate_.omaha_response_; - if (out_post_data) - *out_post_data = delegate_.post_data_; + if (delegate_.omaha_response_) + response = *delegate_.omaha_response_; + post_str = string(delegate_.post_data_.begin(), delegate_.post_data_.end()); return delegate_.omaha_response_ != nullptr; } -bool OmahaRequestActionTest::TestUpdateCheck( - const string& http_response, - int fail_http_response_code, - bool ping_only, - ErrorCode expected_code, - metrics::CheckResult expected_check_result, - metrics::CheckReaction expected_check_reaction, - metrics::DownloadErrorCode expected_download_error_code, - OmahaResponse* out_response, - brillo::Blob* out_post_data) { - return TestUpdateCheck(http_response, - fail_http_response_code, - ping_only, - true, // is_consumer_device - 0, // rollback_allowed_milestones - false, // is_policy_loaded - expected_code, - expected_check_result, - expected_check_reaction, - expected_download_error_code, - out_response, - out_post_data); -} - -void OmahaRequestActionTest::TestRollbackCheck(bool is_consumer_device, - int rollback_allowed_milestones, - bool is_policy_loaded, - OmahaResponse* out_response) { - fake_update_response_.deadline = "20101020"; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - is_consumer_device, - rollback_allowed_milestones, - is_policy_loaded, - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - out_response, - nullptr)); - ASSERT_TRUE(out_response->update_exists); -} - // Tests Event requests -- they should always succeed. |out_post_data| may be // null; if non-null, the post-data received by the mock HttpFetcher is // returned. 
void OmahaRequestActionTest::TestEvent(OmahaEvent* event, - const string& http_response, - brillo::Blob* out_post_data) { + const string& http_response) { brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); @@ -572,100 +521,69 @@ void OmahaRequestActionTest::TestEvent(OmahaEvent* event, loop.Run(); EXPECT_FALSE(loop.PendingTasks()); - if (out_post_data) - *out_post_data = delegate_.post_data_; + post_str = string(delegate_.post_data_.begin(), delegate_.post_data_.end()); } TEST_F(OmahaRequestActionTest, RejectEntities) { - OmahaResponse response; fake_update_response_.include_entity = true; - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLHasEntityDecl, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLHasEntityDecl; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, NoUpdateTest) { - OmahaResponse response; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) { - OmahaResponse response; fake_update_response_.multi_app_no_update = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) { - OmahaResponse response; fake_update_response_.multi_app_no_update = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) { - OmahaResponse response; - ASSERT_TRUE(TestUpdateCheck( + tuc_params_.http_response = "" - "", - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, 
- metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } // Test that all the values in the response are parsed in a normal update // response. TEST_F(OmahaRequestActionTest, ValidUpdateTest) { - OmahaResponse response; fake_update_response_.deadline = "20101020"; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); EXPECT_EQ("", response.system_version); @@ -686,17 +604,11 @@ TEST_F(OmahaRequestActionTest, ValidUpdateTest) { } TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) { - OmahaResponse response; fake_update_response_.multi_package = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); EXPECT_EQ(fake_update_response_.GetPayloadUrl(), @@ -715,17 +627,11 @@ TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) { } TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) { - OmahaResponse response; fake_update_response_.multi_app = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); EXPECT_EQ(fake_update_response_.GetPayloadUrl(), @@ -744,20 +650,13 @@ TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) { } TEST_F(OmahaRequestActionTest, MultiAppAndSystemUpdateTest) { - OmahaResponse response; fake_update_response_.multi_app = true; - // trigger the lining up of the app and system versions + // Trigger the lining up of the app and system versions. 
request_params_.set_system_app_id(fake_update_response_.app_id2); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); EXPECT_EQ(fake_update_response_.version2, response.system_version); @@ -777,18 +676,12 @@ TEST_F(OmahaRequestActionTest, MultiAppAndSystemUpdateTest) { } TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) { - OmahaResponse response; fake_update_response_.multi_app = true; fake_update_response_.multi_app_self_update = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); EXPECT_EQ("", response.system_version); @@ -805,18 +698,12 @@ TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) { } TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) { - OmahaResponse response; fake_update_response_.multi_app = true; fake_update_response_.multi_package = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); EXPECT_EQ("", response.system_version); @@ -842,55 +729,42 @@ TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) { } TEST_F(OmahaRequestActionTest, PowerwashTest) { - OmahaResponse response; fake_update_response_.powerwash = true; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_TRUE(response.powerwash_required); } TEST_F(OmahaRequestActionTest, ExtraHeadersSentInteractiveTest) { - OmahaResponse response; request_params_.set_interactive(true); test_http_fetcher_headers_ = true; - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); + 
EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, ExtraHeadersSentNoInteractiveTest) { - OmahaResponse response; request_params_.set_interactive(false); test_http_fetcher_headers_ = true; - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) { - OmahaResponse response; // Set up a connection manager that doesn't allow a valid update over // the current ethernet connection. MockConnectionManager mock_cm; @@ -903,24 +777,19 @@ TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) { EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _)) .WillRepeatedly(Return(false)); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateIgnoredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kIgnored, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) { // This test tests that update over cellular is allowed as device policy // says yes. - OmahaResponse response; MockConnectionManager mock_cm; - fake_system_state_.set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) @@ -932,24 +801,17 @@ TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) { EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _)) .WillRepeatedly(Return(true)); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) { // This test tests that update over cellular is blocked as device policy // says no. 
- OmahaResponse response; MockConnectionManager mock_cm; - fake_system_state_.set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) @@ -961,15 +823,12 @@ TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) { EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _)) .WillRepeatedly(Return(false)); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateIgnoredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kIgnored, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } @@ -977,9 +836,7 @@ TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByUserPermissionTrue) { // This test tests that, when device policy is not set, update over cellular // is allowed as permission for update over cellular is set to true. - OmahaResponse response; MockConnectionManager mock_cm; - fake_prefs_.SetBoolean(kPrefsUpdateOverCellularPermission, true); fake_system_state_.set_connection_manager(&mock_cm); @@ -992,15 +849,10 @@ TEST_F(OmahaRequestActionTest, EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _)) .WillRepeatedly(Return(true)); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } @@ -1009,7 +861,6 @@ TEST_F(OmahaRequestActionTest, // This test tests that, when device policy is not set and permission for // update over cellular is set to false or does not exist, update over // cellular is blocked as update target does not match the omaha response. - OmahaResponse response; MockConnectionManager mock_cm; // A version different from the version in omaha response. string diff_version = "99.99.99"; @@ -1030,15 +881,12 @@ TEST_F(OmahaRequestActionTest, EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _)) .WillRepeatedly(Return(true)); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateIgnoredOverCellular, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kIgnored, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredOverCellular; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } @@ -1047,7 +895,6 @@ TEST_F(OmahaRequestActionTest, // This test tests that, when device policy is not set and permission for // update over cellular is set to false or does not exist, update over // cellular is allowed as update target matches the omaha response. - OmahaResponse response; MockConnectionManager mock_cm; // A version same as the version in omaha response. 
string new_version = fake_update_response_.version; @@ -1067,96 +914,67 @@ TEST_F(OmahaRequestActionTest, EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _)) .WillRepeatedly(Return(true)); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) { string rollback_version = "1234.0.0"; - OmahaResponse response; - MockPayloadState mock_payload_state; fake_system_state_.set_payload_state(&mock_payload_state); + fake_update_response_.version = rollback_version; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored; EXPECT_CALL(mock_payload_state, GetRollbackVersion()) .WillRepeatedly(Return(rollback_version)); - fake_update_response_.version = rollback_version; - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateIgnoredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kIgnored, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } // Verify that update checks called during OOBE will not try to download an // update if the response doesn't include the deadline field. TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) { - OmahaResponse response; fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; // TODO(senj): set better default value for metrics::checkresult in // OmahaRequestAction::ActionCompleted. - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kNonCriticalUpdateInOOBE, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } // Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not // enabled. TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) { - OmahaResponse response; fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); EXPECT_TRUE(response.update_exists); } // Verify that update checks called during OOBE will still try to download an // update if the response includes the deadline field. 
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) { - OmahaResponse response; fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); fake_update_response_.deadline = "20101020"; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); EXPECT_TRUE(response.update_exists); } @@ -1164,21 +982,18 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) { // update if a rollback happened, even when the response includes the deadline // field. TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) { - OmahaResponse response; fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); fake_update_response_.deadline = "20101020"; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetRollbackHappened()) .WillOnce(Return(true)); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kNonCriticalUpdateInOOBE, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } @@ -1188,11 +1003,14 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) { // kOmahaUpdateIgnoredOverCellular error in this case might cause undesired UX // in OOBE (warning the user about an update that will be skipped). 
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) { - OmahaResponse response; fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); MockConnectionManager mock_cm; fake_system_state_.set_connection_manager(&mock_cm); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular), @@ -1201,111 +1019,77 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) { EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet()) .WillRepeatedly(Return(false)); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kNonCriticalUpdateInOOBE, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_update_check_count_wait_enabled(false); request_params_.set_waiting_period(TimeDelta::FromDays(2)); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; + + ASSERT_FALSE(TestUpdateCheck()); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateDeferredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kDeferring, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); EXPECT_FALSE(response.update_exists); +} - // Verify if we are interactive check we don't defer. +TEST_F(OmahaRequestActionTest, + WallClockBasedWaitAloneCausesScatteringInteractive) { + request_params_.set_wall_clock_based_wait_enabled(true); + request_params_.set_update_check_count_wait_enabled(false); + request_params_.set_waiting_period(TimeDelta::FromDays(2)); request_params_.set_interactive(true); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + // Verify if we are interactive check we don't defer. 
+ ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, NoWallClockBasedWaitCausesNoScattering) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(false); request_params_.set_waiting_period(TimeDelta::FromDays(2)); request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, ZeroMaxDaysToScatterCausesNoScattering) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta::FromDays(2)); request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); - fake_update_response_.max_days_to_scatter = "0"; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta()); request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(0); request_params_.set_max_update_checks_allowed(0); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_TRUE(TestUpdateCheck()); int64_t count; ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count)); @@ -1314,141 +1098,118 @@ TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) { } TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta()); request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateDeferredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - 
metrics::CheckReaction::kDeferring, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_FALSE(TestUpdateCheck()); int64_t count; ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count)); ASSERT_GT(count, 0); EXPECT_FALSE(response.update_exists); +} - // Verify if we are interactive check we don't defer. +TEST_F(OmahaRequestActionTest, + NonZeroUpdateCheckCountCausesScatteringInteractive) { + request_params_.set_wall_clock_based_wait_enabled(true); + request_params_.set_waiting_period(TimeDelta()); + request_params_.set_update_check_count_wait_enabled(true); + request_params_.set_min_update_checks_needed(1); + request_params_.set_max_update_checks_allowed(8); request_params_.set_interactive(true); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + // Verify if we are interactive check we don't defer. + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta()); request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5)); - - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateDeferredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kDeferring, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_FALSE(TestUpdateCheck()); int64_t count; ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count)); - // count remains the same, as the decrementing happens in update_attempter + // |count| remains the same, as the decrementing happens in update_attempter // which this test doesn't exercise. ASSERT_EQ(count, 5); EXPECT_FALSE(response.update_exists); +} - // Verify if we are interactive check we don't defer. 
+TEST_F(OmahaRequestActionTest, + ExistingUpdateCheckCountCausesScatteringInteractive) { + request_params_.set_wall_clock_based_wait_enabled(true); + request_params_.set_waiting_period(TimeDelta()); + request_params_.set_update_check_count_wait_enabled(true); + request_params_.set_min_update_checks_needed(1); + request_params_.set_max_update_checks_allowed(8); request_params_.set_interactive(true); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5)); + + // Verify if we are interactive check we don't defer. + ASSERT_TRUE(TestUpdateCheck()); EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) { // If staging is on, the value for max days to scatter should be ignored, and // staging's scatter value should be used. - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta::FromDays(6)); request_params_.set_update_check_count_wait_enabled(false); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, 6)); // This should not prevent scattering due to staging. fake_update_response_.max_days_to_scatter = "0"; - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateDeferredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kDeferring, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); // Interactive updates should not be affected. 
request_params_.set_interactive(true); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.expected_code = ErrorCode::kSuccess; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUpdating; + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, CohortsArePersisted) { - OmahaResponse response; fake_update_response_.include_cohorts = true; fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = "stable"; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_TRUE(TestUpdateCheck()); string value; EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value)); @@ -1462,7 +1223,6 @@ TEST_F(OmahaRequestActionTest, CohortsArePersisted) { } TEST_F(OmahaRequestActionTest, CohortsAreUpdated) { - OmahaResponse response; EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value")); EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortHint, "old_hint")); EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortName, "old_name")); @@ -1470,16 +1230,9 @@ TEST_F(OmahaRequestActionTest, CohortsAreUpdated) { fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = ""; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_TRUE(TestUpdateCheck()); string value; EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value)); @@ -1492,18 +1245,10 @@ TEST_F(OmahaRequestActionTest, CohortsAreUpdated) { } TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) { - OmahaResponse response; - EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value")); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value")); + ASSERT_TRUE(TestUpdateCheck()); string value; EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value)); @@ -1514,21 +1259,15 @@ TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) { } TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) { - OmahaResponse response; fake_update_response_.include_cohorts = true; fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = "stable"; + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = 
metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_TRUE(TestUpdateCheck()); string value; EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value)); @@ -1542,22 +1281,14 @@ TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) { } TEST_F(OmahaRequestActionTest, MultiAppCohortTest) { - OmahaResponse response; fake_update_response_.multi_app = true; fake_update_response_.include_cohorts = true; fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = "stable"; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_TRUE(TestUpdateCheck()); string value; EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value)); @@ -1572,7 +1303,6 @@ TEST_F(OmahaRequestActionTest, MultiAppCohortTest) { TEST_F(OmahaRequestActionTest, NoOutputPipeTest) { const string http_response(fake_update_response_.GetNoUpdateResponse()); - brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); @@ -1595,92 +1325,71 @@ TEST_F(OmahaRequestActionTest, NoOutputPipeTest) { } TEST_F(OmahaRequestActionTest, InvalidXmlTest) { - OmahaResponse response; - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, EmptyResponseTest) { - OmahaResponse response; - ASSERT_FALSE(TestUpdateCheck("", - -1, - false, // ping_only - ErrorCode::kOmahaRequestEmptyResponseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.expected_code = ErrorCode::kOmahaRequestEmptyResponseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, MissingStatusTest) { - OmahaResponse response; - ASSERT_FALSE(TestUpdateCheck( + tuc_params_.http_response = "" "" "" "" - "", - -1, - false, // ping_only - ErrorCode::kOmahaResponseInvalid, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ""; + tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = 
metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, InvalidStatusTest) { - OmahaResponse response; - ASSERT_FALSE(TestUpdateCheck( + tuc_params_.http_response = "" "" "" "" - "", - -1, - false, // ping_only - ErrorCode::kOmahaResponseInvalid, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ""; + tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, MissingNodesetTest) { - OmahaResponse response; - ASSERT_FALSE(TestUpdateCheck( + tuc_params_.http_response = "" "" "" "" - "", - -1, - false, // ping_only - ErrorCode::kOmahaResponseInvalid, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ""; + tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, MissingFieldTest) { - string input_response = + tuc_params_.http_response = "" "" // the appid needs to match that in the request params @@ -1697,18 +1406,9 @@ TEST_F(OmahaRequestActionTest, MissingFieldTest) { "IsDeltaPayload=\"false\" " "sha256=\"not-used\" " "/>"; - LOG(INFO) << "Input Response = " << input_response; - OmahaResponse response; - ASSERT_TRUE(TestUpdateCheck(input_response, - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_EQ("10.2.3.4", response.version); EXPECT_EQ("http://missing/field/test/f", @@ -1757,9 +1457,7 @@ TEST_F(OmahaRequestActionTest, TerminateTransferTest) { } TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { - brillo::Blob post_data; - - // Make sure XML Encode is being called on the params + // Make sure XML Encode is being called on the params. 
request_params_.set_os_sp("testtheservice_pack>"); request_params_.set_os_board("x86 generic(100, "My spoon is too big."), " ")); - OmahaResponse response; - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - &response, - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_NE(string::npos, post_str.find("testtheservice_pack>")); EXPECT_EQ(string::npos, post_str.find("testtheservice_pack>")); EXPECT_NE(string::npos, post_str.find("x86 generic<id")); @@ -1799,19 +1492,12 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { } TEST_F(OmahaRequestActionTest, XmlDecodeTest) { - OmahaResponse response; fake_update_response_.deadline = "<20110101"; fake_update_response_.more_info_url = "testthe<url"; fake_update_response_.codebase = "testthe&codebase/"; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_EQ("testthe prefs; fake_system_state_.set_prefs(&prefs); + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; EXPECT_CALL(prefs, GetString(kPrefsPreviousVersion, _)) .WillOnce(DoAll(SetArgPointee<1>(string("")), Return(true))); // An existing but empty previous version means that we didn't reboot to a new // update, therefore, no need to update the previous version. 
EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, // response - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_NE( post_str.find(" \n" " \n"), @@ -1870,12 +1542,9 @@ TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) { } TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) { - brillo::Blob post_data; TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted), - "invalid xml>", - &post_data); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + "invalid xml>"); + string expected_event = base::StringPrintf( " \n", OmahaEvent::kTypeUpdateDownloadStarted, @@ -1886,14 +1555,11 @@ TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) { } TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) { - brillo::Blob post_data; TestEvent(new OmahaEvent(OmahaEvent::kTypeDownloadComplete, OmahaEvent::kResultError, ErrorCode::kError), - "invalid xml>", - &post_data); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + "invalid xml>"); + string expected_event = base::StringPrintf( " \n", @@ -1924,24 +1590,17 @@ TEST_F(OmahaRequestActionTest, IsEventTest) { } TEST_F(OmahaRequestActionTest, FormatDeltaOkayOutputTest) { + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + for (int i = 0; i < 2; i++) { bool delta_okay = i == 1; const char* delta_okay_str = delta_okay ? "true" : "false"; - brillo::Blob post_data; - request_params_.set_delta_okay(delta_okay); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + ASSERT_FALSE(TestUpdateCheck()); EXPECT_NE( post_str.find(base::StringPrintf(" delta_okay=\"%s\"", delta_okay_str)), string::npos) @@ -1950,25 +1609,17 @@ TEST_F(OmahaRequestActionTest, FormatDeltaOkayOutputTest) { } TEST_F(OmahaRequestActionTest, FormatInteractiveOutputTest) { + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + for (int i = 0; i < 2; i++) { bool interactive = i == 1; const char* interactive_str = interactive ? 
"ondemandupdate" : "scheduler"; - brillo::Blob post_data; - FakeSystemState fake_system_state; - request_params_.set_interactive(interactive); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + ASSERT_FALSE(TestUpdateCheck()); EXPECT_NE(post_str.find( base::StringPrintf("installsource=\"%s\"", interactive_str)), string::npos) @@ -1977,25 +1628,17 @@ TEST_F(OmahaRequestActionTest, FormatInteractiveOutputTest) { } TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) { + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + for (int i = 0; i < 2; i++) { bool target_version_set = i == 1; const char* target_version_prefix = target_version_set ? "10032." : ""; - brillo::Blob post_data; - FakeSystemState fake_system_state; - request_params_.set_target_version_prefix(target_version_prefix); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + ASSERT_FALSE(TestUpdateCheck()); if (target_version_set) { EXPECT_NE(post_str.find(""), string::npos) @@ -2008,27 +1651,19 @@ TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) { } TEST_F(OmahaRequestActionTest, FormatRollbackAllowedOutputTest) { + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + for (int i = 0; i < 4; i++) { bool rollback_allowed = i / 2 == 0; bool target_version_set = i % 2 == 0; - brillo::Blob post_data; - FakeSystemState fake_system_state; - request_params_.set_target_version_prefix(target_version_set ? "10032." 
: ""); request_params_.set_rollback_allowed(rollback_allowed); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + ASSERT_FALSE(TestUpdateCheck()); if (rollback_allowed && target_version_set) { EXPECT_NE(post_str.find("rollback_allowed=\"true\""), string::npos) << "i = " << i; @@ -2074,17 +1709,14 @@ void OmahaRequestActionTest::PingTest(bool ping_only) { .WillOnce(DoAll(SetArgPointee<1>(six_days_ago), Return(true))); EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _)) .WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true))); - brillo::Blob post_data; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - ping_only, - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.ping_only = ping_only; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_NE(post_str.find(""), string::npos); if (ping_only) { @@ -2119,17 +1751,13 @@ TEST_F(OmahaRequestActionTest, ActivePingTest) { .WillOnce(DoAll(SetArgPointee<1>(three_days_ago), Return(true))); EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _)) .WillOnce(DoAll(SetArgPointee<1>(now), Return(true))); - brillo::Blob post_data; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_NE(post_str.find(""), string::npos); } @@ -2148,17 +1776,13 @@ TEST_F(OmahaRequestActionTest, RollCallPingTest) { .WillOnce(DoAll(SetArgPointee<1>(now), Return(true))); EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _)) .WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true))); - brillo::Blob post_data; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_NE(post_str.find("\n"), string::npos); } @@ -2183,17 +1807,13 @@ TEST_F(OmahaRequestActionTest, NoPingTest) { .WillOnce(Return(true)); EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)) 
.WillOnce(Return(true)); - brillo::Blob post_data; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_EQ(post_str.find("ping"), string::npos); } @@ -2208,17 +1828,14 @@ TEST_F(OmahaRequestActionTest, IgnoreEmptyPingTest) { .WillOnce(DoAll(SetArgPointee<1>(now), Return(true))); EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0); EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0); - brillo::Blob post_data; - EXPECT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - true, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUnset, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - EXPECT_EQ(0U, post_data.size()); + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.ping_only = true; + tuc_params_.expected_check_result = metrics::CheckResult::kUnset; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + EXPECT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(post_str.empty()); } TEST_F(OmahaRequestActionTest, BackInTimePingTest) { @@ -2239,21 +1856,16 @@ TEST_F(OmahaRequestActionTest, BackInTimePingTest) { .WillOnce(Return(true)); EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)) .WillOnce(Return(true)); - brillo::Blob post_data; - ASSERT_TRUE( - TestUpdateCheck("" - "" - "", - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = + "" + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_EQ(post_str.find("ping"), string::npos); } @@ -2278,19 +1890,16 @@ TEST_F(OmahaRequestActionTest, LastPingDayUpdateTest) { SetInt64(kPrefsLastRollCallPingDay, AllOf(Ge(midnight), Le(midnight_slack)))) .WillOnce(Return(true)); - ASSERT_TRUE( - TestUpdateCheck("" - "" - "", - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - nullptr)); + + tuc_params_.http_response = + "" + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); } TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) { @@ -2300,19 +1909,16 @@ TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) { EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0); EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0); - ASSERT_TRUE( - TestUpdateCheck("" - "" - "", - -1, - false, // ping_only - ErrorCode::kSuccess, - 
metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - nullptr)); + + tuc_params_.http_response = + "" + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); } TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) { @@ -2322,38 +1928,31 @@ TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) { EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0); EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0); - ASSERT_TRUE( - TestUpdateCheck("" - "" - "", - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - nullptr)); + + tuc_params_.http_response = + "" + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); } TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) { // Test that the "eol" flags is only parsed from the "_eol" attribute and not // the "eol" attribute. - ASSERT_TRUE( - TestUpdateCheck("" - "" - "", - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - nullptr)); + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + string eol_pref; EXPECT_TRUE( fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); @@ -2363,56 +1962,48 @@ TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) { } TEST_F(OmahaRequestActionTest, NoUniqueIDTest) { - brillo::Blob post_data; - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, // response - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_EQ(post_str.find("machineid="), string::npos); EXPECT_EQ(post_str.find("userid="), string::npos); } TEST_F(OmahaRequestActionTest, NetworkFailureTest) { - OmahaResponse response; const int http_error_code = static_cast(ErrorCode::kOmahaRequestHTTPResponseBase) + 501; - ASSERT_FALSE(TestUpdateCheck("", - 501, - false, // ping_only - static_cast(http_error_code), - metrics::CheckResult::kDownloadError, - metrics::CheckReaction::kUnset, - static_cast(501), - &response, - nullptr)); + tuc_params_.fail_http_response_code = 501; + tuc_params_.expected_code = static_cast(http_error_code); + tuc_params_.expected_check_result = metrics::CheckResult::kDownloadError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + tuc_params_.expected_download_error_code = + static_cast(501); + + 
ASSERT_FALSE(TestUpdateCheck()); + EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, NetworkFailureBadHTTPCodeTest) { - OmahaResponse response; const int http_error_code = static_cast(ErrorCode::kOmahaRequestHTTPResponseBase) + 999; - ASSERT_FALSE(TestUpdateCheck("", - 1500, - false, // ping_only - static_cast(http_error_code), - metrics::CheckResult::kDownloadError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kHttpStatusOther, - &response, - nullptr)); + + tuc_params_.fail_http_response_code = 1500; + tuc_params_.expected_code = static_cast(http_error_code); + tuc_params_.expected_check_result = metrics::CheckResult::kDownloadError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + tuc_params_.expected_download_error_code = + metrics::DownloadErrorCode::kHttpStatusOther; + + ASSERT_FALSE(TestUpdateCheck()); EXPECT_FALSE(response.update_exists); } TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta().FromDays(1)); request_params_.set_update_check_count_wait_enabled(false); @@ -2420,15 +2011,12 @@ TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) { Time arbitrary_date; ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date)); fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date); - ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kOmahaUpdateDeferredPerPolicy, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kDeferring, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; + + ASSERT_FALSE(TestUpdateCheck()); int64_t timestamp = 0; ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt, ×tamp)); @@ -2437,20 +2025,14 @@ TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) { // Verify if we are interactive check we don't defer. 
request_params_.set_interactive(true); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.expected_code = ErrorCode::kSuccess; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUpdating; + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) { - OmahaResponse response; request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta().FromDays(1)); request_params_.set_update_check_count_wait_enabled(false); @@ -2461,16 +2043,10 @@ TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) { ASSERT_TRUE( fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue())); fake_system_state_.fake_clock()->SetWallclockTime(t2); - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kUpdateAvailable, - metrics::CheckReaction::kUpdating, - metrics::DownloadErrorCode::kUnset, - &response, - nullptr)); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + ASSERT_TRUE(TestUpdateCheck()); EXPECT_TRUE(response.update_exists); // Make sure the timestamp t1 is unchanged showing that it was reused. @@ -2484,7 +2060,6 @@ TEST_F(OmahaRequestActionTest, TestChangingToMoreStableChannel) { base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - brillo::Blob post_data; request_params_.set_root(tempdir.GetPath().value()); request_params_.set_app_id("{22222222-2222-2222-2222-222222222222}"); request_params_.set_app_version("1.2.3.4"); @@ -2494,17 +2069,14 @@ TEST_F(OmahaRequestActionTest, TestChangingToMoreStableChannel) { request_params_.SetTargetChannel("stable-channel", true, nullptr)); request_params_.UpdateDownloadChannel(); EXPECT_TRUE(request_params_.ShouldPowerwash()); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, // response - &post_data)); - // convert post_data to string - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_NE( string::npos, post_str.find("appid=\"{22222222-2222-2222-2222-222222222222}\" " @@ -2518,7 +2090,6 @@ TEST_F(OmahaRequestActionTest, TestChangingToLessStableChannel) { base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - brillo::Blob post_data; request_params_.set_root(tempdir.GetPath().value()); request_params_.set_app_id("{11111111-1111-1111-1111-111111111111}"); request_params_.set_app_version("5.6.7.8"); @@ -2528,17 +2099,14 @@ TEST_F(OmahaRequestActionTest, TestChangingToLessStableChannel) { request_params_.SetTargetChannel("canary-channel", false, nullptr)); request_params_.UpdateDownloadChannel(); EXPECT_FALSE(request_params_.ShouldPowerwash()); - ASSERT_FALSE(TestUpdateCheck("invalid xml>", - -1, - false, // ping_only - 
ErrorCode::kOmahaRequestXMLParseError, - metrics::CheckResult::kParsingError, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, // response - &post_data)); - // Convert post_data to string. - string post_str(post_data.begin(), post_data.end()); + + tuc_params_.http_response = "invalid xml>"; + tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; + tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_FALSE(TestUpdateCheck()); + EXPECT_NE( string::npos, post_str.find("appid=\"{11111111-1111-1111-1111-111111111111}\" " @@ -2555,19 +2123,13 @@ TEST_F(OmahaRequestActionTest, PingWhenPowerwashed) { // Flag that the device was powerwashed in the past. fake_system_state_.fake_hardware()->SetPowerwashCount(1); + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); - brillo::Blob post_data; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); // We shouldn't send a ping in this case since powerwash > 0. - string post_str(post_data.begin(), post_data.end()); EXPECT_EQ(string::npos, post_str.find("SetFirstActiveOmahaPingSent(); - brillo::Blob post_data; - ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(), - -1, - false, // ping_only - ErrorCode::kSuccess, - metrics::CheckResult::kNoUpdateAvailable, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnset, - nullptr, - &post_data)); + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + // We shouldn't send a ping in this case since // first_active_omaha_ping_sent=true - string post_str(post_data.begin(), post_data.end()); EXPECT_EQ(string::npos, post_str.find("::max(), response.past_rollback_key_version.firmware_key); @@ -3166,7 +2694,6 @@ TEST_F(OmahaRequestActionTest, PastRollbackVersionsNoEntries) { } TEST_F(OmahaRequestActionTest, PastRollbackVersionsValidEntries) { - OmahaResponse response; request_params_.set_rollback_allowed_milestones(4); fake_update_response_.rollback = true; fake_update_response_.rollback_allowed_milestones = 4; @@ -3174,10 +2701,14 @@ TEST_F(OmahaRequestActionTest, PastRollbackVersionsValidEntries) { fake_update_response_.rollback_kernel_version = "2.1"; fake_update_response_.past_rollback_key_version = std::make_pair("16.15", "14.13"); - TestRollbackCheck(false /* is_consumer_device */, - 4 /* rollback_allowed_milestones */, - true /* is_policy_loaded */, - &response); + fake_update_response_.deadline = "20101020"; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.is_consumer_device = false; + tuc_params_.rollback_allowed_milestones = 4; + tuc_params_.is_policy_loaded = true; + + EXPECT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_TRUE(response.is_rollback); EXPECT_EQ(16, response.past_rollback_key_version.firmware_key); EXPECT_EQ(15, 
response.past_rollback_key_version.firmware); @@ -3186,20 +2717,22 @@ TEST_F(OmahaRequestActionTest, MismatchNumberOfVersions) { - OmahaResponse response; fake_update_response_.rollback = true; fake_update_response_.rollback_allowed_milestones = 2; + fake_update_response_.deadline = "20101020"; request_params_.set_rollback_allowed_milestones(4); // Since |request_params_.rollback_allowed_milestones| is 4 but the response // is constructed with |fake_update_response_.rollback_allowed_milestones| set // to 2, OmahaRequestAction will look for the key values of N-4 version but // only the N-2 version will exist. + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + tuc_params_.is_consumer_device = false; + tuc_params_.rollback_allowed_milestones = 2; + tuc_params_.is_policy_loaded = true; - TestRollbackCheck(false /* is_consumer_device */, - 2 /* rollback_allowed_milestones */, - true /* is_policy_loaded */, - &response); + EXPECT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(response.update_exists); EXPECT_TRUE(response.is_rollback); EXPECT_EQ(std::numeric_limits::max(), response.past_rollback_key_version.firmware_key); From 84634dc7ae13a4e887937bded8f9486a2869e73c Mon Sep 17 00:00:00 2001 From: Alex Khouderchah Date: Thu, 4 Apr 2019 09:25:39 -0700 Subject: [PATCH 014/624] update_engine: Only use CAPATH for certificate verification libcurl has the sometimes unexpected behavior that CAINFO will always be preferred over CAPATH for certificate verification when non-null. In this case, that means root certificates in both /etc/ssl/certs and /usr/share/chromeos-ca-certificates will be used when only root certificates in the latter directory are expected to be used. Setting CAINFO to null will ensure that only CAPATH is used. BUG=chromium:949426 TEST=-All unit tests are passing. -Pre-CQ is passing. Change-Id: I7ae231881ab47353c8f4637ce48f69c834fc307f Reviewed-on: https://chromium-review.googlesource.com/1553463 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Alex Khouderchah Reviewed-by: Ben Chan Reviewed-by: Xiaochu Liu --- libcurl_http_fetcher.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index ce3475d4..4e33671f 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -305,6 +305,7 @@ void LibcurlHttpFetcher::SetCurlOptionsForHttps() { LOG(INFO) << "Setting up curl options for HTTPS"; CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYPEER, 1), CURLE_OK); CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYHOST, 2), CURLE_OK); + CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CAINFO, nullptr), CURLE_OK); CHECK_EQ(curl_easy_setopt( curl_handle_, CURLOPT_CAPATH, constants::kCACertificatesPath), CURLE_OK); From 28def4f0c85de00c3602c99c9131abab7f9f1874 Mon Sep 17 00:00:00 2001 From: Zentaro Kavanagh Date: Tue, 15 Jan 2019 17:15:01 -0800 Subject: [PATCH 015/624] update_engine: Place enterprise rollback save marker file - There are now two types of rollback. One that does a full powerwash and one that saves some system state. - When an enterprise rollback powerwash is scheduled, place a marker file to tell the shutdown process to save data before rebooting. - This lets rollback preserve additional data over a powerwash that can be restored later. - Change a few places that were using is_rollback to save_rollback_data to be explicit.
BUG=chromium:955463 TEST=unittests Change-Id: I9f18319e711e425a6e712dd319e03bcc6ddd0a1b Reviewed-on: https://chromium-review.googlesource.com/1414030 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Zentaro Kavanagh Reviewed-by: Amin Hassani --- common/fake_hardware.h | 10 ++-- common/hardware_interface.h | 6 +-- hardware_android.cc | 5 +- hardware_android.h | 2 +- hardware_chromeos.cc | 28 ++++++++-- hardware_chromeos.h | 2 +- mock_update_attempter.h | 3 +- omaha_request_params.h | 13 +++++ omaha_response_handler_action.cc | 2 + payload_consumer/install_plan.h | 3 ++ payload_consumer/postinstall_runner_action.cc | 9 +++- .../postinstall_runner_action_unittest.cc | 53 ++++++++++++++----- update_attempter.cc | 8 +++ update_attempter.h | 2 + update_attempter_unittest.cc | 36 ++++++++----- update_manager/android_things_policy.cc | 1 + .../enterprise_device_policy_impl.cc | 7 ++- update_manager/policy.h | 2 + 18 files changed, 144 insertions(+), 48 deletions(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 3e5a66e6..53b2dd5d 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -104,15 +104,15 @@ class FakeHardware : public HardwareInterface { int GetPowerwashCount() const override { return powerwash_count_; } - bool SchedulePowerwash(bool is_rollback) override { + bool SchedulePowerwash(bool save_rollback_data) override { powerwash_scheduled_ = true; - is_rollback_powerwash_ = is_rollback; + save_rollback_data_ = save_rollback_data; return true; } bool CancelPowerwash() override { powerwash_scheduled_ = false; - is_rollback_powerwash_ = false; + save_rollback_data_ = false; return true; } @@ -193,7 +193,7 @@ class FakeHardware : public HardwareInterface { int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; } bool GetIsRollbackPowerwashScheduled() const { - return powerwash_scheduled_ && is_rollback_powerwash_; + return powerwash_scheduled_ && save_rollback_data_; } private: @@ -213,7 +213,7 @@ class FakeHardware : public HardwareInterface { int firmware_max_rollforward_{kFirmwareMaxRollforward}; int powerwash_count_{kPowerwashCountNotSet}; bool powerwash_scheduled_{false}; - bool is_rollback_powerwash_{false}; + bool save_rollback_data_{false}; int64_t build_timestamp_{0}; bool first_active_omaha_ping_sent_{false}; diff --git a/common/hardware_interface.h b/common/hardware_interface.h index 01405881..6c53540c 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -102,9 +102,9 @@ class HardwareInterface { virtual int GetPowerwashCount() const = 0; // Signals that a powerwash (stateful partition wipe) should be performed - // after reboot. If |is_rollback| is true additional state is preserved - // during shutdown that can be restored after the powerwash. - virtual bool SchedulePowerwash(bool is_rollback) = 0; + // after reboot. If |save_rollback_data| is true additional state is + // preserved during shutdown that can be restored after the powerwash. + virtual bool SchedulePowerwash(bool save_rollback_data) = 0; // Cancel the powerwash operation scheduled to be performed on next boot. 
virtual bool CancelPowerwash() = 0; diff --git a/hardware_android.cc b/hardware_android.cc index 21d46595..80c7757a 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -152,9 +152,10 @@ int HardwareAndroid::GetPowerwashCount() const { return 0; } -bool HardwareAndroid::SchedulePowerwash(bool is_rollback) { +bool HardwareAndroid::SchedulePowerwash(bool save_rollback_data) { LOG(INFO) << "Scheduling a powerwash to BCB."; - LOG_IF(WARNING, is_rollback) << "is_rollback was true but isn't supported."; + LOG_IF(WARNING, save_rollback_data) << "save_rollback_data was true but " + << "isn't supported."; string err; if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"}, &err)) { diff --git a/hardware_android.h b/hardware_android.h index 5b3c99d8..c59a152b 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -48,7 +48,7 @@ class HardwareAndroid final : public HardwareInterface { bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override; bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override; int GetPowerwashCount() const override; - bool SchedulePowerwash(bool is_rollback) override; + bool SchedulePowerwash(bool save_rollback_data) override; bool CancelPowerwash() override; bool GetNonVolatileDirectory(base::FilePath* path) const override; bool GetPowerwashSafeDirectory(base::FilePath* path) const override; diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 8ef05b2e..60583e1a 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -61,6 +61,11 @@ const char kPowerwashCountMarker[] = "powerwash_count"; const char kPowerwashMarkerFile[] = "/mnt/stateful_partition/factory_install_reset"; +// The name of the marker file used to trigger a save of rollback data +// during the next shutdown. +const char kRollbackSaveMarkerFile[] = + "/mnt/stateful_partition/.save_rollback_data"; + // The contents of the powerwash marker file for the non-rollback case. const char kPowerwashCommand[] = "safe fast keepimg reason=update_engine\n"; @@ -226,15 +231,25 @@ int HardwareChromeOS::GetPowerwashCount() const { return powerwash_count; } -bool HardwareChromeOS::SchedulePowerwash(bool is_rollback) { +bool HardwareChromeOS::SchedulePowerwash(bool save_rollback_data) { + if (save_rollback_data) { + if (!utils::WriteFile(kRollbackSaveMarkerFile, nullptr, 0)) { + PLOG(ERROR) << "Error in creating rollback save marker file: " + << kRollbackSaveMarkerFile << ". Rollback will not" + << " preserve any data."; + } else { + LOG(INFO) << "Rollback data save has been scheduled on next shutdown."; + } + } + const char* powerwash_command = - is_rollback ? kRollbackPowerwashCommand : kPowerwashCommand; + save_rollback_data ? kRollbackPowerwashCommand : kPowerwashCommand; bool result = utils::WriteFile( kPowerwashMarkerFile, powerwash_command, strlen(powerwash_command)); if (result) { LOG(INFO) << "Created " << kPowerwashMarkerFile - << " to powerwash on next reboot (is_rollback=" << is_rollback - << ")"; + << " to powerwash on next reboot (" + << "save_rollback_data=" << save_rollback_data << ")"; } else { PLOG(ERROR) << "Error in creating powerwash marker file: " << kPowerwashMarkerFile; @@ -254,6 +269,11 @@ bool HardwareChromeOS::CancelPowerwash() { << kPowerwashMarkerFile; } + // Delete the rollback save marker file if it existed. 
+ if (!base::DeleteFile(base::FilePath(kRollbackSaveMarkerFile), false)) { + PLOG(ERROR) << "Could not remove rollback save marker"; + } + return result; } diff --git a/hardware_chromeos.h b/hardware_chromeos.h index 8829866a..04bdae3e 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -53,7 +53,7 @@ class HardwareChromeOS final : public HardwareInterface { bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override; bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override; int GetPowerwashCount() const override; - bool SchedulePowerwash(bool is_rollback) override; + bool SchedulePowerwash(bool save_rollback_data) override; bool CancelPowerwash() override; bool GetNonVolatileDirectory(base::FilePath* path) const override; bool GetPowerwashSafeDirectory(base::FilePath* path) const override; diff --git a/mock_update_attempter.h b/mock_update_attempter.h index d97163d3..c39fb621 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -30,12 +30,13 @@ class MockUpdateAttempter : public UpdateAttempter { public: using UpdateAttempter::UpdateAttempter; - MOCK_METHOD8(Update, + MOCK_METHOD9(Update, void(const std::string& app_version, const std::string& omaha_url, const std::string& target_channel, const std::string& target_version_prefix, bool rollback_allowed, + bool rollback_data_save_requested, int rollback_allowed_milestones, bool obey_proxies, bool interactive)); diff --git a/omaha_request_params.h b/omaha_request_params.h index 6691bee4..2d2ab69d 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -51,6 +51,7 @@ class OmahaRequestParams { delta_okay_(true), interactive_(false), rollback_allowed_(false), + rollback_data_save_requested_(false), wall_clock_based_wait_enabled_(false), update_check_count_wait_enabled_(false), min_update_checks_needed_(kDefaultMinUpdateChecks), @@ -132,6 +133,15 @@ class OmahaRequestParams { inline bool rollback_allowed() const { return rollback_allowed_; } + inline void set_rollback_data_save_requested( + bool rollback_data_save_requested) { + rollback_data_save_requested_ = rollback_data_save_requested; + } + + inline bool rollback_data_save_requested() const { + return rollback_data_save_requested_; + } + inline void set_rollback_allowed_milestones(int rollback_allowed_milestones) { rollback_allowed_milestones_ = rollback_allowed_milestones; } @@ -330,6 +340,9 @@ class OmahaRequestParams { // Whether the client is accepting rollback images too. bool rollback_allowed_; + // Whether rollbacks should preserve some system state during powerwash. + bool rollback_data_save_requested_; + // How many milestones the client can rollback to. int rollback_allowed_milestones_; diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index d05bc467..5741a2bd 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -178,6 +178,8 @@ void OmahaResponseHandlerAction::PerformAction() { return; } install_plan_.is_rollback = true; + install_plan_.rollback_data_save_requested = + params->rollback_data_save_requested(); } if (response.powerwash_required || params->ShouldPowerwash()) diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index ede36b31..17cefd84 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -146,6 +146,9 @@ struct InstallPlan { // True if this update is a rollback. bool is_rollback{false}; + // True if this rollback should preserve some system data. 
+ bool rollback_data_save_requested{false}; + // True if the update should write verity. // False otherwise. bool write_verity{true}; diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index a782b8f2..cc3843d6 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -57,9 +57,14 @@ void PostinstallRunnerAction::PerformAction() { CHECK(HasInputObject()); install_plan_ = GetInputObject(); - // Currently we're always powerwashing when rolling back. + // We always powerwash when rolling back, however policy can determine + // if this is a full/normal powerwash, or a special rollback powerwash + // that retains a small amount of system state such as enrollment and + // network configuration. In both cases all user accounts are deleted. if (install_plan_.powerwash_required || install_plan_.is_rollback) { - if (hardware_->SchedulePowerwash(install_plan_.is_rollback)) { + bool save_rollback_data = + install_plan_.is_rollback && install_plan_.rollback_data_save_requested; + if (hardware_->SchedulePowerwash(save_rollback_data)) { powerwash_scheduled_ = true; } else { return CompletePostinstall(ErrorCode::kPostinstallPowerwashError); diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc index caee5e27..04c81fac 100644 --- a/payload_consumer/postinstall_runner_action_unittest.cc +++ b/payload_consumer/postinstall_runner_action_unittest.cc @@ -100,7 +100,8 @@ class PostinstallRunnerActionTest : public ::testing::Test { void RunPostinstallAction(const string& device_path, const string& postinstall_program, bool powerwash_required, - bool is_rollback); + bool is_rollback, + bool save_rollback_data); public: void ResumeRunningAction() { @@ -170,7 +171,8 @@ void PostinstallRunnerActionTest::RunPostinstallAction( const string& device_path, const string& postinstall_program, bool powerwash_required, - bool is_rollback) { + bool is_rollback, + bool save_rollback_data) { ActionProcessor processor; processor_ = &processor; auto feeder_action = std::make_unique>(); @@ -184,6 +186,7 @@ void PostinstallRunnerActionTest::RunPostinstallAction( install_plan.download_url = "http://127.0.0.1:8080/update"; install_plan.powerwash_required = powerwash_required; install_plan.is_rollback = is_rollback; + install_plan.rollback_data_save_requested = save_rollback_data; feeder_action->set_obj(install_plan); auto runner_action = std::make_unique( &fake_boot_control_, &fake_hardware_); @@ -249,7 +252,8 @@ TEST_F(PostinstallRunnerActionTest, ProcessProgressLineTest) { TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); - RunPostinstallAction(loop.dev(), kPostinstallDefaultScript, false, false); + RunPostinstallAction( + loop.dev(), kPostinstallDefaultScript, false, false, false); EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); EXPECT_TRUE(processor_delegate_.processing_done_called_); @@ -260,7 +264,7 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) { TEST_F(PostinstallRunnerActionTest, RunAsRootRunSymlinkFileTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); - RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false); + RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false, false); EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); } @@ -270,6 +274,7 @@ TEST_F(PostinstallRunnerActionTest, 
RunAsRootPowerwashRequiredTest) { RunPostinstallAction(loop.dev(), "bin/postinst_example", /*powerwash_required=*/true, + false, false); EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); @@ -278,14 +283,31 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootPowerwashRequiredTest) { EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled()); } -TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTest) { +TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTestNoDataSave) { + ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); + + // Run a simple postinstall program, rollback happened. + RunPostinstallAction(loop.dev(), + "bin/postinst_example", + false, + /*is_rollback=*/true, + /*save_rollback_data=*/false); + EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); + + // Check that powerwash was scheduled and that it's NOT a rollback powerwash. + EXPECT_TRUE(fake_hardware_.IsPowerwashScheduled()); + EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled()); +} + +TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTestWithDataSave) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); // Run a simple postinstall program, rollback happened. RunPostinstallAction(loop.dev(), "bin/postinst_example", false, - /*is_rollback=*/true); + /*is_rollback=*/true, + /*save_rollback_data=*/true); EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); // Check that powerwash was scheduled and that it's a rollback powerwash. @@ -296,7 +318,8 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTest) { // Runs postinstall from a partition file that doesn't mount, so it should // fail. TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) { - RunPostinstallAction("/dev/null", kPostinstallDefaultScript, false, false); + RunPostinstallAction( + "/dev/null", kPostinstallDefaultScript, false, false, false); EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_); // In case of failure, Postinstall should not signal a powerwash even if it @@ -309,7 +332,7 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) { // fail. TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); - RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false); + RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false, false); EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_); } @@ -317,7 +340,7 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) { // UMA with a different error code. Test those cases are properly detected. TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); - RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false); + RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false, false); EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB, processor_delegate_.code_); } @@ -325,7 +348,7 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) { // Check that you can't specify an absolute path. 
TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); - RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false); + RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false, false); EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_); } @@ -334,7 +357,8 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) { // SElinux labels are only set on Android. TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); - RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false); + RunPostinstallAction( + loop.dev(), "bin/self_check_context", false, false, false); EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); } #endif // __ANDROID__ @@ -347,7 +371,7 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootSuspendResumeActionTest) { loop_.PostTask(FROM_HERE, base::Bind(&PostinstallRunnerActionTest::SuspendRunningAction, base::Unretained(this))); - RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false); + RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false, false); // postinst_suspend returns 0 only if it was suspended at some point. EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); EXPECT_TRUE(processor_delegate_.processing_done_called_); @@ -359,7 +383,7 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootCancelPostinstallActionTest) { // Wait for the action to start and then cancel it. CancelWhenStarted(); - RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false); + RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false, false); // When canceling the action, the action never finished and therefore we had // a ProcessingStopped call instead. EXPECT_FALSE(processor_delegate_.code_set_); @@ -382,7 +406,8 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootProgressUpdatesTest) { ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr); setup_action_delegate_ = &mock_delegate_; - RunPostinstallAction(loop.dev(), "bin/postinst_progress", false, false); + RunPostinstallAction( + loop.dev(), "bin/postinst_progress", false, false, false); EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); } diff --git a/update_attempter.cc b/update_attempter.cc index 31a6ce47..44eea770 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -239,6 +239,7 @@ void UpdateAttempter::Update(const string& app_version, const string& target_channel, const string& target_version_prefix, bool rollback_allowed, + bool rollback_data_save_requested, int rollback_allowed_milestones, bool obey_proxies, bool interactive) { @@ -275,6 +276,7 @@ void UpdateAttempter::Update(const string& app_version, target_channel, target_version_prefix, rollback_allowed, + rollback_data_save_requested, rollback_allowed_milestones, obey_proxies, interactive)) { @@ -349,6 +351,7 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, const string& target_channel, const string& target_version_prefix, bool rollback_allowed, + bool rollback_data_save_requested, int rollback_allowed_milestones, bool obey_proxies, bool interactive) { @@ -368,6 +371,10 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, // Set whether rollback is allowed. omaha_request_params_->set_rollback_allowed(rollback_allowed); + // Set whether saving data over rollback is requested. 
+ omaha_request_params_->set_rollback_data_save_requested( + rollback_data_save_requested); + CalculateStagingParams(interactive); // If staging_wait_time_ wasn't set, staging is off, use scattering instead. if (staging_wait_time_.InSeconds() == 0) { @@ -953,6 +960,7 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, params.target_channel, params.target_version_prefix, params.rollback_allowed, + params.rollback_data_save_requested, params.rollback_allowed_milestones, /*obey_proxies=*/false, params.interactive); diff --git a/update_attempter.h b/update_attempter.h index 6c25eb2e..82b81cee 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -85,6 +85,7 @@ class UpdateAttempter : public ActionProcessorDelegate, const std::string& target_channel, const std::string& target_version_prefix, bool rollback_allowed, + bool rollback_data_save_requested, int rollback_allowed_milestones, bool obey_proxies, bool interactive); @@ -341,6 +342,7 @@ class UpdateAttempter : public ActionProcessorDelegate, const std::string& target_channel, const std::string& target_version_prefix, bool rollback_allowed, + bool rollback_data_save_requested, int rollback_allowed_milestones, bool obey_proxies, bool interactive); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index ec6066ba..14fad389 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -506,7 +506,7 @@ void UpdateAttempterTest::UpdateTestStart() { EXPECT_CALL(*processor_, StartProcessing()); } - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); loop_.PostTask(FROM_HERE, base::Bind(&UpdateAttempterTest::UpdateTestVerify, base::Unretained(this))); @@ -706,7 +706,7 @@ void UpdateAttempterTest::P2PNotEnabledStart() { fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading_); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -728,7 +728,7 @@ void UpdateAttempterTest::P2PEnabledStartingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(false); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -751,7 +751,7 @@ void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -773,7 +773,7 @@ void UpdateAttempterTest::P2PEnabledStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", false, 0, false, false); + 
attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_TRUE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -801,6 +801,7 @@ void UpdateAttempterTest::P2PEnabledInteractiveStart() { "", "", false, + false, /*rollback_allowed_milestones=*/0, false, /*interactive=*/true); @@ -833,7 +834,7 @@ void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); ScheduleQuitMainLoop(); @@ -871,7 +872,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); // Make sure the file still exists. @@ -887,7 +888,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { // However, if the count is already 0, it's not decremented. Test that. initial_value = 0; EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value)); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount)); EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value)); EXPECT_EQ(initial_value, new_value); @@ -939,6 +940,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { "", "", false, + false, /*rollback_allowed_milestones=*/0, false, /*interactive=*/true); @@ -993,7 +995,7 @@ void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, 0, false, false); + attempter_.Update("", "", "", "", false, false, 0, false, false); // Check that prefs have the correct values. 
int64_t update_count; EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count)); @@ -1050,7 +1052,8 @@ void UpdateAttempterTest::StagingOffIfInteractiveStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, 0, false, /* interactive = */ true); + attempter_.Update( + "", "", "", "", false, false, 0, false, /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1070,7 +1073,8 @@ void UpdateAttempterTest::StagingOffIfOobeStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, 0, false, /* interactive = */ true); + attempter_.Update( + "", "", "", "", false, false, 0, false, /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1240,11 +1244,13 @@ TEST_F(UpdateAttempterTest, UpdateAfterInstall) { } TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) { - attempter_.CalculateUpdateParams("", "", "", "1234", false, 4, false, false); + attempter_.CalculateUpdateParams( + "", "", "", "1234", false, false, 4, false, false); EXPECT_EQ("1234", fake_system_state_.request_params()->target_version_prefix()); - attempter_.CalculateUpdateParams("", "", "", "", false, 4, false, false); + attempter_.CalculateUpdateParams( + "", "", "", "", false, 4, false, false, false); EXPECT_TRUE( fake_system_state_.request_params()->target_version_prefix().empty()); } @@ -1255,6 +1261,7 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { "", "1234", /*rollback_allowed=*/true, + /*rollback_data_save_requested=*/false, /*rollback_allowed_milestones=*/4, false, false); @@ -1267,6 +1274,7 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { "", "1234", /*rollback_allowed=*/false, + /*rollback_data_save_requested=*/false, /*rollback_allowed_milestones=*/4, false, false); @@ -1392,7 +1400,7 @@ void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, SetRollbackHappened(false)) .Times(expected_reset ? 
1 : 0);
   attempter_.policy_provider_ = std::move(mock_policy_provider);
-  attempter_.Update("", "", "", "", false, 0, false, false);
+  attempter_.Update("", "", "", "", false, false, 0, false, false);
   ScheduleQuitMainLoop();
 }

diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
index 4afcf12e..26bd0ba9 100644
--- a/update_manager/android_things_policy.cc
+++ b/update_manager/android_things_policy.cc
@@ -54,6 +54,7 @@ EvalStatus AndroidThingsPolicy::UpdateCheckAllowed(
   result->target_channel.clear();
   result->target_version_prefix.clear();
   result->rollback_allowed = false;
+  result->rollback_data_save_requested = false;
   result->rollback_allowed_milestones = -1;
   result->interactive = false;

diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index a3430ef2..3d77d592 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -91,22 +91,27 @@ EvalStatus EnterpriseDevicePolicyImpl::UpdateCheckAllowed(
       case RollbackToTargetVersion::kDisabled:
         LOG(INFO) << "Policy disables rollbacks.";
         result->rollback_allowed = false;
+        result->rollback_data_save_requested = false;
         break;
       case RollbackToTargetVersion::kRollbackAndPowerwash:
         LOG(INFO) << "Policy allows rollbacks with powerwash.";
         result->rollback_allowed = true;
+        result->rollback_data_save_requested = false;
         break;
       case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
         LOG(INFO) << "Policy allows rollbacks, also tries to restore if possible.";
-        // We don't support restore yet, but policy still allows rollback.
         result->rollback_allowed = true;
+        result->rollback_data_save_requested = true;
         break;
      case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
+        // TODO(crbug.com/947621): Remove this policy option until we know
+        // how it could be supported correctly.
        LOG(INFO) << "Policy only allows rollbacks if restore is possible.";
        // We don't support restore yet, policy doesn't allow rollback in this
        // case.
        result->rollback_allowed = false;
+        result->rollback_data_save_requested = false;
        break;
      case RollbackToTargetVersion::kMaxValue:
        NOTREACHED();

diff --git a/update_manager/policy.h b/update_manager/policy.h
index 5d65d9ae..9e7df10d 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -50,6 +50,8 @@ struct UpdateCheckParams {
   std::string target_version_prefix;
   // Specifies whether rollback images are allowed by device policy.
   bool rollback_allowed;
+  // Specifies if rollbacks should attempt to preserve some system state.
+  bool rollback_data_save_requested;
   // Specifies the number of Chrome milestones rollback should be allowed,
   // starting from the stable version at any time. Value is -1 if unspecified
   // (e.g. no device policy is available yet), in this case no version

From 5d185052bd7dea7730777987c2cade748ed4cc46 Mon Sep 17 00:00:00 2001
From: Amin Hassani
Date: Tue, 23 Apr 2019 07:28:30 -0700
Subject: [PATCH 016/624] update_engine: De-duplicate zlib blocks of squashfs images

Having duplicate zlib blocks in the squashfs image (e.g. from two hard
linked files) will cause problems for puffin processing.

Recently the ARC++ image introduced two files that were hard linked to
each other. This caused the squashfs parsing algorithm to fail, so the
ARC++ image did not get split into smaller files, and we got massive OOM
and timeout problems in the builders.

This CL simply removes duplicate zlib blocks when constructing the list
of such items.
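For reference, the de-duplication below is just the standard sort plus
unique/erase idiom applied to the offset-sorted block list. A minimal
standalone sketch (using an illustrative BlockData type, not the actual
update_engine/puffin structures):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct BlockData {
      uint64_t offset;
      uint64_t length;
      bool operator==(const BlockData& other) const {
        return offset == other.offset && length == other.length;
      }
    };

    void DeduplicateBlocks(std::vector<BlockData>* blocks) {
      // Sort by offset so identical blocks (e.g. from hard linked files)
      // become adjacent; std::unique only collapses adjacent equal elements.
      std::sort(blocks->begin(), blocks->end(),
                [](const BlockData& a, const BlockData& b) {
                  return a.offset < b.offset;
                });
      blocks->erase(std::unique(blocks->begin(), blocks->end()),
                    blocks->end());
    }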
BUG=chromium:955145
TEST=cros_generate_update_payload --image gs://chromeos-releases/stable-channel/nautilus/11647.154.0/chromeos_11647.154.0_nautilus_recovery_stable-channel_mp-v2.bin --src_image gs://chromeos-releases/stable-channel/nautilus/10575.55.0/chromeos_10575.55.0_nautilus_recovery_stable-channel_mp.bin --output payload.bin
Change-Id: If4eb4731355e1180953a5b327be3a16ff13e6d96
Reviewed-on: https://chromium-review.googlesource.com/1578219
Commit-Ready: ChromeOS CL Exonerator Bot
Tested-by: Amin Hassani
Reviewed-by: Sen Jiang
---
 payload_generator/squashfs_filesystem.cc | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index c423b69c..e43bc0a9 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -261,6 +261,14 @@ bool SquashfsFilesystem::Init(const string& map,
                 return a.offset < b.offset;
               });

+  // Sometimes a squashfs can have two files that are hard linked. In this
+  // case both files will have the same starting offset in the image and hence
+  // the same zlib blocks. So we need to remove these duplicates to eliminate
+  // further potential problems. As a matter of fact, the next statement will
+  // fail if there are duplicates (there will be overlap between two blocks).
+  auto last = std::unique(zlib_blks.begin(), zlib_blks.end());
+  zlib_blks.erase(last, zlib_blks.end());
+
   // Sanity check. Make sure zlib blocks are not overlapping.
   auto result = std::adjacent_find(
       zlib_blks.begin(),

From 32e82044ba82d950248584dda5138c37a85cac3e Mon Sep 17 00:00:00 2001
From: xunchang
Date: Thu, 25 Apr 2019 10:37:36 -0700
Subject: [PATCH 017/624] Set the block device to be writable before writing verity data

The block device is set to be writable when the delta performer opens the
partition to perform writes. But if the update is interrupted and the
device reboots during the verity data calculation, update engine will
consider the write actions already done when it resumes and will skip
setting the writable flag. As a result, the resumed update will fail to
write the hashtree with EPERM.

A possible fix is to explicitly set the flag when the
FilesystemVerifierAction wants to write verity data.

Bug: 131232680
Test: Interrupt the update during verity writing, reboot and check the update resumes correctly
Change-Id: I1007414139e4ae5be3fc60ec73b18264d4baa60e
---
 payload_consumer/verity_writer_android.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index 06d1489b..d5437b64 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -41,6 +41,9 @@ std::unique_ptr CreateVerityWriter() {

 bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
   partition_ = &partition;
+  if (partition_->hash_tree_size != 0 || partition_->fec_size != 0) {
+    utils::SetBlockDeviceReadOnly(partition_->target_path, false);
+  }
   if (partition_->hash_tree_size != 0) {
     auto hash_function =
         HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);

From 570ca87d31e11643f6e43d386edfb39f349f8425 Mon Sep 17 00:00:00 2001
From: Askar Aitzhan
Date: Wed, 24 Apr 2019 11:16:12 +0200
Subject: [PATCH 018/624] update_engine: Include quick_fix_build_token in updatecheck request body

Get the value of the new device policy which is included in
AutoUpdateSettings as quick_fix_build_token, and include it in the update
request body if the value is set.
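Roughly speaking, the token just becomes one more attribute on the <app>
element of the update check request. A simplified sketch of the append
step (illustrative helper only; the real request builder also XML-escapes
the value before embedding it):

    #include <string>

    // Appends token="..." to the <app ...> attribute list when the device
    // policy supplied a non-empty quick fix build token.
    std::string MaybeAppendToken(const std::string& app_body,
                                 const std::string& autoupdate_token) {
      if (autoupdate_token.empty())
        return app_body;
      return app_body + " token=\"" + autoupdate_token + "\"";
    }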
BUG=chromium:932465 TEST=./build_packages --board=amd64-generic && \ cros_run_unit_tests --board=amd64-generic --packages update_engine Cq-Depend: chromium:1571634 Change-Id: Iaca35a08d973616a058864a11896ef9305d00174 Reviewed-on: https://chromium-review.googlesource.com/1581519 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Askar Aitzhan Reviewed-by: Nicolas Norvez Reviewed-by: Amin Hassani --- omaha_request_action_unittest.cc | 18 +++++++++ omaha_request_builder_xml.cc | 7 ++++ omaha_request_params.h | 12 ++++++ update_attempter.cc | 8 ++++ update_attempter_unittest.cc | 66 ++++++++++++++++++++++---------- 5 files changed, 91 insertions(+), 20 deletions(-) diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index ec083335..bfbf6a4c 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -1463,6 +1463,7 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { request_params_.set_current_channel("unittest_track<"); request_params_.set_target_channel("unittest_track<"); request_params_.set_hwid(""); + request_params_.set_autoupdate_token("autoupdate_token>"); fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring"); fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\"); fake_prefs_.SetString( @@ -1489,6 +1490,8 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { // Values from Prefs that are too big are removed from the XML instead of // encoded. EXPECT_EQ(string::npos, post_str.find("cohortname=")); + EXPECT_NE(string::npos, post_str.find("autoupdate_token>")); + EXPECT_EQ(string::npos, post_str.find("autoupdate_token>")); } TEST_F(OmahaRequestActionTest, XmlDecodeTest) { @@ -1692,6 +1695,21 @@ TEST_F(OmahaRequestActionTest, OmahaEventTest) { EXPECT_EQ(ErrorCode::kError, error_event.error_code); } +TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsSetTest) { + constexpr char autoupdate_token[] = "autoupdate_token"; + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + request_params_.set_autoupdate_token(autoupdate_token); + + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_NE(post_str.find(" \n"), + string::npos); +} + void OmahaRequestActionTest::PingTest(bool ping_only) { NiceMock prefs; fake_system_state_.set_prefs(&prefs); diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 899f17ff..aac01366 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -122,6 +122,13 @@ string GetAppBody(const OmahaEvent* event, app_body += " rollback_allowed=\"true\""; } } + string autoupdate_token = params->autoupdate_token(); + if (!autoupdate_token.empty()) { + app_body += base::StringPrintf( + " token=\"%s\"", + XmlEncodeWithDefault(autoupdate_token, "").c_str()); + } + app_body += ">\n"; } diff --git a/omaha_request_params.h b/omaha_request_params.h index 2d2ab69d..f3f68f42 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -193,6 +193,13 @@ class OmahaRequestParams { inline void set_is_install(bool is_install) { is_install_ = is_install; } inline bool is_install() const { return is_install_; } + inline void set_autoupdate_token(const std::string& token) { + autoupdate_token_ = token; + } + inline const std::string& autoupdate_token() const { + return autoupdate_token_; + } + // Returns the app id corresponding to the current value of the // download channel. 
virtual std::string GetAppId() const; @@ -371,6 +378,11 @@ class OmahaRequestParams { // current active partition instead of the inactive partition. bool is_install_; + // Token used when making an update request for a specific build. + // For example: Token for a Quick Fix Build: + // https://www.chromium.org/administrators/policy-list-3#DeviceQuickFixBuildToken. + std::string autoupdate_token_; + DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams); }; diff --git a/update_attempter.cc b/update_attempter.cc index 44eea770..4bbf5a14 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -427,6 +427,14 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, omaha_request_params_->set_dlc_module_ids(dlc_module_ids_); omaha_request_params_->set_is_install(is_install_); + // Set Quick Fix Build token if policy is set. + string token; + if (system_state_ && system_state_->device_policy()) { + if (!system_state_->device_policy()->GetDeviceQuickFixBuildToken(&token)) + token.clear(); + } + omaha_request_params_->set_autoupdate_token(token); + LOG(INFO) << "target_version_prefix = " << omaha_request_params_->target_version_prefix() << ", rollback_allowed = " diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 14fad389..16819f83 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -188,6 +188,7 @@ class UpdateAttempterTest : public ::testing::Test { void P2PEnabledInteractiveStart(); void P2PEnabledStartingFailsStart(); void P2PEnabledHousekeepingFailsStart(); + void UpdateToQuickFixBuildStart(); void ResetRollbackHappenedStart(bool is_consumer, bool is_policy_available, bool expected_reset); @@ -461,23 +462,23 @@ TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) { namespace { // Actions that will be built as part of an update check. -const string kUpdateActionTypes[] = { // NOLINT(runtime/string) - OmahaRequestAction::StaticType(), - OmahaResponseHandlerAction::StaticType(), - UpdateBootFlagsAction::StaticType(), - OmahaRequestAction::StaticType(), - DownloadAction::StaticType(), - OmahaRequestAction::StaticType(), - FilesystemVerifierAction::StaticType(), - PostinstallRunnerAction::StaticType(), - OmahaRequestAction::StaticType()}; +vector GetUpdateActionTypes() { + return {OmahaRequestAction::StaticType(), + OmahaResponseHandlerAction::StaticType(), + UpdateBootFlagsAction::StaticType(), + OmahaRequestAction::StaticType(), + DownloadAction::StaticType(), + OmahaRequestAction::StaticType(), + FilesystemVerifierAction::StaticType(), + PostinstallRunnerAction::StaticType(), + OmahaRequestAction::StaticType()}; +} // Actions that will be built as part of a user-initiated rollback. 
-const string kRollbackActionTypes[] = { - // NOLINT(runtime/string) - InstallPlanAction::StaticType(), - PostinstallRunnerAction::StaticType(), -}; +vector GetRollbackActionTypes() { + return {InstallPlanAction::StaticType(), + PostinstallRunnerAction::StaticType()}; +} const StagingSchedule kValidStagingSchedule = { {4, 10}, {10, 40}, {19, 70}, {26, 100}}; @@ -498,10 +499,10 @@ void UpdateAttempterTest::UpdateTestStart() { { InSequence s; - for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) { + for (const auto& update_action_type : GetUpdateActionTypes()) { EXPECT_CALL(*processor_, EnqueueAction(Pointee( - Property(&AbstractAction::Type, kUpdateActionTypes[i])))); + Property(&AbstractAction::Type, update_action_type)))); } EXPECT_CALL(*processor_, StartProcessing()); } @@ -557,10 +558,10 @@ void UpdateAttempterTest::RollbackTestStart(bool enterprise_rollback, if (is_rollback_allowed) { InSequence s; - for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) { + for (const auto& rollback_action_type : GetRollbackActionTypes()) { EXPECT_CALL(*processor_, - EnqueueAction(Pointee(Property(&AbstractAction::Type, - kRollbackActionTypes[i])))); + EnqueueAction(Pointee( + Property(&AbstractAction::Type, rollback_action_type)))); } EXPECT_CALL(*processor_, StartProcessing()); @@ -1582,4 +1583,29 @@ TEST_F(UpdateAttempterTest, attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); } +void UpdateAttempterTest::UpdateToQuickFixBuildStart() { + // Tests that checks if device_quick_fix_build_token arrives when + // policy is set. + const char kToken[] = "some_token"; + + auto device_policy = std::make_unique(); + fake_system_state_.set_device_policy(device_policy.get()); + + EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); + EXPECT_CALL(*device_policy, GetDeviceQuickFixBuildToken(_)) + .WillOnce(DoAll(SetArgPointee<0>(string(kToken)), Return(true))); + attempter_.policy_provider_.reset( + new policy::PolicyProvider(std::move(device_policy))); + attempter_.Update("", "", "", "", false, false, 0, false, false); + + ScheduleQuitMainLoop(); +} + +TEST_F(UpdateAttempterTest, UpdateToQuickFixBuildStart) { + loop_.PostTask(FROM_HERE, + base::Bind(&UpdateAttempterTest::UpdateToQuickFixBuildStart, + base::Unretained(this))); + loop_.Run(); +} + } // namespace chromeos_update_engine From 519b4557af407c9d07ea0c035df45af03a8071b0 Mon Sep 17 00:00:00 2001 From: Joel Kitching Date: Mon, 15 Apr 2019 14:18:08 +0800 Subject: [PATCH 019/624] update_engine: remove MetricsLibrary::Init call This function is being removed and deprecated in favour of using MetricsLibrary's constructor. Also remove Initialize from MetricsReporterInferface entirely, since it is no longer needed. 
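For callers this means that constructing the library is now enough; there
is no separate initialization step. A minimal usage sketch (assuming the
usual Chrome OS libmetrics header; the metric name is illustrative):

    #include <memory>

    #include <metrics/metrics_library.h>

    void ReportExample() {
      // The constructor performs the setup that Init() used to do.
      auto metrics_lib = std::make_unique<MetricsLibrary>();
      metrics_lib->SendEnumToUMA("UpdateEngine.Example",
                                 /*sample=*/1, /*max=*/10);
    }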
BUG=chromium:940343 TEST=None Change-Id: I5ee49f7a7274f8fba10a6feffa9818017da12239 Signed-off-by: Joel Kitching Reviewed-on: https://chromium-review.googlesource.com/1564234 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Joel Kitching Reviewed-by: Amin Hassani --- metrics_reporter_android.h | 2 -- metrics_reporter_interface.h | 2 -- metrics_reporter_omaha.cc | 4 ---- metrics_reporter_omaha.h | 2 -- metrics_reporter_stub.h | 2 -- real_system_state.cc | 2 -- 6 files changed, 14 deletions(-) diff --git a/metrics_reporter_android.h b/metrics_reporter_android.h index e320c122..7770619c 100644 --- a/metrics_reporter_android.h +++ b/metrics_reporter_android.h @@ -31,8 +31,6 @@ class MetricsReporterAndroid : public MetricsReporterInterface { ~MetricsReporterAndroid() override = default; - void Initialize() override {} - void ReportRollbackMetrics(metrics::RollbackResult result) override {} void ReportEnterpriseRollbackMetrics( diff --git a/metrics_reporter_interface.h b/metrics_reporter_interface.h index fce8bfd7..180a6803 100644 --- a/metrics_reporter_interface.h +++ b/metrics_reporter_interface.h @@ -42,8 +42,6 @@ class MetricsReporterInterface { public: virtual ~MetricsReporterInterface() = default; - virtual void Initialize() = 0; - // Helper function to report metrics related to user-initiated rollback. The // following metrics are reported: // diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc index 14819d88..fb4e4ce9 100644 --- a/metrics_reporter_omaha.cc +++ b/metrics_reporter_omaha.cc @@ -144,10 +144,6 @@ std::unique_ptr CreateMetricsReporter() { MetricsReporterOmaha::MetricsReporterOmaha() : metrics_lib_(new MetricsLibrary()) {} -void MetricsReporterOmaha::Initialize() { - metrics_lib_->Init(); -} - void MetricsReporterOmaha::ReportDailyMetrics(base::TimeDelta os_age) { string metric = metrics::kMetricDailyOSAgeDays; LOG(INFO) << "Uploading " << utils::FormatTimeDelta(os_age) << " for metric " diff --git a/metrics_reporter_omaha.h b/metrics_reporter_omaha.h index 5680dec0..c84ac1ec 100644 --- a/metrics_reporter_omaha.h +++ b/metrics_reporter_omaha.h @@ -108,8 +108,6 @@ class MetricsReporterOmaha : public MetricsReporterInterface { ~MetricsReporterOmaha() override = default; - void Initialize() override; - void ReportRollbackMetrics(metrics::RollbackResult result) override; void ReportEnterpriseRollbackMetrics( diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h index 25660b5c..0cfeea0c 100644 --- a/metrics_reporter_stub.h +++ b/metrics_reporter_stub.h @@ -31,8 +31,6 @@ class MetricsReporterStub : public MetricsReporterInterface { ~MetricsReporterStub() override = default; - void Initialize() override {} - void ReportRollbackMetrics(metrics::RollbackResult result) override {} void ReportEnterpriseRollbackMetrics( diff --git a/real_system_state.cc b/real_system_state.cc index 2f18b4d6..700bfb7d 100644 --- a/real_system_state.cc +++ b/real_system_state.cc @@ -54,8 +54,6 @@ RealSystemState::~RealSystemState() { } bool RealSystemState::Initialize() { - metrics_reporter_.Initialize(); - boot_control_ = boot_control::CreateBootControl(); if (!boot_control_) { LOG(WARNING) << "Unable to create BootControl instance, using stub " From 000c7624d1d5da27d799a6dcde8456b95f3ece91 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 1 May 2019 07:02:27 -0700 Subject: [PATCH 020/624] update_engine: Set kernel_max_rollforward to inifinity for test images. 
Instead of checking for consumer devices to decide whether to set that
value to infinity, just check whether the device is not an official build
(test/dev images). Consumer devices will get their value set at a later
(update check) stage. This allows bring-up devices to easily have this
value set.

BUG=b:129556191
TEST=none
Change-Id: I79db0c556b6d1bf80adb6adc65bc15b8d812889a
Reviewed-on: https://chromium-review.googlesource.com/1591793
Commit-Ready: Amin Hassani
Tested-by: Amin Hassani
Reviewed-by: Zentaro Kavanagh
---
 real_system_state.cc | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/real_system_state.cc b/real_system_state.cc
index 700bfb7d..cc030431 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -187,14 +187,13 @@ bool RealSystemState::Initialize() {
     return false;
   }

-  // For devices that are not rollback enabled (ie. consumer devices),
-  // initialize max kernel key version to 0xfffffffe, which is logically
-  // infinity.
-  if (policy_provider_.IsConsumerDevice()) {
+  // For images that are built for debugging purposes, like test images,
+  // initialize max kernel key version to 0xfffffffe, which is logical infinity.
+  if (!hardware_->IsOfficialBuild()) {
    if (!hardware()->SetMaxKernelKeyRollforward(
            chromeos_update_manager::kRollforwardInfinity)) {
      LOG(ERROR) << "Failed to set kernel_max_rollforward to infinity for"
-                 << " consumer devices";
+                 << " device with test/dev image.";
    }
  }

From 9456b6fa1416c8ecd415a39131d257469af6b213 Mon Sep 17 00:00:00 2001
From: Tao Bao
Date: Mon, 29 Apr 2019 18:14:58 -0700
Subject: [PATCH 021/624] Support `atest update_engine_unittests`.

We need a few changes to the building rules to support running the test
with atest. In particular, we need a customized test config to filter out
the helper executables. The change doesn't affect the traditional way of
running tests with `adb sync data` + `adb shell /d/n/u/update_engine_unittests`.

This is one of the prerequisites to run update_engine_unittests with
TEST_MAPPING [1]. It still requires additional work to enable the test in
pre/post-submit though (currently it's failing on GCE devices due to
missing shared libraries).

[1] https://source.android.com/compatibility/tests/development/test-mapping

Fixes: 131438455
Test: `atest update_engine_unittests` on blueline.
Test: TreeHugger; check that the existing GCL config'd test keeps working.
Change-Id: If602d69d84e0d926c579324d988c94b57240b113
---
 Android.bp      | 19 ++++++++-----------
 test_config.xml | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 11 deletions(-)
 create mode 100644 test_config.xml

diff --git a/Android.bp b/Android.bp
index ce43bea5..9e8296b3 100644
--- a/Android.bp
+++ b/Android.bp
@@ -550,8 +550,6 @@ cc_test {
     gtest: false,

     stem: "delta_generator",
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }

 // test_http_server (type: executable)
@@ -566,8 +564,6 @@ cc_test {
     ],
     gtest: false,

-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }

 // test_subprocess (type: executable)
@@ -579,8 +575,6 @@ cc_test {
     srcs: ["test_subprocess.cc"],
     gtest: false,

-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }

 // Public keys for unittests.
@@ -628,11 +622,6 @@ cc_test { "libpayload_generator_exports", "libupdate_engine_android_exports", ], - required: [ - "test_http_server", - "test_subprocess", - "ue_unittest_delta_generator", - ], static_libs: [ "libpayload_generator", @@ -646,6 +635,9 @@ cc_test { ], data: [ + ":test_http_server", + ":test_subprocess", + ":ue_unittest_delta_generator", ":ue_unittest_disk_imgs", ":ue_unittest_keys", "unittest_key.pem", @@ -654,6 +646,11 @@ cc_test { "update_engine.conf", ], + // We cannot use the default generated AndroidTest.xml because of the use of helper modules + // (i.e. test_http_server, test_subprocess, ue_unittest_delta_generator). + test_config: "test_config.xml", + test_suites: ["device-tests"], + srcs: [ "boot_control_android_unittest.cc", "certificate_checker_unittest.cc", diff --git a/test_config.xml b/test_config.xml new file mode 100644 index 00000000..2639e7f3 --- /dev/null +++ b/test_config.xml @@ -0,0 +1,33 @@ + + + + From 4a1173a313c176f167bee5c2d23d34d04e0c6fd7 Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Wed, 10 Apr 2019 10:49:08 -0700 Subject: [PATCH 022/624] update_engine: add logs and metrics for libcurl errors curl_multi_perform may return code that is neither CURLM_OK or CURLM_CALL_MULTI_PERFORM. When error returns we log them in update_engine.log and send UMA metrics. When update_engine does not get http response code from libcurl, we log internal error code of the libcurl transfer for diagnosis. Chrome CL to add the metrics enum is here: https://chromium-review.googlesource.com/c/chromium/src/+/1566150 BUG=chromium:927039 TEST=unittest Change-Id: Ie8ce9dc0a6ce5ff6ffc2ff9425b652d125518558 Reviewed-on: https://chromium-review.googlesource.com/1562172 Commit-Ready: Sean Abraham Tested-by: Xiaochu Liu Reviewed-by: Xiaochu Liu Reviewed-by: Amin Hassani --- common/http_fetcher.h | 8 +++++++ libcurl_http_fetcher.cc | 49 +++++++++++++++++++++++++++++++++++++++++ libcurl_http_fetcher.h | 12 ++++++++++ metrics_constants.h | 4 ++++ omaha_request_action.cc | 11 +++++++-- omaha_request_action.h | 5 +++++ update_attempter.cc | 1 + 7 files changed, 88 insertions(+), 2 deletions(-) diff --git a/common/http_fetcher.h b/common/http_fetcher.h index 2b4fc833..93b0e249 100644 --- a/common/http_fetcher.h +++ b/common/http_fetcher.h @@ -29,6 +29,7 @@ #include "update_engine/common/http_common.h" #include "update_engine/common/proxy_resolver.h" +#include "update_engine/metrics_constants.h" // This class is a simple wrapper around an HTTP library (libcurl). We can // easily mock out this interface for testing. @@ -200,6 +201,13 @@ class HttpFetcherDelegate { // situations. It's OK to destroy the |fetcher| object in this callback. virtual void TransferComplete(HttpFetcher* fetcher, bool successful) = 0; virtual void TransferTerminated(HttpFetcher* fetcher) {} + + // This allows |HttpFetcher| to send UMA metrics for its internal states + // (unrecoverable libcurl internal error, etc.). + virtual void ReportUpdateCheckMetrics( + metrics::CheckResult result, + metrics::CheckReaction reaction, + metrics::DownloadErrorCode download_error_code) {} }; } // namespace chromeos_update_engine diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 4e33671f..1aa7e8ba 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -407,6 +407,21 @@ void LibcurlHttpFetcher::CurlPerformOnce() { } } + // When retcode is not |CURLM_OK| at this point, libcurl has an internal error + // that it is less likely to recover from (libcurl bug, out-of-memory, etc.). 
+ // In case of an update check, we send UMA metrics and log the error. + if (is_update_check_ && + (retcode == CURLM_OUT_OF_MEMORY || retcode == CURLM_INTERNAL_ERROR)) { + delegate_->ReportUpdateCheckMetrics( + metrics::CheckResult::kUnset, + metrics::CheckReaction::kUnset, + metrics::DownloadErrorCode::kInternalError); + LOG(ERROR) << "curl_multi_perform is in an unrecoverable error condition: " + << retcode; + } else if (retcode != CURLM_OK) { + LOG(ERROR) << "curl_multi_perform returns error: " << retcode; + } + // If the transfer completes while paused, we should ignore the failure once // the fetcher is unpaused. if (running_handles == 0 && transfer_paused_ && !ignore_failure_) { @@ -431,6 +446,7 @@ void LibcurlHttpFetcher::CurlPerformOnce() { no_network_retry_count_ = 0; } else { LOG(ERROR) << "Unable to get http response code."; + LogCurlHandleInfo(); } // we're done! @@ -756,6 +772,39 @@ void LibcurlHttpFetcher::GetHttpResponseCode() { CURLINFO_RESPONSE_CODE, &http_response_code) == CURLE_OK) { http_response_code_ = static_cast(http_response_code); + } else { + LOG(ERROR) << "Unable to get http response code from curl_easy_getinfo"; + } +} + +void LibcurlHttpFetcher::LogCurlHandleInfo() { + while (true) { + // Repeated calls to |curl_multi_info_read| will return a new struct each + // time, until a NULL is returned as a signal that there is no more to get + // at this point. + int msgs_in_queue; + CURLMsg* curl_msg = + curl_multi_info_read(curl_multi_handle_, &msgs_in_queue); + if (curl_msg == nullptr) + break; + // When |curl_msg| is |CURLMSG_DONE|, a transfer of an easy handle is done, + // and then data contains the return code for this transfer. + if (curl_msg->msg == CURLMSG_DONE) { + // Make sure |curl_multi_handle_| has one and only one easy handle + // |curl_handle_|. + CHECK_EQ(curl_handle_, curl_msg->easy_handle); + // Transfer return code reference: + // https://curl.haxx.se/libcurl/c/libcurl-errors.html + LOG(ERROR) << "Return code for the transfer: " << curl_msg->data.result; + } + } + + // Gets connection error if exists. + long connect_error = 0; // NOLINT(runtime/int) - curl needs long. + CURLcode res = + curl_easy_getinfo(curl_handle_, CURLINFO_OS_ERRNO, &connect_error); + if (res == CURLE_OK && connect_error) { + LOG(ERROR) << "Connect error code from the OS: " << connect_error; } } diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h index 25a2df3d..24103de1 100644 --- a/libcurl_http_fetcher.h +++ b/libcurl_http_fetcher.h @@ -106,6 +106,10 @@ class LibcurlHttpFetcher : public HttpFetcher { max_retry_count_ = max_retry_count; } + void set_is_update_check(bool is_update_check) { + is_update_check_ = is_update_check; + } + private: // libcurl's CURLOPT_CLOSESOCKETFUNCTION callback function. Called when // closing a socket created with the CURLOPT_OPENSOCKETFUNCTION callback. @@ -118,6 +122,11 @@ class LibcurlHttpFetcher : public HttpFetcher { // Asks libcurl for the http response code and stores it in the object. void GetHttpResponseCode(); + // Logs curl handle info. + // This can be called only when an http request failed to avoid spamming the + // logs. This must be called after |ResumeTransfer| and before |CleanUp|. + void LogCurlHandleInfo(); + // Checks whether stored HTTP response is within the success range. inline bool IsHttpResponseSuccess() { return (http_response_code_ >= 200 && http_response_code_ < 300); @@ -265,6 +274,9 @@ class LibcurlHttpFetcher : public HttpFetcher { // ServerToCheck::kNone. 
ServerToCheck server_to_check_{ServerToCheck::kNone}; + // True if this object is for update check. + bool is_update_check_{false}; + int low_speed_limit_bps_{kDownloadLowSpeedLimitBps}; int low_speed_time_seconds_{kDownloadLowSpeedTimeSeconds}; int connect_timeout_seconds_{kDownloadConnectTimeoutSeconds}; diff --git a/metrics_constants.h b/metrics_constants.h index eabb8fb0..b3833a3d 100644 --- a/metrics_constants.h +++ b/metrics_constants.h @@ -60,6 +60,10 @@ enum class DownloadErrorCode { // above block and before the kInputMalformed field. This // is to ensure that error codes are not reordered. + // This error is reported when libcurl has an internal error that + // update_engine can't recover from. + kInternalError = 99, + // This error code is used to convey that malformed input was given // to the utils::GetDownloadErrorCode() function. This should never // happen but if it does it's because of an internal update_engine diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 5b69ec87..c4adec7f 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -981,6 +981,14 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, } } +void OmahaRequestAction::ReportUpdateCheckMetrics( + metrics::CheckResult result, + metrics::CheckReaction reaction, + metrics::DownloadErrorCode download_error_code) { + system_state_->metrics_reporter()->ReportUpdateCheckMetrics( + system_state_, result, reaction, download_error_code); +} + void OmahaRequestAction::CompleteProcessing() { ScopedActionCompleter completer(processor_, this); OmahaResponse& output_object = const_cast(GetOutputObject()); @@ -1376,8 +1384,7 @@ void OmahaRequestAction::ActionCompleted(ErrorCode code) { break; } - system_state_->metrics_reporter()->ReportUpdateCheckMetrics( - system_state_, result, reaction, download_error_code); + ReportUpdateCheckMetrics(result, reaction, download_error_code); } bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response, diff --git a/omaha_request_action.h b/omaha_request_action.h index 8e81af96..f006d69e 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -125,6 +125,11 @@ class OmahaRequestAction : public Action, void TransferComplete(HttpFetcher* fetcher, bool successful) override; + void ReportUpdateCheckMetrics( + metrics::CheckResult result, + metrics::CheckReaction reaction, + metrics::DownloadErrorCode download_error_code) override; + // Returns true if this is an Event request, false if it's an UpdateCheck. bool IsEvent() const { return event_.get() != nullptr; } diff --git a/update_attempter.cc b/update_attempter.cc index 4bbf5a14..34b7bac9 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -651,6 +651,7 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { // Try harder to connect to the network, esp when not interactive. // See comment in libcurl_http_fetcher.cc. update_check_fetcher->set_no_network_max_retries(interactive ? 1 : 3); + update_check_fetcher->set_is_update_check(true); auto update_check_action = std::make_unique( system_state_, nullptr, std::move(update_check_fetcher), false); auto response_handler_action = From 3d53cbc7ebb65e43b5d9dfb7287094e7e20180df Mon Sep 17 00:00:00 2001 From: Nicolas Norvez Date: Fri, 10 May 2019 15:02:07 -0700 Subject: [PATCH 023/624] init: respawn UE on crash The init file specifies a limit to the number of respawns, but that does not imply that upstart will respawn the process if it crashes. 
Add the "respawn" stanza so Update Engine gets restarted if it crashes. BUG=chromium:961905 TEST=killall update_engine, observe that update_engine is restarted TEST=killall update_engine in a loop, observe that it's only restarted 10 times: ~# for i in {1..20}; do echo "Loop $i"; killall update_engine; sleep 1; done Loop 1 Loop 2 Loop 3 Loop 4 Loop 5 Loop 6 Loop 7 Loop 8 Loop 9 Loop 10 Loop 11 Loop 12 update_engine: no process found Loop 13 update_engine: no process found Loop 14 update_engine: no process found Loop 15 update_engine: no process found Change-Id: I6164fccb8fe76915d0dd260703712224e759387d Reviewed-on: https://chromium-review.googlesource.com/1606528 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Nicolas Norvez Reviewed-by: Xiaochu Liu Reviewed-by: Amin Hassani --- init/update-engine.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/init/update-engine.conf b/init/update-engine.conf index d3681dbc..ca54c4a5 100644 --- a/init/update-engine.conf +++ b/init/update-engine.conf @@ -25,6 +25,7 @@ stop on stopping system-services # The default is 10 failures every 5 seconds, but even if we crash early, it is # hard to catch that. So here we set the crash rate as 10 failures every 20 # seconds which will include the default and more. +respawn respawn limit 10 20 expect fork From e1f55b0f39a109760ce6983d0b8f6f498ec90f52 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Tue, 21 May 2019 15:21:15 -0700 Subject: [PATCH 024/624] Skip an async callback function when the UpdateBootFlagsAction object is destroyed As the callback to MarkBootSuccessfulAsync, UpdateBootFlagsAction's member function CompleteUpdateBootFlags() can still be called even after the current UpdateBootFlagsAction object get destroyed by the action processor. We want to set a static flag in TerminateProcessing() and check its value before executing the callback function. An alternative way is to save and propagate the TaskId when scheduling the task in MarkBootSuccessfulAsync, and cancel the task in UpdateBootFlagsAction's TerminateProcessing(). Bug: 123720545 Test: No longer hit the CHECK after injecting StopProcessing. Change-Id: I98d2cc7b94d4059fb897b89932969b61936e8c2e --- update_boot_flags_action.cc | 17 ++++++++++++++++- update_boot_flags_action.h | 2 ++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/update_boot_flags_action.cc b/update_boot_flags_action.cc index 97ef7f23..ee92ae0a 100644 --- a/update_boot_flags_action.cc +++ b/update_boot_flags_action.cc @@ -50,8 +50,11 @@ void UpdateBootFlagsAction::PerformAction() { } } -void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) { +void UpdateBootFlagsAction::TerminateProcessing() { is_running_ = false; +} + +void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) { if (!successful) { // We ignore the failure for now because if the updating boot flags is flaky // or has a bug in a specific release, then blocking the update can cause @@ -61,6 +64,18 @@ void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) { // TODO(ahassani): Add new error code metric for kUpdateBootFlagsFailed. LOG(ERROR) << "Updating boot flags failed, but ignoring its failure."; } + + // As the callback to MarkBootSuccessfulAsync, this function can still be + // called even after the current UpdateBootFlagsAction object get destroyed by + // the action processor. In this case, check the value of the static variable + // |is_running_| and skip executing the callback function. 
+ if (!is_running_) { + LOG(INFO) << "UpdateBootFlagsAction is no longer running."; + return; + } + + is_running_ = false; + updated_boot_flags_ = true; processor_->ActionComplete(this, ErrorCode::kSuccess); } diff --git a/update_boot_flags_action.h b/update_boot_flags_action.h index afa2c3f1..892aab7b 100644 --- a/update_boot_flags_action.h +++ b/update_boot_flags_action.h @@ -30,6 +30,8 @@ class UpdateBootFlagsAction : public AbstractAction { void PerformAction() override; + void TerminateProcessing() override; + static std::string StaticType() { return "UpdateBootFlagsAction"; } std::string Type() const override { return StaticType(); } From 0ede7bdf41b49022284cb6337bfd56b42aa594f6 Mon Sep 17 00:00:00 2001 From: Keigo Oka Date: Mon, 13 May 2019 17:35:13 +0900 Subject: [PATCH 025/624] update_engine: migrate the package to use GN. TEST=USE="asan fuzzer" FEATURES=test emerge-$BOARD update_engine # BOARD=amd64-generic TEST=FEATURES=test emerge-$BOARD update_engine TEST=USE="test binder dbus hwid_override chrome_kiosk_app chrome_network_proxy mtd dlc" ebuild-$BOARD ~/trunk/src/third_party/chromiumos-overlay/chromeos-base/update_engine/update_engine-9999.ebuild clean compile BUG=chromium:953642 Cq-Depend:chromium:1608971 Change-Id: I3de939193797858006d64c1ddbf3fc8e6f48b1eb Reviewed-on: https://chromium-review.googlesource.com/1608969 Commit-Ready: Keigo Oka Tested-by: Keigo Oka Legacy-Commit-Queue: Commit Bot Reviewed-by: Keigo Oka --- BUILD.gn | 562 +++++++++++++++++++++++++++++++++++++++ tar_bunzip2.gni | 33 +++ tar_bunzip2.gypi | 39 --- update_engine.gyp | 656 ---------------------------------------------- 4 files changed, 595 insertions(+), 695 deletions(-) create mode 100644 BUILD.gn create mode 100644 tar_bunzip2.gni delete mode 100644 tar_bunzip2.gypi delete mode 100644 update_engine.gyp diff --git a/BUILD.gn b/BUILD.gn new file mode 100644 index 00000000..fc25cd91 --- /dev/null +++ b/BUILD.gn @@ -0,0 +1,562 @@ +# +# Copyright (C) 2019 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import("//common-mk/generate-dbus-adaptors.gni") +import("//common-mk/generate-dbus-proxies.gni") +import("//common-mk/openssl_pem.gni") +import("//common-mk/pkg_config.gni") +import("//common-mk/proto_library.gni") +import("//update_engine/tar_bunzip2.gni") + +group("all") { + deps = [ + ":delta_generator", + ":libpayload_consumer", + ":libpayload_generator", + ":libupdate_engine", + ":libupdate_engine_client", + ":update_engine", + ":update_engine-dbus-adaptor", + ":update_engine-dbus-kiosk-app-client", + ":update_engine_client", + ":update_metadata-protos", + ] + + if (use.test) { + deps += [ + ":test_http_server", + ":test_subprocess", + ":update_engine-test_images", + ":update_engine-testkeys", + ":update_engine_test_libs", + ":update_engine_unittests", + ] + } + + if (use.fuzzer) { + deps += [ ":update_engine_omaha_request_action_fuzzer" ] + } +} + +pkg_config("target_defaults") { + cflags_cc = [ + "-fno-strict-aliasing", + "-Wnon-virtual-dtor", + ] + cflags = [ + "-g", + "-ffunction-sections", + "-Wall", + "-Wextra", + "-Werror", + "-Wno-unused-parameter", + ] + ldflags = [ "-Wl,--gc-sections" ] + defines = [ + "__CHROMEOS__", + "_FILE_OFFSET_BITS=64", + "_POSIX_C_SOURCE=199309L", + "USE_BINDER=${use.binder}", + "USE_DBUS=${use.dbus}", + "USE_FEC=0", + "USE_HWID_OVERRIDE=${use.hwid_override}", + "USE_CHROME_KIOSK_APP=${use.chrome_kiosk_app}", + "USE_CHROME_NETWORK_PROXY=${use.chrome_network_proxy}", + "USE_MTD=${use.mtd}", + "USE_OMAHA=1", + "USE_SHILL=1", + ] + include_dirs = [ + # We need this include dir because we include all the local code as + # "update_engine/...". + "${platform2_root}", + "${platform2_root}/update_engine/client_library/include", + ] + pkg_deps = [ + "libbrillo-${libbase_ver}", + "libchrome-${libbase_ver}", + + # system_api depends on protobuf (or protobuf-lite). It must appear + # before protobuf here or the linker flags won't be in the right + # order. + "system_api", + "protobuf-lite", + ] +} + +# Protobufs. +proto_library("update_metadata-protos") { + proto_in_dir = "." + proto_out_dir = "include/update_engine" + sources = [ + "update_metadata.proto", + ] +} + +# Chrome D-Bus bindings. +generate_dbus_adaptors("update_engine-dbus-adaptor") { + dbus_adaptors_out_dir = "include/dbus_bindings" + sources = [ + "dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml", + ] +} + +generate_dbus_proxies("update_engine-dbus-kiosk-app-client") { + mock_output_file = "include/kiosk-app/dbus-proxy-mocks.h" + proxy_output_file = "include/kiosk-app/dbus-proxies.h" + sources = [ + "dbus_bindings/org.chromium.KioskAppService.dbus-xml", + ] +} + +# The payload application component and common dependencies. 
+static_library("libpayload_consumer") { + sources = [ + "common/action_processor.cc", + "common/boot_control_stub.cc", + "common/clock.cc", + "common/constants.cc", + "common/cpu_limiter.cc", + "common/error_code_utils.cc", + "common/hash_calculator.cc", + "common/http_common.cc", + "common/http_fetcher.cc", + "common/hwid_override.cc", + "common/multi_range_http_fetcher.cc", + "common/platform_constants_chromeos.cc", + "common/prefs.cc", + "common/proxy_resolver.cc", + "common/subprocess.cc", + "common/terminator.cc", + "common/utils.cc", + "payload_consumer/bzip_extent_writer.cc", + "payload_consumer/cached_file_descriptor.cc", + "payload_consumer/delta_performer.cc", + "payload_consumer/download_action.cc", + "payload_consumer/extent_reader.cc", + "payload_consumer/extent_writer.cc", + "payload_consumer/file_descriptor.cc", + "payload_consumer/file_descriptor_utils.cc", + "payload_consumer/file_writer.cc", + "payload_consumer/filesystem_verifier_action.cc", + "payload_consumer/install_plan.cc", + "payload_consumer/mount_history.cc", + "payload_consumer/payload_constants.cc", + "payload_consumer/payload_metadata.cc", + "payload_consumer/payload_verifier.cc", + "payload_consumer/postinstall_runner_action.cc", + "payload_consumer/verity_writer_stub.cc", + "payload_consumer/xz_extent_writer.cc", + ] + configs += [ ":target_defaults" ] + libs = [ + "bz2", + "rt", + ] + + # TODO(deymo): Remove unused dependencies once we stop including files + # from the root directory. + all_dependent_pkg_deps = [ + "libcrypto", + "xz-embedded", + "libbspatch", + "libpuffpatch", + ] + deps = [ + ":update_metadata-protos", + ] + + if (use.mtd) { + sources += [ "payload_consumer/mtd_file_descriptor.cc" ] + libs += [ "mtdutils" ] + } +} + +# The main daemon static_library with all the code used to check for updates +# with Omaha and expose a DBus daemon. 
+static_library("libupdate_engine") { + sources = [ + "boot_control_chromeos.cc", + "certificate_checker.cc", + "common_service.cc", + "connection_manager.cc", + "connection_utils.cc", + "daemon.cc", + "dbus_connection.cc", + "dbus_service.cc", + "hardware_chromeos.cc", + "image_properties_chromeos.cc", + "libcurl_http_fetcher.cc", + "metrics_reporter_omaha.cc", + "metrics_utils.cc", + "omaha_request_action.cc", + "omaha_request_builder_xml.cc", + "omaha_request_params.cc", + "omaha_response_handler_action.cc", + "omaha_utils.cc", + "p2p_manager.cc", + "payload_state.cc", + "power_manager_chromeos.cc", + "real_system_state.cc", + "shill_proxy.cc", + "update_attempter.cc", + "update_boot_flags_action.cc", + "update_manager/boxed_value.cc", + "update_manager/chromeos_policy.cc", + "update_manager/default_policy.cc", + "update_manager/enough_slots_ab_updates_policy_impl.cc", + "update_manager/enterprise_device_policy_impl.cc", + "update_manager/evaluation_context.cc", + "update_manager/interactive_update_policy_impl.cc", + "update_manager/next_update_check_policy_impl.cc", + "update_manager/official_build_check_policy_impl.cc", + "update_manager/out_of_box_experience_policy_impl.cc", + "update_manager/policy.cc", + "update_manager/policy_test_utils.cc", + "update_manager/real_config_provider.cc", + "update_manager/real_device_policy_provider.cc", + "update_manager/real_random_provider.cc", + "update_manager/real_shill_provider.cc", + "update_manager/real_system_provider.cc", + "update_manager/real_time_provider.cc", + "update_manager/real_updater_provider.cc", + "update_manager/staging_utils.cc", + "update_manager/state_factory.cc", + "update_manager/update_manager.cc", + "update_manager/update_time_restrictions_policy_impl.cc", + "update_manager/weekly_time.cc", + "update_status_utils.cc", + ] + configs += [ ":target_defaults" ] + libs = [ + "bz2", + "policy-${libbase_ver}", + "rootdev", + "rt", + ] + all_dependent_pkg_deps = [ + "dbus-1", + "expat", + "libcurl", + "libdebugd-client", + "libmetrics-${libbase_ver}", + "libpower_manager-client", + "libsession_manager-client", + "libshill-client", + "libssl", + "libupdate_engine-client", + "vboot_host", + ] + deps = [ + ":libpayload_consumer", + ":update_engine-dbus-adaptor", + ":update_metadata-protos", + ] + + if (use.dlc) { + all_dependent_pkg_deps += [ "libdlcservice-client" ] + } + + if (use.chrome_network_proxy) { + sources += [ "chrome_browser_proxy_resolver.cc" ] + } + + if (use.chrome_kiosk_app) { + deps += [ ":update_engine-dbus-kiosk-app-client" ] + } + + if (use.dlc) { + sources += [ "dlcservice_chromeos.cc" ] + } else { + sources += [ "common/dlcservice_stub.cc" ] + } +} + +# update_engine daemon. +executable("update_engine") { + sources = [ + "main.cc", + ] + configs += [ ":target_defaults" ] + deps = [ + ":libupdate_engine", + ] +} + +# update_engine client library. +static_library("libupdate_engine_client") { + sources = [ + "client_library/client.cc", + "client_library/client_dbus.cc", + "update_status_utils.cc", + ] + include_dirs = [ "client_library/include" ] + configs += [ ":target_defaults" ] + pkg_deps = [ + "dbus-1", + "libupdate_engine-client", + ] +} + +# update_engine console client. +executable("update_engine_client") { + sources = [ + "common/error_code_utils.cc", + "omaha_utils.cc", + "update_engine_client.cc", + ] + configs += [ ":target_defaults" ] + deps = [ + ":libupdate_engine_client", + ] +} + +# server-side code. This is used for delta_generator and unittests but not +# for any client code. 
+static_library("libpayload_generator") { + sources = [ + "common/file_fetcher.cc", + "payload_generator/ab_generator.cc", + "payload_generator/annotated_operation.cc", + "payload_generator/blob_file_writer.cc", + "payload_generator/block_mapping.cc", + "payload_generator/boot_img_filesystem.cc", + "payload_generator/bzip.cc", + "payload_generator/cycle_breaker.cc", + "payload_generator/deflate_utils.cc", + "payload_generator/delta_diff_generator.cc", + "payload_generator/delta_diff_utils.cc", + "payload_generator/ext2_filesystem.cc", + "payload_generator/extent_ranges.cc", + "payload_generator/extent_utils.cc", + "payload_generator/full_update_generator.cc", + "payload_generator/graph_types.cc", + "payload_generator/graph_utils.cc", + "payload_generator/inplace_generator.cc", + "payload_generator/mapfile_filesystem.cc", + "payload_generator/payload_file.cc", + "payload_generator/payload_generation_config.cc", + "payload_generator/payload_generation_config_chromeos.cc", + "payload_generator/payload_signer.cc", + "payload_generator/raw_filesystem.cc", + "payload_generator/squashfs_filesystem.cc", + "payload_generator/tarjan.cc", + "payload_generator/topological_sort.cc", + "payload_generator/xz_chromeos.cc", + ] + configs += [ ":target_defaults" ] + all_dependent_pkg_deps = [ + "ext2fs", + "libbsdiff", + "libpuffdiff", + "liblzma", + ] + deps = [ + ":libpayload_consumer", + ":update_metadata-protos", + ] +} + +# server-side delta generator. +executable("delta_generator") { + sources = [ + "payload_generator/generate_delta_main.cc", + ] + configs += [ ":target_defaults" ] + configs -= [ "//common-mk:pie" ] + deps = [ + ":libpayload_consumer", + ":libpayload_generator", + ] +} + +if (use.test || use.fuzzer) { + static_library("update_engine_test_libs") { + sources = [ + "common/fake_prefs.cc", + "common/mock_http_fetcher.cc", + "common/test_utils.cc", + "fake_shill_proxy.cc", + "fake_system_state.cc", + "payload_consumer/fake_file_descriptor.cc", + "payload_generator/fake_filesystem.cc", + "update_manager/umtest_utils.cc", + ] + all_dependent_configs = [ "//common-mk:test" ] + configs += [ ":target_defaults" ] + pkg_deps = [ "libshill-client-test" ] + deps = [ + ":libupdate_engine", + ] + } +} + +if (use.test) { + # Public keys used for unit testing. + genopenssl_key("update_engine-testkeys") { + openssl_pem_in_dir = "." + openssl_pem_out_dir = "include/update_engine" + sources = [ + "unittest_key.pem", + "unittest_key2.pem", + ] + } + + # Unpacks sample images used for testing. + tar_bunzip2("update_engine-test_images") { + image_out_dir = "." + sources = [ + "sample_images/sample_images.tar.bz2", + ] + } + + # Test HTTP Server. + executable("test_http_server") { + sources = [ + "common/http_common.cc", + "test_http_server.cc", + ] + configs += [ ":target_defaults" ] + } + + # Test subprocess helper. + executable("test_subprocess") { + sources = [ + "test_subprocess.cc", + ] + configs += [ ":target_defaults" ] + } + + # Main unittest file. 
+ executable("update_engine_unittests") { + sources = [ + "boot_control_chromeos_unittest.cc", + "certificate_checker_unittest.cc", + "common/action_pipe_unittest.cc", + "common/action_processor_unittest.cc", + "common/action_unittest.cc", + "common/cpu_limiter_unittest.cc", + "common/hash_calculator_unittest.cc", + "common/http_fetcher_unittest.cc", + "common/hwid_override_unittest.cc", + "common/prefs_unittest.cc", + "common/proxy_resolver_unittest.cc", + "common/subprocess_unittest.cc", + "common/terminator_unittest.cc", + "common/utils_unittest.cc", + "common_service_unittest.cc", + "connection_manager_unittest.cc", + "hardware_chromeos_unittest.cc", + "image_properties_chromeos_unittest.cc", + "metrics_reporter_omaha_unittest.cc", + "metrics_utils_unittest.cc", + "omaha_request_action_unittest.cc", + "omaha_request_builder_xml_unittest.cc", + "omaha_request_params_unittest.cc", + "omaha_response_handler_action_unittest.cc", + "omaha_utils_unittest.cc", + "p2p_manager_unittest.cc", + "payload_consumer/bzip_extent_writer_unittest.cc", + "payload_consumer/cached_file_descriptor_unittest.cc", + "payload_consumer/delta_performer_integration_test.cc", + "payload_consumer/delta_performer_unittest.cc", + "payload_consumer/download_action_unittest.cc", + "payload_consumer/extent_reader_unittest.cc", + "payload_consumer/extent_writer_unittest.cc", + "payload_consumer/file_descriptor_utils_unittest.cc", + "payload_consumer/file_writer_unittest.cc", + "payload_consumer/filesystem_verifier_action_unittest.cc", + "payload_consumer/postinstall_runner_action_unittest.cc", + "payload_consumer/xz_extent_writer_unittest.cc", + "payload_generator/ab_generator_unittest.cc", + "payload_generator/blob_file_writer_unittest.cc", + "payload_generator/block_mapping_unittest.cc", + "payload_generator/boot_img_filesystem_unittest.cc", + "payload_generator/cycle_breaker_unittest.cc", + "payload_generator/deflate_utils_unittest.cc", + "payload_generator/delta_diff_utils_unittest.cc", + "payload_generator/ext2_filesystem_unittest.cc", + "payload_generator/extent_ranges_unittest.cc", + "payload_generator/extent_utils_unittest.cc", + "payload_generator/full_update_generator_unittest.cc", + "payload_generator/graph_utils_unittest.cc", + "payload_generator/inplace_generator_unittest.cc", + "payload_generator/mapfile_filesystem_unittest.cc", + "payload_generator/payload_file_unittest.cc", + "payload_generator/payload_generation_config_unittest.cc", + "payload_generator/payload_signer_unittest.cc", + "payload_generator/squashfs_filesystem_unittest.cc", + "payload_generator/tarjan_unittest.cc", + "payload_generator/topological_sort_unittest.cc", + "payload_generator/zip_unittest.cc", + "payload_state_unittest.cc", + "testrunner.cc", + "update_attempter_unittest.cc", + "update_boot_flags_action_unittest.cc", + "update_manager/boxed_value_unittest.cc", + "update_manager/chromeos_policy_unittest.cc", + "update_manager/evaluation_context_unittest.cc", + "update_manager/generic_variables_unittest.cc", + "update_manager/prng_unittest.cc", + "update_manager/real_device_policy_provider_unittest.cc", + "update_manager/real_random_provider_unittest.cc", + "update_manager/real_shill_provider_unittest.cc", + "update_manager/real_system_provider_unittest.cc", + "update_manager/real_time_provider_unittest.cc", + "update_manager/real_updater_provider_unittest.cc", + "update_manager/staging_utils_unittest.cc", + "update_manager/update_manager_unittest.cc", + "update_manager/update_time_restrictions_policy_impl_unittest.cc", + 
"update_manager/variable_unittest.cc", + "update_manager/weekly_time_unittest.cc", + ] + configs += [ ":target_defaults" ] + pkg_deps = [ + "libbrillo-test-${libbase_ver}", + "libchrome-test-${libbase_ver}", + "libdebugd-client-test", + "libpower_manager-client-test", + "libsession_manager-client-test", + "libshill-client-test", + ] + deps = [ + ":libpayload_generator", + ":libupdate_engine", + ":update_engine_test_libs", + ] + } +} + +# Fuzzer target. +if (use.fuzzer) { + executable("update_engine_omaha_request_action_fuzzer") { + sources = [ + "omaha_request_action_fuzzer.cc", + ] + configs += [ + "//common-mk/common_fuzzer", + ":target_defaults", + ] + pkg_deps = [ + "libbrillo-test-${libbase_ver}", + "libchrome-test-${libbase_ver}", + ] + deps = [ + ":libupdate_engine", + ":update_engine_test_libs", + ] + } +} diff --git a/tar_bunzip2.gni b/tar_bunzip2.gni new file mode 100644 index 00000000..0a178992 --- /dev/null +++ b/tar_bunzip2.gni @@ -0,0 +1,33 @@ +# +# Copyright (C) 2019 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +template("tar_bunzip2") { + forward_variables_from(invoker, [ "image_out_dir" ]) + out_dir = "${root_gen_dir}/${image_out_dir}" + + action_foreach(target_name) { + sources = invoker.sources + script = "//common-mk/file_generator_wrapper.py" + outputs = [ + "${out_dir}/{{source_name_part}}.flag", + ] + args = [ + "sh", + "-c", + "tar -xvf \"{{source}}\" -C \"${out_dir}\" && touch ${out_dir}/{{source_name_part}}.flag", + ] + } +} diff --git a/tar_bunzip2.gypi b/tar_bunzip2.gypi deleted file mode 100644 index 4d1be284..00000000 --- a/tar_bunzip2.gypi +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright (C) 2015 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -{ - 'variables': { - 'out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(image_out_dir)', - }, - 'rules': [ - { - 'rule_name': 'tar-bunzip2', - 'extension': 'bz2', - 'outputs': [ - # The .flag file is used to mark the timestamp of the file extraction - # and re-run this action if a new .bz2 file is generated. 
- '<(out_dir)/<(RULE_INPUT_ROOT).flag', - ], - 'action': [ - 'sh', - '-c', - 'tar -xvf "<(RULE_INPUT_PATH)" -C "<(out_dir)" && touch <(out_dir)/<(RULE_INPUT_ROOT).flag', - ], - 'msvs_cygwin_shell': 0, - 'process_outputs_as_sources': 1, - 'message': 'Unpacking file <(RULE_INPUT_PATH)', - }, - ], -} diff --git a/update_engine.gyp b/update_engine.gyp deleted file mode 100644 index b7ccae8c..00000000 --- a/update_engine.gyp +++ /dev/null @@ -1,656 +0,0 @@ -# -# Copyright (C) 2015 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# TODO: Rename these files to pass this check. -# gyplint: disable=GypLintSourceFileNames -{ - 'variables': { - 'USE_chrome_network_proxy': '1', - 'USE_chrome_kiosk_app': '1', - }, - 'target_defaults': { - 'variables': { - 'deps': [ - 'libbrillo-<(libbase_ver)', - 'libchrome-<(libbase_ver)', - # system_api depends on protobuf (or protobuf-lite). It must appear - # before protobuf here or the linker flags won't be in the right - # order. - 'system_api', - 'protobuf-lite', - ], - # The -DUSE_* flags are passed from platform2.py. We use sane defaults - # here when these USE flags are not defined. You can set the default value - # for the USE flag in the ebuild. - 'USE_hwid_override%': '0', - }, - 'cflags': [ - '-g', - '-ffunction-sections', - '-Wall', - '-Wextra', - '-Werror', - '-Wno-unused-parameter', - ], - 'cflags_cc': [ - '-fno-strict-aliasing', - '-Wnon-virtual-dtor', - ], - 'ldflags': [ - '-Wl,--gc-sections', - ], - 'defines': [ - '__CHROMEOS__', - '_FILE_OFFSET_BITS=64', - '_POSIX_C_SOURCE=199309L', - 'USE_BINDER=<(USE_binder)', - 'USE_DBUS=<(USE_dbus)', - 'USE_FEC=0', - 'USE_HWID_OVERRIDE=<(USE_hwid_override)', - 'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)', - 'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)', - 'USE_MTD=<(USE_mtd)', - 'USE_OMAHA=1', - 'USE_SHILL=1', - ], - 'include_dirs': [ - # We need this include dir because we include all the local code as - # "update_engine/...". - '<(platform2_root)/../aosp/system', - '<(platform2_root)/../aosp/system/update_engine/client_library/include', - ], - }, - 'targets': [ - # Protobufs. - { - 'target_name': 'update_metadata-protos', - 'type': 'static_library', - 'variables': { - 'proto_in_dir': '.', - 'proto_out_dir': 'include/update_engine', - }, - 'sources': [ - 'update_metadata.proto', - ], - 'includes': ['../../../platform2/common-mk/protoc.gypi'], - }, - # Chrome D-Bus bindings. 
- { - 'target_name': 'update_engine-dbus-adaptor', - 'type': 'none', - 'variables': { - 'dbus_adaptors_out_dir': 'include/dbus_bindings', - 'dbus_xml_extension': 'dbus-xml', - }, - 'sources': [ - 'dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml', - ], - 'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'], - }, - { - 'target_name': 'update_engine-dbus-kiosk-app-client', - 'type': 'none', - 'actions': [{ - 'action_name': 'update_engine-dbus-kiosk-app-client-action', - 'variables': { - 'mock_output_file': 'include/kiosk-app/dbus-proxy-mocks.h', - 'proxy_output_file': 'include/kiosk-app/dbus-proxies.h', - }, - 'sources': [ - 'dbus_bindings/org.chromium.KioskAppService.dbus-xml', - ], - 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'], - }], - }, - # The payload application component and common dependencies. - { - 'target_name': 'libpayload_consumer', - 'type': 'static_library', - 'dependencies': [ - 'update_metadata-protos', - ], - # TODO(deymo): Remove unused dependencies once we stop including files - # from the root directory. - 'variables': { - 'exported_deps': [ - 'libcrypto', - 'xz-embedded', - 'libbspatch', - 'libpuffpatch', - ], - 'deps': ['<@(exported_deps)'], - }, - 'all_dependent_settings': { - 'variables': { - 'deps': [ - '<@(exported_deps)', - ], - }, - }, - 'link_settings': { - 'variables': { - 'deps': [ - '<@(exported_deps)', - ], - }, - 'libraries': [ - '-lbz2', - '-lrt', - ], - }, - 'sources': [ - 'common/action_processor.cc', - 'common/boot_control_stub.cc', - 'common/clock.cc', - 'common/constants.cc', - 'common/cpu_limiter.cc', - 'common/error_code_utils.cc', - 'common/hash_calculator.cc', - 'common/http_common.cc', - 'common/http_fetcher.cc', - 'common/hwid_override.cc', - 'common/multi_range_http_fetcher.cc', - 'common/platform_constants_chromeos.cc', - 'common/prefs.cc', - 'common/proxy_resolver.cc', - 'common/subprocess.cc', - 'common/terminator.cc', - 'common/utils.cc', - 'payload_consumer/bzip_extent_writer.cc', - 'payload_consumer/cached_file_descriptor.cc', - 'payload_consumer/delta_performer.cc', - 'payload_consumer/download_action.cc', - 'payload_consumer/extent_reader.cc', - 'payload_consumer/extent_writer.cc', - 'payload_consumer/file_descriptor.cc', - 'payload_consumer/file_descriptor_utils.cc', - 'payload_consumer/file_writer.cc', - 'payload_consumer/filesystem_verifier_action.cc', - 'payload_consumer/install_plan.cc', - 'payload_consumer/mount_history.cc', - 'payload_consumer/payload_constants.cc', - 'payload_consumer/payload_metadata.cc', - 'payload_consumer/payload_verifier.cc', - 'payload_consumer/postinstall_runner_action.cc', - 'payload_consumer/verity_writer_stub.cc', - 'payload_consumer/xz_extent_writer.cc', - ], - 'conditions': [ - ['USE_mtd == 1', { - 'sources': [ - 'payload_consumer/mtd_file_descriptor.cc', - ], - 'link_settings': { - 'libraries': [ - '-lmtdutils', - ], - }, - }], - ], - }, - # The main daemon static_library with all the code used to check for updates - # with Omaha and expose a DBus daemon. 
- { - 'target_name': 'libupdate_engine', - 'type': 'static_library', - 'dependencies': [ - 'libpayload_consumer', - 'update_metadata-protos', - 'update_engine-dbus-adaptor', - ], - 'variables': { - 'exported_deps': [ - 'dbus-1', - 'expat', - 'libcurl', - 'libdebugd-client', - 'libmetrics-<(libbase_ver)', - 'libpower_manager-client', - 'libsession_manager-client', - 'libshill-client', - 'libssl', - 'libupdate_engine-client', - 'vboot_host', - ], - 'conditions':[ - ['USE_dlc == 1', { - 'exported_deps' : [ - 'libdlcservice-client', - ], - }], - ], - 'deps': ['<@(exported_deps)'], - }, - 'all_dependent_settings': { - 'variables': { - 'deps': [ - '<@(exported_deps)', - ], - }, - }, - 'link_settings': { - 'variables': { - 'deps': [ - '<@(exported_deps)', - ], - }, - 'libraries': [ - '-lbz2', - '-lpolicy-<(libbase_ver)', - '-lrootdev', - '-lrt', - ], - }, - 'sources': [ - 'boot_control_chromeos.cc', - 'certificate_checker.cc', - 'common_service.cc', - 'connection_manager.cc', - 'connection_utils.cc', - 'daemon.cc', - 'dbus_connection.cc', - 'dbus_service.cc', - 'hardware_chromeos.cc', - 'image_properties_chromeos.cc', - 'libcurl_http_fetcher.cc', - 'metrics_reporter_omaha.cc', - 'metrics_utils.cc', - 'omaha_request_action.cc', - 'omaha_request_builder_xml.cc', - 'omaha_request_params.cc', - 'omaha_response_handler_action.cc', - 'omaha_utils.cc', - 'p2p_manager.cc', - 'payload_state.cc', - 'power_manager_chromeos.cc', - 'real_system_state.cc', - 'shill_proxy.cc', - 'update_attempter.cc', - 'update_boot_flags_action.cc', - 'update_manager/boxed_value.cc', - 'update_manager/chromeos_policy.cc', - 'update_manager/default_policy.cc', - 'update_manager/enough_slots_ab_updates_policy_impl.cc', - 'update_manager/enterprise_device_policy_impl.cc', - 'update_manager/evaluation_context.cc', - 'update_manager/interactive_update_policy_impl.cc', - 'update_manager/next_update_check_policy_impl.cc', - 'update_manager/official_build_check_policy_impl.cc', - 'update_manager/out_of_box_experience_policy_impl.cc', - 'update_manager/policy.cc', - 'update_manager/policy_test_utils.cc', - 'update_manager/real_config_provider.cc', - 'update_manager/real_device_policy_provider.cc', - 'update_manager/real_random_provider.cc', - 'update_manager/real_shill_provider.cc', - 'update_manager/real_system_provider.cc', - 'update_manager/real_time_provider.cc', - 'update_manager/real_updater_provider.cc', - 'update_manager/staging_utils.cc', - 'update_manager/state_factory.cc', - 'update_manager/update_manager.cc', - 'update_manager/update_time_restrictions_policy_impl.cc', - 'update_manager/weekly_time.cc', - 'update_status_utils.cc', - ], - 'conditions': [ - ['USE_chrome_network_proxy == 1', { - 'sources': [ - 'chrome_browser_proxy_resolver.cc', - ], - }], - ['USE_chrome_kiosk_app == 1', { - 'dependencies': [ - 'update_engine-dbus-kiosk-app-client', - ], - }], - ['USE_dlc == 1', { - 'sources': [ - 'dlcservice_chromeos.cc', - ], - }], - ['USE_dlc == 0', { - 'sources': [ - 'common/dlcservice_stub.cc', - ], - }], - ], - }, - # update_engine daemon. - { - 'target_name': 'update_engine', - 'type': 'executable', - 'dependencies': [ - 'libupdate_engine', - ], - 'sources': [ - 'main.cc', - ], - }, - # update_engine client library. 
- { - 'target_name': 'libupdate_engine_client', - 'type': 'static_library', - 'variables': { - 'deps': [ - 'dbus-1', - 'libupdate_engine-client', - ], - }, - 'sources': [ - 'client_library/client.cc', - 'client_library/client_dbus.cc', - 'update_status_utils.cc', - ], - 'include_dirs': [ - 'client_library/include', - ], - }, - # update_engine console client. - { - 'target_name': 'update_engine_client', - 'type': 'executable', - 'dependencies': [ - 'libupdate_engine_client', - ], - 'sources': [ - 'common/error_code_utils.cc', - 'omaha_utils.cc', - 'update_engine_client.cc', - ], - }, - # server-side code. This is used for delta_generator and unittests but not - # for any client code. - { - 'target_name': 'libpayload_generator', - 'type': 'static_library', - 'dependencies': [ - 'libpayload_consumer', - 'update_metadata-protos', - ], - 'variables': { - 'exported_deps': [ - 'ext2fs', - 'libbsdiff', - 'libpuffdiff', - 'liblzma', - ], - 'deps': ['<@(exported_deps)'], - }, - 'all_dependent_settings': { - 'variables': { - 'deps': [ - '<@(exported_deps)', - ], - }, - }, - 'link_settings': { - 'variables': { - 'deps': [ - '<@(exported_deps)', - ], - }, - }, - 'sources': [ - 'common/file_fetcher.cc', - 'payload_generator/ab_generator.cc', - 'payload_generator/annotated_operation.cc', - 'payload_generator/blob_file_writer.cc', - 'payload_generator/block_mapping.cc', - 'payload_generator/boot_img_filesystem.cc', - 'payload_generator/bzip.cc', - 'payload_generator/cycle_breaker.cc', - 'payload_generator/deflate_utils.cc', - 'payload_generator/delta_diff_generator.cc', - 'payload_generator/delta_diff_utils.cc', - 'payload_generator/ext2_filesystem.cc', - 'payload_generator/extent_ranges.cc', - 'payload_generator/extent_utils.cc', - 'payload_generator/full_update_generator.cc', - 'payload_generator/graph_types.cc', - 'payload_generator/graph_utils.cc', - 'payload_generator/inplace_generator.cc', - 'payload_generator/mapfile_filesystem.cc', - 'payload_generator/payload_file.cc', - 'payload_generator/payload_generation_config.cc', - 'payload_generator/payload_generation_config_chromeos.cc', - 'payload_generator/payload_signer.cc', - 'payload_generator/raw_filesystem.cc', - 'payload_generator/squashfs_filesystem.cc', - 'payload_generator/tarjan.cc', - 'payload_generator/topological_sort.cc', - 'payload_generator/xz_chromeos.cc', - ], - }, - # server-side delta generator. - { - 'target_name': 'delta_generator', - 'type': 'executable', - 'dependencies': [ - 'libpayload_consumer', - 'libpayload_generator', - ], - 'link_settings': { - 'ldflags!': [ - '-pie', - ], - }, - 'sources': [ - 'payload_generator/generate_delta_main.cc', - ], - }, - { - 'target_name': 'update_engine_test_libs', - 'type': 'static_library', - 'variables': { - 'deps': [ - 'libshill-client-test', - ], - }, - 'dependencies': [ - 'libupdate_engine', - ], - 'includes': [ - '../../../platform2/common-mk/common_test.gypi', - ], - 'sources': [ - 'common/fake_prefs.cc', - 'common/mock_http_fetcher.cc', - 'common/test_utils.cc', - 'fake_shill_proxy.cc', - 'fake_system_state.cc', - 'payload_consumer/fake_file_descriptor.cc', - 'payload_generator/fake_filesystem.cc', - 'update_manager/umtest_utils.cc', - ], - }, - ], - 'conditions': [ - ['USE_test == 1', { - 'targets': [ - # Public keys used for unit testing. 
- { - 'target_name': 'update_engine-testkeys', - 'type': 'none', - 'variables': { - 'openssl_pem_in_dir': '.', - 'openssl_pem_out_dir': 'include/update_engine', - }, - 'sources': [ - 'unittest_key.pem', - 'unittest_key2.pem', - ], - 'includes': ['../../../platform2/common-mk/openssl_pem.gypi'], - }, - # Unpacks sample images used for testing. - { - 'target_name': 'update_engine-test_images', - 'type': 'none', - 'variables': { - 'image_out_dir': '.', - }, - 'sources': [ - 'sample_images/sample_images.tar.bz2', - ], - 'includes': ['tar_bunzip2.gypi'], - }, - # Test HTTP Server. - { - 'target_name': 'test_http_server', - 'type': 'executable', - 'sources': [ - 'common/http_common.cc', - 'test_http_server.cc', - ], - }, - # Test subprocess helper. - { - 'target_name': 'test_subprocess', - 'type': 'executable', - 'sources': [ - 'test_subprocess.cc', - ], - }, - # Main unittest file. - { - 'target_name': 'update_engine_unittests', - 'type': 'executable', - 'variables': { - 'deps': [ - 'libbrillo-test-<(libbase_ver)', - 'libchrome-test-<(libbase_ver)', - 'libdebugd-client-test', - 'libpower_manager-client-test', - 'libsession_manager-client-test', - 'libshill-client-test', - ], - }, - 'dependencies': [ - 'libupdate_engine', - 'libpayload_generator', - 'update_engine_test_libs', - ], - 'sources': [ - 'boot_control_chromeos_unittest.cc', - 'certificate_checker_unittest.cc', - 'common/action_pipe_unittest.cc', - 'common/action_processor_unittest.cc', - 'common/action_unittest.cc', - 'common/cpu_limiter_unittest.cc', - 'common/hash_calculator_unittest.cc', - 'common/http_fetcher_unittest.cc', - 'common/hwid_override_unittest.cc', - 'common/prefs_unittest.cc', - 'common/proxy_resolver_unittest.cc', - 'common/subprocess_unittest.cc', - 'common/terminator_unittest.cc', - 'common/utils_unittest.cc', - 'common_service_unittest.cc', - 'connection_manager_unittest.cc', - 'hardware_chromeos_unittest.cc', - 'image_properties_chromeos_unittest.cc', - 'metrics_reporter_omaha_unittest.cc', - 'metrics_utils_unittest.cc', - 'omaha_request_action_unittest.cc', - 'omaha_request_builder_xml_unittest.cc', - 'omaha_request_params_unittest.cc', - 'omaha_response_handler_action_unittest.cc', - 'omaha_utils_unittest.cc', - 'p2p_manager_unittest.cc', - 'payload_consumer/bzip_extent_writer_unittest.cc', - 'payload_consumer/cached_file_descriptor_unittest.cc', - 'payload_consumer/delta_performer_integration_test.cc', - 'payload_consumer/delta_performer_unittest.cc', - 'payload_consumer/download_action_unittest.cc', - 'payload_consumer/extent_reader_unittest.cc', - 'payload_consumer/extent_writer_unittest.cc', - 'payload_consumer/file_descriptor_utils_unittest.cc', - 'payload_consumer/file_writer_unittest.cc', - 'payload_consumer/filesystem_verifier_action_unittest.cc', - 'payload_consumer/postinstall_runner_action_unittest.cc', - 'payload_consumer/xz_extent_writer_unittest.cc', - 'payload_generator/ab_generator_unittest.cc', - 'payload_generator/blob_file_writer_unittest.cc', - 'payload_generator/block_mapping_unittest.cc', - 'payload_generator/boot_img_filesystem_unittest.cc', - 'payload_generator/cycle_breaker_unittest.cc', - 'payload_generator/deflate_utils_unittest.cc', - 'payload_generator/delta_diff_utils_unittest.cc', - 'payload_generator/ext2_filesystem_unittest.cc', - 'payload_generator/extent_ranges_unittest.cc', - 'payload_generator/extent_utils_unittest.cc', - 'payload_generator/full_update_generator_unittest.cc', - 'payload_generator/graph_utils_unittest.cc', - 'payload_generator/inplace_generator_unittest.cc', - 
'payload_generator/mapfile_filesystem_unittest.cc', - 'payload_generator/payload_file_unittest.cc', - 'payload_generator/payload_generation_config_unittest.cc', - 'payload_generator/payload_signer_unittest.cc', - 'payload_generator/squashfs_filesystem_unittest.cc', - 'payload_generator/tarjan_unittest.cc', - 'payload_generator/topological_sort_unittest.cc', - 'payload_generator/zip_unittest.cc', - 'payload_state_unittest.cc', - 'testrunner.cc', - 'update_attempter_unittest.cc', - 'update_boot_flags_action_unittest.cc', - 'update_manager/boxed_value_unittest.cc', - 'update_manager/chromeos_policy_unittest.cc', - 'update_manager/evaluation_context_unittest.cc', - 'update_manager/generic_variables_unittest.cc', - 'update_manager/prng_unittest.cc', - 'update_manager/real_device_policy_provider_unittest.cc', - 'update_manager/real_random_provider_unittest.cc', - 'update_manager/real_shill_provider_unittest.cc', - 'update_manager/real_system_provider_unittest.cc', - 'update_manager/real_time_provider_unittest.cc', - 'update_manager/real_updater_provider_unittest.cc', - 'update_manager/staging_utils_unittest.cc', - 'update_manager/update_manager_unittest.cc', - 'update_manager/update_time_restrictions_policy_impl_unittest.cc', - 'update_manager/variable_unittest.cc', - 'update_manager/weekly_time_unittest.cc', - ], - }, - ], - }], - # Fuzzer target. - ['USE_fuzzer == 1', { - 'targets': [ - { - 'target_name': 'update_engine_omaha_request_action_fuzzer', - 'type': 'executable', - 'variables': { - 'deps': [ - 'libbrillo-test-<(libbase_ver)', - 'libchrome-test-<(libbase_ver)', - ], - }, - 'includes': [ - '../../../platform2/common-mk/common_fuzzer.gypi', - ], - 'dependencies': [ - 'libupdate_engine', - 'update_engine_test_libs', - ], - 'sources': [ - 'omaha_request_action_fuzzer.cc', - ], - }, - ], - }], - ], -} From 1329fd880962c9441d4ca462e8cda3fbc29049c9 Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Thu, 16 May 2019 17:27:34 -0700 Subject: [PATCH 026/624] update_engine: trigger a crash on unrecoverable condition CURLM_INTERNAL_ERROR and CURLM_OUT_OF_MEMORY are two libcurl error codes that caller (update_engine) has no way to recover on its own. Reference: https://curl.haxx.se/libcurl/c/libcurl-errors.html Since those error conditions aren't recoverable and might be responsible for the failures to update observed in crbug.com/927039, we exit and let the system respawn update_engine to start from a fresh state and recover. BUG=chromium:962730,chromium:927039 TEST=unittest Change-Id: I55946e58e518da5bc5cb0c23690430c6298b8582 Reviewed-on: https://chromium-review.googlesource.com/1616425 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Xiaochu Liu Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- libcurl_http_fetcher.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 1aa7e8ba..f4694357 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -416,7 +416,11 @@ void LibcurlHttpFetcher::CurlPerformOnce() { metrics::CheckResult::kUnset, metrics::CheckReaction::kUnset, metrics::DownloadErrorCode::kInternalError); - LOG(ERROR) << "curl_multi_perform is in an unrecoverable error condition: " + // According to https://curl.haxx.se/libcurl/c/libcurl-errors.html: + // CURLM_INTERNAL_ERROR and CURLM_OUT_OF_MEMORY are two libcurl error codes + // that caller has no way to recover on its own. Thus, we exit and let the + // system respawn update_engine to start from a fresh state and recover. 
+ LOG(FATAL) << "curl_multi_perform is in an unrecoverable error condition: " << retcode; } else if (retcode != CURLM_OK) { LOG(ERROR) << "curl_multi_perform returns error: " << retcode; From 3d9f2edc37a4f931dffae7cf1d00d1920fcb9812 Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Tue, 28 May 2019 17:30:21 -0700 Subject: [PATCH 027/624] Remove the support for PRODUCT_STATIC_BOOT_CONTROL_HAL. Bug: 34254109 Test: TreeHugger Change-Id: I53ce8bf9bd641f8eb63e96adc055eb29774be2e2 --- Android.bp | 1 - 1 file changed, 1 deletion(-) diff --git a/Android.bp b/Android.bp index 9e8296b3..e861797e 100644 --- a/Android.bp +++ b/Android.bp @@ -370,7 +370,6 @@ cc_binary { }, required: [ - "android.hardware.boot@1.0-impl-wrapper.recovery", "update_engine_payload_key.recovery", ], } From d84c42808be09c5e416a2693d772f92fe2032a1f Mon Sep 17 00:00:00 2001 From: Aya ElAttar Date: Mon, 20 May 2019 15:23:00 +0200 Subject: [PATCH 028/624] Removed RollbackOnlyIfRestorePossible option from DeviceRollbackToTargetVersion policy As this option was not implemented and won't be launched, so it should be removed from documentation and codebase before feature launch. BUG=chromium:947621 TEST=./build_packages --board=amd64-generic && \ cros_run_unit_tests --board=amd64-generic --packages update_engine Change-Id: I11ffeb7b65f55e9b026bd4673d16900c34627b34 Reviewed-on: https://chromium-review.googlesource.com/1619795 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Aya Elsayed Legacy-Commit-Queue: Commit Bot Reviewed-by: Sergey Poromov Reviewed-by: Amin Hassani --- update_manager/boxed_value.cc | 2 -- update_manager/boxed_value_unittest.cc | 5 ----- update_manager/chromeos_policy_unittest.cc | 6 ------ update_manager/enterprise_device_policy_impl.cc | 9 --------- update_manager/rollback_prefs.h | 3 +-- 5 files changed, 1 insertion(+), 24 deletions(-) diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc index cee1ece8..ef321e6f 100644 --- a/update_manager/boxed_value.cc +++ b/update_manager/boxed_value.cc @@ -167,8 +167,6 @@ string BoxedValue::ValuePrinter(const void* value) { return "Rollback and powerwash"; case RollbackToTargetVersion::kRollbackAndRestoreIfPossible: return "Rollback and restore if possible"; - case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible: - return "Rollback only if restore is possible"; case RollbackToTargetVersion::kMaxValue: NOTREACHED(); return "Max value"; diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc index 2fa94ec0..f98b6b65 100644 --- a/update_manager/boxed_value_unittest.cc +++ b/update_manager/boxed_value_unittest.cc @@ -215,11 +215,6 @@ TEST(UmBoxedValueTest, RollbackToTargetVersionToString) { BoxedValue(new RollbackToTargetVersion( RollbackToTargetVersion::kRollbackAndRestoreIfPossible)) .ToString()); - EXPECT_EQ( - "Rollback only if restore is possible", - BoxedValue(new RollbackToTargetVersion( - RollbackToTargetVersion::kRollbackOnlyIfRestorePossible)) - .ToString()); } TEST(UmBoxedValueTest, SetConnectionTypeToString) { diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc index 5341ebb3..fb8c789f 100644 --- a/update_manager/chromeos_policy_unittest.cc +++ b/update_manager/chromeos_policy_unittest.cc @@ -284,12 +284,6 @@ TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackAndRestoreIfPossible) { true, RollbackToTargetVersion::kRollbackAndRestoreIfPossible)); } -TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackOnlyIfRestorePossible) { - // We're not allowed 
to do rollback until we support data save and restore.
-  EXPECT_FALSE(TestRollbackAllowed(
-      true, RollbackToTargetVersion::kRollbackOnlyIfRestorePossible));
-}
-
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackDisabled) {
   EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
 }
diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index 3d77d592..dea38bad 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -104,15 +104,6 @@ EvalStatus EnterpriseDevicePolicyImpl::UpdateCheckAllowed(
       result->rollback_allowed = true;
       result->rollback_data_save_requested = true;
       break;
-    case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
-      // TODO(crbug.com/947621): Remove this policy option until we know
-      // how it could be supported correctly.
-      LOG(INFO) << "Policy only allows rollbacks if restore is possible.";
-      // We don't support restore yet, policy doesn't allow rollback in this
-      // case.
-      result->rollback_allowed = false;
-      result->rollback_data_save_requested = false;
-      break;
     case RollbackToTargetVersion::kMaxValue:
       NOTREACHED();
       // Don't add a default case to let the compiler warn about newly
diff --git a/update_manager/rollback_prefs.h b/update_manager/rollback_prefs.h
index 11d09d67..95677011 100644
--- a/update_manager/rollback_prefs.h
+++ b/update_manager/rollback_prefs.h
@@ -31,9 +31,8 @@ enum class RollbackToTargetVersion {
   kDisabled = 1,
   kRollbackAndPowerwash = 2,
   kRollbackAndRestoreIfPossible = 3,
-  kRollbackOnlyIfRestorePossible = 4,
   // This value must be the last entry.
-  kMaxValue = 5
+  kMaxValue = 4
 };
 
 }  // namespace chromeos_update_manager

From aa8e1a4085771c42d87400af900b01fba6c6e1fb Mon Sep 17 00:00:00 2001
From: Matt Ziegelbaum
Date: Thu, 9 May 2019 21:41:58 -0400
Subject: [PATCH 029/624] update_engine: add chrome os device requisition to the omaha request

Requisition type is not currently sent to Omaha and is thus unavailable
for AU rules. This adds a requisition attribute to the app element if
the device has a requisition type. Currently, the types may be one of
remora, shark, or rialto.
TEST=unittest BUG=b:132014633,b:133324571 Change-Id: I0e53d3a5749da4cbb95ce73cff35191066339009 Reviewed-on: https://chromium-review.googlesource.com/1604218 Commit-Ready: Matthew Ziegelbaum Tested-by: Matthew Ziegelbaum Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- common/fake_hardware.h | 9 +++++++++ common/hardware_interface.h | 4 ++++ hardware_android.cc | 5 +++++ hardware_android.h | 1 + hardware_chromeos.cc | 34 +++++++++++++++++++++++++------- hardware_chromeos.h | 1 + omaha_request_action_unittest.cc | 14 +++++++++++++ omaha_request_builder_xml.cc | 8 ++++++++ omaha_request_params.cc | 1 + omaha_request_params.h | 7 +++++++ omaha_request_params_unittest.cc | 4 ++++ 11 files changed, 81 insertions(+), 7 deletions(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 53b2dd5d..6604534c 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -77,6 +77,10 @@ class FakeHardware : public HardwareInterface { std::string GetECVersion() const override { return ec_version_; } + std::string GetDeviceRequisition() const override { + return device_requisition_; + } + int GetMinKernelKeyVersion() const override { return min_kernel_key_version_; } @@ -173,6 +177,10 @@ class FakeHardware : public HardwareInterface { void SetECVersion(const std::string& ec_version) { ec_version_ = ec_version; } + void SetDeviceRequisition(const std::string& requisition) { + device_requisition_ = requisition; + } + void SetMinKernelKeyVersion(int min_kernel_key_version) { min_kernel_key_version_ = min_kernel_key_version; } @@ -207,6 +215,7 @@ class FakeHardware : public HardwareInterface { std::string hardware_class_{"Fake HWID BLAH-1234"}; std::string firmware_version_{"Fake Firmware v1.0.1"}; std::string ec_version_{"Fake EC v1.0a"}; + std::string device_requisition_{"fake_requisition"}; int min_kernel_key_version_{kMinKernelKeyVersion}; int min_firmware_key_version_{kMinFirmwareKeyVersion}; int kernel_max_rollforward_{kKernelMaxRollforward}; diff --git a/common/hardware_interface.h b/common/hardware_interface.h index 6c53540c..da9f10ef 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -70,6 +70,10 @@ class HardwareInterface { // running a custom chrome os ec. virtual std::string GetECVersion() const = 0; + // Returns the OEM device requisition or an empty string if the system does + // not have a requisition, or if not running Chrome OS. + virtual std::string GetDeviceRequisition() const = 0; + // Returns the minimum kernel key version that verified boot on Chrome OS // will allow to boot. This is the value of crossystem tpm_kernver. Returns // -1 on error, or if not running on Chrome OS. 
diff --git a/hardware_android.cc b/hardware_android.cc
index 80c7757a..82f1b9a6 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -121,6 +121,11 @@ string HardwareAndroid::GetECVersion() const {
   return GetProperty(kPropBootBaseband, "");
 }
 
+string HardwareAndroid::GetDeviceRequisition() const {
+  LOG(WARNING) << "STUB: Getting requisition is not supported.";
+  return "";
+}
+
 int HardwareAndroid::GetMinKernelKeyVersion() const {
   LOG(WARNING) << "STUB: No Kernel key version is available.";
   return -1;
diff --git a/hardware_android.h b/hardware_android.h
index c59a152b..6edf4683 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -42,6 +42,7 @@ class HardwareAndroid final : public HardwareInterface {
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
   std::string GetECVersion() const override;
+  std::string GetDeviceRequisition() const override;
   int GetMinKernelKeyVersion() const override;
   int GetMinFirmwareKeyVersion() const override;
   int GetMaxFirmwareKeyRollforward() const override;
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index 60583e1a..dd21c1bc 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -81,6 +81,27 @@ const char* kConfigOptsIsOOBEEnabled = "is_oobe_enabled";
 
 const char* kActivePingKey = "first_active_omaha_ping_sent";
 
+const char* kOemRequisitionKey = "oem_device_requisition";
+
+// Gets a string value from the vpd for a given key using the `vpd_get_value`
+// shell command. Returns true on success.
+int GetVpdValue(string key, string* result) {
+  int exit_code = 0;
+  string value;
+  vector<string> cmd = {"vpd_get_value", key};
+  if (!chromeos_update_engine::Subprocess::SynchronousExec(
+          cmd, &exit_code, &value) ||
+      exit_code) {
+    LOG(ERROR) << "Failed to get vpd key for " << value
+               << " with exit code: " << exit_code;
+    return false;
+  }
+
+  base::TrimWhitespaceASCII(value, base::TRIM_ALL, &value);
+  *result = value;
+  return true;
+}
+
 }  // namespace
 
 namespace chromeos_update_engine {
 
@@ -190,6 +211,11 @@ string HardwareChromeOS::GetECVersion() const {
   return utils::ParseECVersion(input_line);
 }
 
+string HardwareChromeOS::GetDeviceRequisition() const {
+  string requisition;
+  return GetVpdValue(kOemRequisitionKey, &requisition) ?
requisition : ""; +} + int HardwareChromeOS::GetMinKernelKeyVersion() const { return VbGetSystemPropertyInt("tpm_kernver"); } @@ -311,17 +337,11 @@ void HardwareChromeOS::LoadConfig(const string& root_prefix, bool normal_mode) { } bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const { - int exit_code = 0; string active_ping_str; - vector cmd = {"vpd_get_value", kActivePingKey}; - if (!Subprocess::SynchronousExec(cmd, &exit_code, &active_ping_str) || - exit_code) { - LOG(ERROR) << "Failed to get vpd key for " << kActivePingKey - << " with exit code: " << exit_code; + if (!GetVpdValue(kActivePingKey, &active_ping_str)) { return false; } - base::TrimWhitespaceASCII(active_ping_str, base::TRIM_ALL, &active_ping_str); int active_ping; if (active_ping_str.empty() || !base::StringToInt(active_ping_str, &active_ping)) { diff --git a/hardware_chromeos.h b/hardware_chromeos.h index 04bdae3e..230e864d 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -47,6 +47,7 @@ class HardwareChromeOS final : public HardwareInterface { std::string GetHardwareClass() const override; std::string GetFirmwareVersion() const override; std::string GetECVersion() const override; + std::string GetDeviceRequisition() const override; int GetMinKernelKeyVersion() const override; int GetMinFirmwareKeyVersion() const override; int GetMaxFirmwareKeyRollforward() const override; diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index bfbf6a4c..91de9d4e 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -2762,4 +2762,18 @@ TEST_F(OmahaRequestActionTest, MismatchNumberOfVersions) { response.past_rollback_key_version.kernel); } +TEST_F(OmahaRequestActionTest, IncludeRequisitionTest) { + request_params_.set_device_requisition("remora"); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_NE(string::npos, post_str.find("requisition=\"remora\"")); +} + +TEST_F(OmahaRequestActionTest, NoIncludeRequisitionTest) { + request_params_.set_device_requisition(""); + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_EQ(string::npos, post_str.find("requisition")); +} + } // namespace chromeos_update_engine diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index aac01366..2cb002ed 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -313,6 +313,13 @@ string GetAppXml(const OmahaEvent* event, } } + string requisition_arg; + if (!params->device_requisition().empty()) { + requisition_arg = "requisition=\"" + + XmlEncodeWithDefault(params->device_requisition(), "") + + "\" "; + } + // clang-format off string app_xml = " fw_version(), "") + "\" " + "ec_version=\"" + XmlEncodeWithDefault(params->ec_version(), "") + "\" " + install_date_in_days_str + + requisition_arg + ">\n" + app_body + " \n"; diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 8c410f17..70867a3f 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -95,6 +95,7 @@ bool OmahaRequestParams::Init(const string& in_app_version, fw_version_ = system_state_->hardware()->GetFirmwareVersion(); ec_version_ = system_state_->hardware()->GetECVersion(); } + device_requisition_ = system_state_->hardware()->GetDeviceRequisition(); if (image_props_.current_channel == mutable_image_props_.target_channel) { // deltas are only okay if the /.nodelta file does not exist. 
if we don't diff --git a/omaha_request_params.h b/omaha_request_params.h index f3f68f42..7b281da8 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -85,6 +85,7 @@ class OmahaRequestParams { inline std::string hwid() const { return hwid_; } inline std::string fw_version() const { return fw_version_; } inline std::string ec_version() const { return ec_version_; } + inline std::string device_requisition() const { return device_requisition_; } inline void set_app_version(const std::string& version) { image_props_.version = version; @@ -265,6 +266,9 @@ class OmahaRequestParams { void set_is_powerwash_allowed(bool powerwash_allowed) { mutable_image_props_.is_powerwash_allowed = powerwash_allowed; } + void set_device_requisition(const std::string& requisition) { + device_requisition_ = requisition; + } private: FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest); @@ -334,6 +338,9 @@ class OmahaRequestParams { std::string hwid_; // Hardware Qualification ID of the client std::string fw_version_; // Chrome OS Firmware Version. std::string ec_version_; // Chrome OS EC Version. + // TODO(b:133324571) tracks removal of this field once it is no longer + // needed in AU requests. Remove by October 1st 2019. + std::string device_requisition_; // Chrome OS Requisition type. bool delta_okay_; // If this client can accept a delta bool interactive_; // Whether this is a user-initiated update check diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc index 73324319..bfcbc328 100644 --- a/omaha_request_params_unittest.cc +++ b/omaha_request_params_unittest.cc @@ -258,4 +258,8 @@ TEST_F(OmahaRequestParamsTest, CollectECFWVersionsTest) { EXPECT_TRUE(params_.CollectECFWVersions()); } +TEST_F(OmahaRequestParamsTest, RequisitionIsSetTest) { + EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_EQ("fake_requisition", params_.device_requisition()); +} } // namespace chromeos_update_engine From d4fe0fbd7b6f9bf276e67bd4635f9a537ff133ed Mon Sep 17 00:00:00 2001 From: Keigo Oka Date: Wed, 29 May 2019 15:51:26 +0900 Subject: [PATCH 030/624] update_engine: migrate client package to use GN TEST=ebuild-$BOARD ~/trunk/src/third_party/chromiumos-overlay/chromeos-base/update_engine-client/update_engine-client-9999.ebuild clean install # BOARD=amd64-generic TEST=ebuild-$BOARD ~/trunk/src/third_party/chromiumos-overlay/chromeos-base/update_engine/update_engine-9999.ebuild clean install TEST=emerge-$BOARD update_engine-client # after cros_workon --board=$BOARD start update_engine-client TEST=emerge-$BOARD dlcservice BUG=chromium:953642 Cq-Depend: chromium:1634490 Change-Id: I770539960cdfeb2319d6439c8fc53cd21b3fad2f Reviewed-on: https://chromium-review.googlesource.com/1634669 Commit-Ready: Keigo Oka Tested-by: Keigo Oka Legacy-Commit-Queue: Commit Bot Reviewed-by: Tatsuhisa Yamaguchi Reviewed-by: Keigo Oka --- client-headers/BUILD.gn | 35 ++++++++++++++++ .../libupdate_engine-client-test.pc.in | 0 .../libupdate_engine-client.pc.in | 0 update_engine-client.gyp | 41 ------------------- 4 files changed, 35 insertions(+), 41 deletions(-) create mode 100644 client-headers/BUILD.gn rename libupdate_engine-client-test.pc.in => client-headers/libupdate_engine-client-test.pc.in (100%) rename libupdate_engine-client.pc.in => client-headers/libupdate_engine-client.pc.in (100%) delete mode 100644 update_engine-client.gyp diff --git a/client-headers/BUILD.gn b/client-headers/BUILD.gn new file mode 100644 index 00000000..88f8bb9c --- /dev/null +++ b/client-headers/BUILD.gn @@ -0,0 +1,35 @@ +# +# 
Copyright (C) 2019 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import("//common-mk/generate-dbus-proxies.gni") + +group("all") { + deps = [ + ":libupdate_engine-client-headers", + ] +} + +# update_engine client library generated headers. Used by other daemons and +# by the update_engine_client console program to interact with update_engine. +generate_dbus_proxies("libupdate_engine-client-headers") { + sources = [ + "../dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml", + ] + dbus_service_config = "../dbus_bindings/dbus-service-config.json" + mock_output_file = "include/update_engine/dbus-proxy-mocks.h" + proxy_output_file = "include/update_engine/dbus-proxies.h" + proxy_path_in_mocks = "update_engine/dbus-proxies.h" +} diff --git a/libupdate_engine-client-test.pc.in b/client-headers/libupdate_engine-client-test.pc.in similarity index 100% rename from libupdate_engine-client-test.pc.in rename to client-headers/libupdate_engine-client-test.pc.in diff --git a/libupdate_engine-client.pc.in b/client-headers/libupdate_engine-client.pc.in similarity index 100% rename from libupdate_engine-client.pc.in rename to client-headers/libupdate_engine-client.pc.in diff --git a/update_engine-client.gyp b/update_engine-client.gyp deleted file mode 100644 index 588fc63b..00000000 --- a/update_engine-client.gyp +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright (C) 2015 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -{ - 'targets': [ - # update_engine client library generated headers. Used by other daemons and - # by the update_engine_client console program to interact with - # update_engine. 
- { - 'target_name': 'libupdate_engine-client-headers', - 'type': 'none', - 'actions': [ - { - 'action_name': 'update_engine_client-dbus-proxies', - 'variables': { - 'dbus_service_config': 'dbus_bindings/dbus-service-config.json', - 'proxy_output_file': 'include/update_engine/dbus-proxies.h', - 'mock_output_file': 'include/update_engine/dbus-proxy-mocks.h', - 'proxy_path_in_mocks': 'update_engine/dbus-proxies.h', - }, - 'sources': [ - 'dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml', - ], - 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'], - }, - ], - }, - ], -} From 3c7e4b604621ee7f0a02855bd61ddb1d5ec87e5e Mon Sep 17 00:00:00 2001 From: Dan Willemsen Date: Thu, 6 Jun 2019 14:56:15 -0700 Subject: [PATCH 031/624] Match src paths with aidl package name In order for the build system to track updates to the header files during incremental builds, always specify the src files using the same path as the package for C++ compilations. Bug: 112114177 Test: treehugger Change-Id: I08b4a0b8270188d67edac93ddec11b0e0cde25c0 --- Android.bp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/Android.bp b/Android.bp index e861797e..de4d3b3c 100644 --- a/Android.bp +++ b/Android.bp @@ -405,8 +405,7 @@ cc_library_shared { ], srcs: [ - "binder_bindings/android/brillo/IUpdateEngine.aidl", - "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl", + ":libupdate_engine_client_aidl", "client_library/client.cc", "client_library/client_binder.cc", "parcelable_update_engine_status.cc", @@ -414,6 +413,15 @@ cc_library_shared { ], } +filegroup { + name: "libupdate_engine_client_aidl", + srcs: [ + "binder_bindings/android/brillo/IUpdateEngine.aidl", + "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl", + ], + path: "binder_bindings", +} + // update_engine_client (type: executable) // ======================================================== // update_engine console client. From 8f2fa748ed8ebeae21341df4d50d1aebd0e633e7 Mon Sep 17 00:00:00 2001 From: Denis Nikitin Date: Wed, 29 May 2019 10:36:52 -0700 Subject: [PATCH 032/624] update_engine: Fix fallthrough warning Fix clang warnings which appears when -Wimplicit-fallthrough flag gets added in the build. BUG=chromium:904913 TEST=package build fixed with enabled warning Cq-Depend: chromium:1643102 Change-Id: I5a90de65f383fd14e7793647912abeed735a029c Reviewed-on: https://chromium-review.googlesource.com/1635712 Commit-Ready: Manoj Gupta Tested-by: Manoj Gupta Legacy-Commit-Queue: Commit Bot Reviewed-by: Manoj Gupta --- update_attempter.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/update_attempter.cc b/update_attempter.cc index 34b7bac9..d97917a5 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -624,8 +625,10 @@ void UpdateAttempter::CalculateStagingParams(bool interactive) { case StagingCase::kNoSavedValue: prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod, staging_wait_time_.InDays()); + FALLTHROUGH; case StagingCase::kSetStagingFromPref: omaha_request_params_->set_waiting_period(staging_wait_time_); + FALLTHROUGH; case StagingCase::kNoAction: // Staging is on, enable wallclock based wait so that its values get used. 
omaha_request_params_->set_wall_clock_based_wait_enabled(true); From 21ac9965fbd4a3dc5401f9b529224b9728fac469 Mon Sep 17 00:00:00 2001 From: Amr Aboelkher Date: Wed, 15 May 2019 14:50:05 +0200 Subject: [PATCH 033/624] Set the Quick fix build token if the device is enterprise enrolled Checks whether the DeviceQuickFixBuildToken has been set when : - device is enterprise enrolled - device has the policy been set BUG=chromium:962467 TEST=./build_packages --board=amd64-generic && \ cros_run_unit_tests --board=amd64-generic --packages update_engine Change-Id: Ie65167f7a83577256ee4bbfb07ea19ee4e4c6401 Reviewed-on: https://chromium-review.googlesource.com/1611549 Commit-Ready: ChromeOS CL Exonerator Bot Tested-by: Amr Aboelkher Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- update_attempter.cc | 3 ++- update_attempter_unittest.cc | 36 +++++++++++++++++++++++++----------- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/update_attempter.cc b/update_attempter.cc index d97917a5..fcafd56f 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -428,7 +428,8 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, omaha_request_params_->set_dlc_module_ids(dlc_module_ids_); omaha_request_params_->set_is_install(is_install_); - // Set Quick Fix Build token if policy is set. + // Set Quick Fix Build token if policy is set and the device is enterprise + // enrolled. string token; if (system_state_ && system_state_->device_policy()) { if (!system_state_->device_policy()->GetDeviceQuickFixBuildToken(&token)) diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 16819f83..e246e1bc 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -188,7 +188,7 @@ class UpdateAttempterTest : public ::testing::Test { void P2PEnabledInteractiveStart(); void P2PEnabledStartingFailsStart(); void P2PEnabledHousekeepingFailsStart(); - void UpdateToQuickFixBuildStart(); + void UpdateToQuickFixBuildStart(bool set_token); void ResetRollbackHappenedStart(bool is_consumer, bool is_policy_available, bool expected_reset); @@ -1583,28 +1583,42 @@ TEST_F(UpdateAttempterTest, attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); } -void UpdateAttempterTest::UpdateToQuickFixBuildStart() { - // Tests that checks if device_quick_fix_build_token arrives when - // policy is set. - const char kToken[] = "some_token"; - +void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) { + // Tests that checks if |device_quick_fix_build_token| arrives when + // policy is set and the device is enterprise enrolled based on |set_token|. + string token = set_token ? 
"some_token" : ""; auto device_policy = std::make_unique(); fake_system_state_.set_device_policy(device_policy.get()); - EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); - EXPECT_CALL(*device_policy, GetDeviceQuickFixBuildToken(_)) - .WillOnce(DoAll(SetArgPointee<0>(string(kToken)), Return(true))); + + if (set_token) + EXPECT_CALL(*device_policy, GetDeviceQuickFixBuildToken(_)) + .WillOnce(DoAll(SetArgPointee<0>(token), Return(true))); + else + EXPECT_CALL(*device_policy, GetDeviceQuickFixBuildToken(_)) + .WillOnce(Return(false)); attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); attempter_.Update("", "", "", "", false, false, 0, false, false); + EXPECT_EQ(token, attempter_.omaha_request_params_->autoupdate_token()); ScheduleQuitMainLoop(); } -TEST_F(UpdateAttempterTest, UpdateToQuickFixBuildStart) { +TEST_F(UpdateAttempterTest, + QuickFixTokenWhenDeviceIsEnterpriseEnrolledAndPolicyIsSet) { loop_.PostTask(FROM_HERE, base::Bind(&UpdateAttempterTest::UpdateToQuickFixBuildStart, - base::Unretained(this))); + base::Unretained(this), + /*set_token=*/true)); + loop_.Run(); +} + +TEST_F(UpdateAttempterTest, EmptyQuickFixToken) { + loop_.PostTask(FROM_HERE, + base::Bind(&UpdateAttempterTest::UpdateToQuickFixBuildStart, + base::Unretained(this), + /*set_token=*/false)); loop_.Run(); } From b7ee3875756ec586ac45e75f0fad80f6c8e3680a Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 6 Jun 2019 14:59:03 -0700 Subject: [PATCH 034/624] update_engine: Accumulate functions into OmahaRequestBuilderXml class. Convert functions within relation to GetRequestXml into OmahaRequestBuilderXml class. The refactoring allows for a complete encapsulation of required parameters to build the omaha request in xml format. The vision for OmahaRequestBuilder is an interface that opens up the possibility to create classes for building various formats of omaha requests (i.e. OmahaRequestBuilderJson). BUG=chromium:940505 TEST=cros_workon_make --board=octopus update_engine --test TEST=/usr/bin/update_engine_client --check_for_update # after bouncing update-engine + check /var/log/update_engine.log. Change-Id: I0b4501288fbf7127fc39513ef61b4ab4f8ceebd5 Reviewed-on: https://chromium-review.googlesource.com/1648075 Tested-by: Jae Hoon Kim Commit-Ready: Jae Hoon Kim Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- omaha_request_action.cc | 17 +- omaha_request_builder_xml.cc | 237 ++++++++++---------------- omaha_request_builder_xml.h | 137 ++++++++------- omaha_request_builder_xml_unittest.cc | 24 ++- 4 files changed, 197 insertions(+), 218 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index c4adec7f..f24cd42e 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -422,14 +422,15 @@ void OmahaRequestAction::PerformAction() { return; } - string request_post(GetRequestXml(event_.get(), - params_, - ping_only_, - ShouldPing(), // include_ping - ping_active_days_, - ping_roll_call_days_, - GetInstallDate(system_state_), - system_state_)); + OmahaRequestBuilderXml omaha_request(event_.get(), + params_, + ping_only_, + ShouldPing(), // include_ping + ping_active_days_, + ping_roll_call_days_, + GetInstallDate(system_state_), + system_state_->prefs()); + string request_post = omaha_request.GetRequest(); // Set X-Goog-Update headers. 
http_fetcher_->SetHeader(kXGoogleUpdateInteractivity, diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 2cb002ed..ad7c4249 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -80,15 +80,18 @@ string XmlEncodeWithDefault(const string& input, const string& default_value) { return default_value; } -string GetPingAttribute(const string& name, int ping_days) { - if (ping_days > 0 || ping_days == kNeverPinged) - return base::StringPrintf(" %s=\"%d\"", name.c_str(), ping_days); - return ""; -} +string OmahaRequestBuilderXml::GetPing() const { + // Returns an XML ping element attribute assignment with attribute + // |name| and value |ping_days| if |ping_days| has a value that needs + // to be sent, or an empty string otherwise. + auto GetPingAttribute = [](const char* name, int ping_days) -> string { + if (ping_days > 0 || ping_days == kNeverPinged) + return base::StringPrintf(" %s=\"%d\"", name, ping_days); + return ""; + }; -string GetPingXml(int ping_active_days, int ping_roll_call_days) { - string ping_active = GetPingAttribute("a", ping_active_days); - string ping_roll_call = GetPingAttribute("r", ping_roll_call_days); + string ping_active = GetPingAttribute("a", ping_active_days_); + string ping_roll_call = GetPingAttribute("r", ping_roll_call_days_); if (!ping_active.empty() || !ping_roll_call.empty()) { return base::StringPrintf(" \n", ping_active.c_str(), @@ -97,36 +100,27 @@ string GetPingXml(int ping_active_days, int ping_roll_call_days) { return ""; } -string GetAppBody(const OmahaEvent* event, - OmahaRequestParams* params, - bool ping_only, - bool include_ping, - bool skip_updatecheck, - int ping_active_days, - int ping_roll_call_days, - PrefsInterface* prefs) { +string OmahaRequestBuilderXml::GetAppBody(bool skip_updatecheck) const { string app_body; - if (event == nullptr) { - if (include_ping) - app_body = GetPingXml(ping_active_days, ping_roll_call_days); - if (!ping_only) { + if (event_ == nullptr) { + if (include_ping_) + app_body = GetPing(); + if (!ping_only_) { if (!skip_updatecheck) { app_body += " target_version_prefix().empty()) { + if (!params_->target_version_prefix().empty()) { app_body += base::StringPrintf( " targetversionprefix=\"%s\"", - XmlEncodeWithDefault(params->target_version_prefix(), "") - .c_str()); + XmlEncodeWithDefault(params_->target_version_prefix()).c_str()); // Rollback requires target_version_prefix set. - if (params->rollback_allowed()) { + if (params_->rollback_allowed()) { app_body += " rollback_allowed=\"true\""; } } - string autoupdate_token = params->autoupdate_token(); + string autoupdate_token = params_->autoupdate_token(); if (!autoupdate_token.empty()) { app_body += base::StringPrintf( - " token=\"%s\"", - XmlEncodeWithDefault(autoupdate_token, "").c_str()); + " token=\"%s\"", XmlEncodeWithDefault(autoupdate_token).c_str()); } app_body += ">\n"; @@ -141,7 +135,7 @@ string GetAppBody(const OmahaEvent* event, // rebooted. The previous version event is also not sent if it was already // sent for this new version with a previous updatecheck. 
string prev_version; - if (!prefs->GetString(kPrefsPreviousVersion, &prev_version)) { + if (!prefs_->GetString(kPrefsPreviousVersion, &prev_version)) { prev_version = "0.0.0.0"; } // We only store a non-empty previous version value after a successful @@ -154,7 +148,7 @@ string GetAppBody(const OmahaEvent* event, OmahaEvent::kTypeRebootedAfterUpdate, OmahaEvent::kResultSuccess, XmlEncodeWithDefault(prev_version, "0.0.0.0").c_str()); - LOG_IF(WARNING, !prefs->SetString(kPrefsPreviousVersion, "")) + LOG_IF(WARNING, !prefs_->SetString(kPrefsPreviousVersion, "")) << "Unable to reset the previous version."; } } @@ -162,29 +156,28 @@ string GetAppBody(const OmahaEvent* event, // The error code is an optional attribute so append it only if the result // is not success. string error_code; - if (event->result != OmahaEvent::kResultSuccess) { + if (event_->result != OmahaEvent::kResultSuccess) { error_code = base::StringPrintf(" errorcode=\"%d\"", - static_cast(event->error_code)); + static_cast(event_->error_code)); } app_body = base::StringPrintf( " \n", - event->type, - event->result, + event_->type, + event_->result, error_code.c_str()); } return app_body; } -string GetCohortArgXml(PrefsInterface* prefs, - const string arg_name, - const string prefs_key) { +string OmahaRequestBuilderXml::GetCohortArg(const string arg_name, + const string prefs_key) const { // There's nothing wrong with not having a given cohort setting, so we check // existence first to avoid the warning log message. - if (!prefs->Exists(prefs_key)) + if (!prefs_->Exists(prefs_key)) return ""; string cohort_value; - if (!prefs->GetString(prefs_key, &cohort_value) || cohort_value.empty()) + if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty()) return ""; // This is a sanity check to avoid sending a huge XML file back to Ohama due // to a compromised stateful partition making the update check fail in low @@ -215,30 +208,14 @@ bool IsValidComponentID(const string& id) { return true; } -string GetAppXml(const OmahaEvent* event, - OmahaRequestParams* params, - const OmahaAppData& app_data, - bool ping_only, - bool include_ping, - bool skip_updatecheck, - int ping_active_days, - int ping_roll_call_days, - int install_date_in_days, - SystemState* system_state) { - string app_body = GetAppBody(event, - params, - ping_only, - include_ping, - skip_updatecheck, - ping_active_days, - ping_roll_call_days, - system_state->prefs()); +string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { + string app_body = GetAppBody(app_data.skip_update); string app_versions; // If we are downgrading to a more stable channel and we are allowed to do // powerwash, then pass 0.0.0.0 as the version. This is needed to get the // highest-versioned payload on the destination channel. 
- if (params->ShouldPowerwash()) { + if (params_->ShouldPowerwash()) { LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash " << "on downgrading to the version in the more stable channel"; app_versions = "version=\"0.0.0.0\" from_version=\"" + @@ -248,47 +225,44 @@ string GetAppXml(const OmahaEvent* event, XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; } - string download_channel = params->download_channel(); + string download_channel = params_->download_channel(); string app_channels = - "track=\"" + XmlEncodeWithDefault(download_channel, "") + "\" "; - if (params->current_channel() != download_channel) { + "track=\"" + XmlEncodeWithDefault(download_channel) + "\" "; + if (params_->current_channel() != download_channel) { app_channels += "from_track=\"" + - XmlEncodeWithDefault(params->current_channel(), "") + "\" "; + XmlEncodeWithDefault(params_->current_channel()) + "\" "; } - string delta_okay_str = params->delta_okay() ? "true" : "false"; + string delta_okay_str = params_->delta_okay() ? "true" : "false"; // If install_date_days is not set (e.g. its value is -1 ), don't // include the attribute. string install_date_in_days_str = ""; - if (install_date_in_days >= 0) { + if (install_date_in_days_ >= 0) { install_date_in_days_str = - base::StringPrintf("installdate=\"%d\" ", install_date_in_days); + base::StringPrintf("installdate=\"%d\" ", install_date_in_days_); } string app_cohort_args; - app_cohort_args += - GetCohortArgXml(system_state->prefs(), "cohort", kPrefsOmahaCohort); - app_cohort_args += GetCohortArgXml( - system_state->prefs(), "cohorthint", kPrefsOmahaCohortHint); - app_cohort_args += GetCohortArgXml( - system_state->prefs(), "cohortname", kPrefsOmahaCohortName); + app_cohort_args += GetCohortArg("cohort", kPrefsOmahaCohort); + app_cohort_args += GetCohortArg("cohorthint", kPrefsOmahaCohortHint); + app_cohort_args += GetCohortArg("cohortname", kPrefsOmahaCohortName); string fingerprint_arg; - if (!params->os_build_fingerprint().empty()) { + if (!params_->os_build_fingerprint().empty()) { fingerprint_arg = "fingerprint=\"" + - XmlEncodeWithDefault(params->os_build_fingerprint(), "") + + XmlEncodeWithDefault(params_->os_build_fingerprint()) + "\" "; } string buildtype_arg; - if (!params->os_build_type().empty()) { + if (!params_->os_build_type().empty()) { buildtype_arg = "os_build_type=\"" + - XmlEncodeWithDefault(params->os_build_type(), "") + "\" "; + XmlEncodeWithDefault(params_->os_build_type()) + "\" "; } string product_components_args; - if (!params->ShouldPowerwash() && !app_data.product_components.empty()) { + if (!params_->ShouldPowerwash() && !app_data.product_components.empty()) { brillo::KeyValueStore store; if (store.LoadFromString(app_data.product_components)) { for (const string& key : store.GetKeys()) { @@ -305,7 +279,7 @@ string GetAppXml(const OmahaEvent* event, product_components_args += base::StringPrintf("_%s.version=\"%s\" ", key.c_str(), - XmlEncodeWithDefault(version, "").c_str()); + XmlEncodeWithDefault(version).c_str()); } } else { LOG(ERROR) << "Failed to parse product_components:\n" @@ -314,27 +288,27 @@ string GetAppXml(const OmahaEvent* event, } string requisition_arg; - if (!params->device_requisition().empty()) { + if (!params_->device_requisition().empty()) { requisition_arg = "requisition=\"" + - XmlEncodeWithDefault(params->device_requisition(), "") + + XmlEncodeWithDefault(params_->device_requisition()) + "\" "; } // clang-format off string app_xml = " app_lang(), "en-US") + "\" " + - "board=\"" + 
XmlEncodeWithDefault(params->os_board(), "") + "\" " + - "hardware_class=\"" + XmlEncodeWithDefault(params->hwid(), "") + "\" " + + "lang=\"" + XmlEncodeWithDefault(params_->app_lang(), "en-US") + "\" " + + "board=\"" + XmlEncodeWithDefault(params_->os_board()) + "\" " + + "hardware_class=\"" + XmlEncodeWithDefault(params_->hwid()) + "\" " + "delta_okay=\"" + delta_okay_str + "\" " - "fw_version=\"" + XmlEncodeWithDefault(params->fw_version(), "") + "\" " + - "ec_version=\"" + XmlEncodeWithDefault(params->ec_version(), "") + "\" " + + "fw_version=\"" + XmlEncodeWithDefault(params_->fw_version()) + "\" " + + "ec_version=\"" + XmlEncodeWithDefault(params_->ec_version()) + "\" " + install_date_in_days_str + requisition_arg + ">\n" + @@ -344,73 +318,21 @@ string GetAppXml(const OmahaEvent* event, return app_xml; } -string GetOsXml(OmahaRequestParams* params) { +string OmahaRequestBuilderXml::GetOs() const { string os_xml = " os_version(), "") + "\" " + "platform=\"" + - XmlEncodeWithDefault(params->os_platform(), "") + "\" " + "sp=\"" + - XmlEncodeWithDefault(params->os_sp(), "") + + XmlEncodeWithDefault(params_->os_version()) + "\" " + "platform=\"" + + XmlEncodeWithDefault(params_->os_platform()) + "\" " + "sp=\"" + + XmlEncodeWithDefault(params_->os_sp()) + "\">" "\n"; return os_xml; } -string GetRequestXml(const OmahaEvent* event, - OmahaRequestParams* params, - bool ping_only, - bool include_ping, - int ping_active_days, - int ping_roll_call_days, - int install_date_in_days, - SystemState* system_state) { - string os_xml = GetOsXml(params); - OmahaAppData product_app = { - .id = params->GetAppId(), - .version = params->app_version(), - .product_components = params->product_components()}; - // Skips updatecheck for platform app in case of an install operation. - string app_xml = GetAppXml(event, - params, - product_app, - ping_only, - include_ping, - params->is_install(), /* skip_updatecheck */ - ping_active_days, - ping_roll_call_days, - install_date_in_days, - system_state); - if (!params->system_app_id().empty()) { - OmahaAppData system_app = {.id = params->system_app_id(), - .version = params->system_version()}; - app_xml += GetAppXml(event, - params, - system_app, - ping_only, - include_ping, - false, /* skip_updatecheck */ - ping_active_days, - ping_roll_call_days, - install_date_in_days, - system_state); - } - // Create APP ID according to |dlc_module_id| (sticking the current AppID to - // the DLC module ID with an underscode). - for (const auto& dlc_module_id : params->dlc_module_ids()) { - OmahaAppData dlc_module_app = { - .id = params->GetAppId() + "_" + dlc_module_id, - .version = params->app_version()}; - app_xml += GetAppXml(event, - params, - dlc_module_app, - ping_only, - include_ping, - false, /* skip_updatecheck */ - ping_active_days, - ping_roll_call_days, - install_date_in_days, - system_state); - } +string OmahaRequestBuilderXml::GetRequest() const { + string os_xml = GetOs(); + string app_xml = GetApps(); string request_xml = base::StringPrintf( "\n" @@ -418,11 +340,38 @@ string GetRequestXml(const OmahaEvent* event, " installsource=\"%s\" ismachine=\"1\">\n%s%s\n", constants::kOmahaUpdaterID, kOmahaUpdaterVersion, - params->interactive() ? "ondemandupdate" : "scheduler", + params_->interactive() ? 
"ondemandupdate" : "scheduler", os_xml.c_str(), app_xml.c_str()); return request_xml; } +string OmahaRequestBuilderXml::GetApps() const { + string app_xml = ""; + OmahaAppData product_app = { + .id = params_->GetAppId(), + .version = params_->app_version(), + .product_components = params_->product_components(), + // Skips updatecheck for platform app in case of an install operation. + .skip_update = params_->is_install()}; + app_xml += GetApp(product_app); + if (!params_->system_app_id().empty()) { + OmahaAppData system_app = {.id = params_->system_app_id(), + .version = params_->system_version(), + .skip_update = false}; + app_xml += GetApp(system_app); + } + // Create APP ID according to |dlc_module_id| (sticking the current AppID to + // the DLC module ID with an underscode). + for (const auto& dlc_module_id : params_->dlc_module_ids()) { + OmahaAppData dlc_module_app = { + .id = params_->GetAppId() + "_" + dlc_module_id, + .version = params_->app_version(), + .skip_update = false}; + app_xml += GetApp(dlc_module_app); + } + return app_xml; +} + } // namespace chromeos_update_engine diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h index 011c5929..c390b9ef 100644 --- a/omaha_request_builder_xml.h +++ b/omaha_request_builder_xml.h @@ -36,9 +36,6 @@ #include "update_engine/omaha_response.h" #include "update_engine/system_state.h" -// TODO(ahassani): Make the xml builder into a class of its own so we don't have -// to pass all these parameters around. - namespace chromeos_update_engine { extern const int kNeverPinged; @@ -87,74 +84,98 @@ struct OmahaAppData { std::string id; std::string version; std::string product_components; + bool skip_update; }; // Encodes XML entities in a given string. Input must be ASCII-7 valid. If // the input is invalid, the default value is used instead. std::string XmlEncodeWithDefault(const std::string& input, - const std::string& default_value); + const std::string& default_value = ""); // Escapes text so it can be included as character data and attribute // values. The |input| string must be valid ASCII-7, no UTF-8 supported. // Returns whether the |input| was valid and escaped properly in |output|. bool XmlEncode(const std::string& input, std::string* output); -// Returns an XML ping element attribute assignment with attribute -// |name| and value |ping_days| if |ping_days| has a value that needs -// to be sent, or an empty string otherwise. -std::string GetPingAttribute(const std::string& name, int ping_days); - -// Returns an XML ping element if any of the elapsed days need to be -// sent, or an empty string otherwise. -std::string GetPingXml(int ping_active_days, int ping_roll_call_days); - -// Returns an XML that goes into the body of the element of the Omaha -// request based on the given parameters. -std::string GetAppBody(const OmahaEvent* event, - OmahaRequestParams* params, - bool ping_only, - bool include_ping, - bool skip_updatecheck, - int ping_active_days, - int ping_roll_call_days, - PrefsInterface* prefs); - -// Returns the cohort* argument to include in the tag for the passed -// |arg_name| and |prefs_key|, if any. The return value is suitable to -// concatenate to the list of arguments and includes a space at the end. -std::string GetCohortArgXml(PrefsInterface* prefs, - const std::string arg_name, - const std::string prefs_key); - +// Returns a boolean based on examining each character on whether it's a valid +// component (meaning all characters are an alphanum excluding '-', '_', '.'). 
bool IsValidComponentID(const std::string& id); -// Returns an XML that corresponds to the entire node of the Omaha -// request based on the given parameters. -std::string GetAppXml(const OmahaEvent* event, - OmahaRequestParams* params, - const OmahaAppData& app_data, - bool ping_only, - bool include_ping, - bool skip_updatecheck, - int ping_active_days, - int ping_roll_call_days, - int install_date_in_days, - SystemState* system_state); - -// Returns an XML that corresponds to the entire node of the Omaha -// request based on the given parameters. -std::string GetOsXml(OmahaRequestParams* params); - -// Returns an XML that corresponds to the entire Omaha request based on the -// given parameters. -std::string GetRequestXml(const OmahaEvent* event, - OmahaRequestParams* params, - bool ping_only, - bool include_ping, - int ping_active_days, - int ping_roll_call_days, - int install_date_in_days, - SystemState* system_state); +class OmahaRequestBuilder { + public: + OmahaRequestBuilder() = default; + virtual ~OmahaRequestBuilder() = default; + + virtual std::string GetRequest() const = 0; + + private: + DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilder); +}; + +class OmahaRequestBuilderXml : OmahaRequestBuilder { + public: + OmahaRequestBuilderXml(const OmahaEvent* event, + OmahaRequestParams* params, + bool ping_only, + bool include_ping, + int ping_active_days, + int ping_roll_call_days, + int install_date_in_days, + PrefsInterface* prefs) + : event_(event), + params_(params), + ping_only_(ping_only), + include_ping_(include_ping), + ping_active_days_(ping_active_days), + ping_roll_call_days_(ping_roll_call_days), + install_date_in_days_(install_date_in_days), + prefs_(prefs) {} + + ~OmahaRequestBuilderXml() override = default; + + // Returns an XML that corresponds to the entire Omaha request. + std::string GetRequest() const override; + + private: + // Returns an XML that corresponds to the entire node of the Omaha + // request based on the member variables. + std::string GetOs() const; + + // Returns an XML that corresponds to all nodes of the Omaha + // request based on the given parameters. + std::string GetApps() const; + + // Returns an XML that corresponds to the single node of the Omaha + // request based on the given parameters. + std::string GetApp(const OmahaAppData& app_data) const; + + // Returns an XML that goes into the body of the element of the Omaha + // request based on the given parameters. + // The skip_updatecheck argument if set to true will omit the emission of + // the updatecheck xml tag in the body of the element. + std::string GetAppBody(bool skip_updatecheck) const; + + // Returns the cohort* argument to include in the tag for the passed + // |arg_name| and |prefs_key|, if any. The return value is suitable to + // concatenate to the list of arguments and includes a space at the end. + std::string GetCohortArg(const std::string arg_name, + const std::string prefs_key) const; + + // Returns an XML ping element if any of the elapsed days need to be + // sent, or an empty string otherwise. 
+ std::string GetPing() const; + + const OmahaEvent* event_; + OmahaRequestParams* params_; + bool ping_only_; + bool include_ping_; + int ping_active_days_; + int ping_roll_call_days_; + int install_date_in_days_; + PrefsInterface* prefs_; + + DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilderXml); +}; } // namespace chromeos_update_engine diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 3293c44d..5c37571e 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -17,10 +17,14 @@ #include "update_engine/omaha_request_builder_xml.h" #include +#include +#include #include +using std::pair; using std::string; +using std::vector; namespace chromeos_update_engine { @@ -28,14 +32,17 @@ class OmahaRequestBuilderXmlTest : public ::testing::Test {}; TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeTest) { string output; - EXPECT_TRUE(XmlEncode("ab", &output)); - EXPECT_EQ("ab", output); - EXPECT_TRUE(XmlEncode("a\"\'\\", &output)); - EXPECT_EQ("<&>"'\\", output); - EXPECT_TRUE(XmlEncode("<&>", &output)); - EXPECT_EQ("&lt;&amp;&gt;", output); + vector> xml_encode_pairs = { + {"ab", "ab"}, + {"a\"\'\\", "<&>"'\\"}, + {"<&>", "&lt;&amp;&gt;"}}; + for (const auto& xml_encode_pair : xml_encode_pairs) { + const auto& before_encoding = xml_encode_pair.first; + const auto& after_encoding = xml_encode_pair.second; + EXPECT_TRUE(XmlEncode(before_encoding, &output)); + EXPECT_EQ(after_encoding, output); + } // Check that unterminated UTF-8 strings are handled properly. EXPECT_FALSE(XmlEncode("\xc2", &output)); // Fail with invalid ASCII-7 chars. @@ -43,6 +50,7 @@ TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeTest) { } TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) { + EXPECT_EQ("", XmlEncodeWithDefault("")); EXPECT_EQ("<&>", XmlEncodeWithDefault("<&>", "something else")); EXPECT_EQ("", XmlEncodeWithDefault("\xc2", "")); } From 7982100a403be400db0030cf9f18390eb021610e Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 6 May 2019 17:40:49 -0700 Subject: [PATCH 035/624] update_engine: Add --properties_format flag to delta_generator We need to be able to capture information about a payload by just looking at it. This information is needed for Nebraska to be able to process a response to a request. The information includes: - Payload and its metadata hashes and sizes. - Payload metadata signature. - The APP ID of the original image. - Whether the payload is a delta or full. - The payload's target version. This CL adds the ability to generate a JSON file with the payload's properties, such as those listed above. This CL also refactors how this information is generated into a single class.
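As a rough usage sketch (illustrative only: PayloadProperties and GetPropertiesAsJson() are the class and method added by this CL, while DumpPayloadJson() is a hypothetical caller with error handling elided):

  #include <string>
  #include "update_engine/payload_generator/payload_properties.h"

  // Hypothetical helper: fills |out_json| with the payload's properties as a
  // JSON string, using keys such as "version", "sha256_hex", "metadata_size",
  // "metadata_signature", "size", "is_delta", "target_version" and, for delta
  // payloads, "source_version".
  bool DumpPayloadJson(const std::string& payload_path, std::string* out_json) {
    chromeos_update_engine::PayloadProperties props(payload_path);
    return props.GetPropertiesAsJson(out_json);
  }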
BUG=chromium:960433 TEST=delta_generator --in_file=hello-signed-delta --properties_file=prop TEST=delta_generator --in_file=hello-signed-delta --properties_file=payload.json --properties_format="json" Change-Id: Ia61be0bf37bcacfd82f8982a7977fdae2f18cb30 Reviewed-on: https://chromium-review.googlesource.com/1610801 Tested-by: Amin Hassani Commit-Ready: ChromeOS CL Exonerator Bot Legacy-Commit-Queue: Commit Bot Reviewed-by: Sen Jiang Reviewed-by: Nicolas Norvez --- Android.bp | 1 + BUILD.gn | 1 + payload_consumer/payload_metadata.cc | 31 +++++ payload_consumer/payload_metadata.h | 7 ++ payload_generator/generate_delta_main.cc | 37 ++++-- payload_generator/payload_properties.cc | 143 +++++++++++++++++++++++ payload_generator/payload_properties.h | 73 ++++++++++++ payload_generator/payload_signer.cc | 31 ----- payload_generator/payload_signer.h | 3 - 9 files changed, 285 insertions(+), 42 deletions(-) create mode 100644 payload_generator/payload_properties.cc create mode 100644 payload_generator/payload_properties.h diff --git a/Android.bp b/Android.bp index dac1acde..0c3cac14 100644 --- a/Android.bp +++ b/Android.bp @@ -498,6 +498,7 @@ cc_library_static { "payload_generator/payload_file.cc", "payload_generator/payload_generation_config_android.cc", "payload_generator/payload_generation_config.cc", + "payload_generator/payload_properties.cc", "payload_generator/payload_signer.cc", "payload_generator/raw_filesystem.cc", "payload_generator/squashfs_filesystem.cc", diff --git a/BUILD.gn b/BUILD.gn index fc25cd91..3e54eeff 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -350,6 +350,7 @@ static_library("libpayload_generator") { "payload_generator/payload_file.cc", "payload_generator/payload_generation_config.cc", "payload_generator/payload_generation_config_chromeos.cc", + "payload_generator/payload_properties.cc", "payload_generator/payload_signer.cc", "payload_generator/raw_filesystem.cc", "payload_generator/squashfs_filesystem.cc", diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index b631c87c..337edb43 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -25,6 +25,8 @@ #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/payload_verifier.h" +using std::string; + namespace chromeos_update_engine { const uint64_t PayloadMetadata::kDeltaVersionOffset = sizeof(kDeltaMagic); @@ -224,4 +226,33 @@ ErrorCode PayloadMetadata::ValidateMetadataSignature( return ErrorCode::kSuccess; } +bool PayloadMetadata::ParsePayloadFile(const string& payload_path, + DeltaArchiveManifest* manifest, + Signatures* metadata_signatures) { + brillo::Blob payload; + TEST_AND_RETURN_FALSE( + utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload)); + TEST_AND_RETURN_FALSE(ParsePayloadHeader(payload)); + + if (manifest != nullptr) { + TEST_AND_RETURN_FALSE( + utils::ReadFileChunk(payload_path, + kMaxPayloadHeaderSize, + GetMetadataSize() - kMaxPayloadHeaderSize, + &payload)); + TEST_AND_RETURN_FALSE(GetManifest(payload, manifest)); + } + + if (metadata_signatures != nullptr && + GetMajorVersion() >= kBrilloMajorPayloadVersion) { + payload.clear(); + TEST_AND_RETURN_FALSE(utils::ReadFileChunk( + payload_path, GetMetadataSize(), GetMetadataSignatureSize(), &payload)); + TEST_AND_RETURN_FALSE( + metadata_signatures->ParseFromArray(payload.data(), payload.size())); + } + + return true; +} + } // namespace chromeos_update_engine diff --git a/payload_consumer/payload_metadata.h 
b/payload_consumer/payload_metadata.h index 1b4c5c89..ec8eea63 100644 --- a/payload_consumer/payload_metadata.h +++ b/payload_consumer/payload_metadata.h @@ -86,6 +86,13 @@ class PayloadMetadata { bool GetManifest(const brillo::Blob& payload, DeltaArchiveManifest* out_manifest) const; + // Parses a payload file |payload_path| and prepares the metadata properties, + // manifest and metadata signatures. Can be used as an easy to use utility to + // get the payload information without manually the process. + bool ParsePayloadFile(const std::string& payload_path, + DeltaArchiveManifest* manifest, + Signatures* metadata_signatures); + private: // Set |*out_offset| to the byte offset at which the manifest protobuf begins // in a payload. Return true on success, false if the offset is unknown. diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index de0a0918..10ae2a05 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -38,6 +38,7 @@ #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_generator/delta_diff_generator.h" #include "update_engine/payload_generator/payload_generation_config.h" +#include "update_engine/payload_generator/payload_properties.h" #include "update_engine/payload_generator/payload_signer.h" #include "update_engine/payload_generator/xz.h" #include "update_engine/update_metadata.pb.h" @@ -53,6 +54,9 @@ namespace chromeos_update_engine { namespace { +constexpr char kPayloadPropertiesFormatKeyValue[] = "key-value"; +constexpr char kPayloadPropertiesFormatJson[] = "json"; + void ParseSignatureSizes(const string& signature_sizes_flag, vector* signature_sizes) { signature_sizes->clear(); @@ -268,14 +272,24 @@ bool ApplyPayload(const string& payload_file, return true; } -int ExtractProperties(const string& payload_path, const string& props_file) { - brillo::KeyValueStore properties; - TEST_AND_RETURN_FALSE( - PayloadSigner::ExtractPayloadProperties(payload_path, &properties)); +bool ExtractProperties(const string& payload_path, + const string& props_file, + const string& props_format) { + string properties; + PayloadProperties payload_props(payload_path); + if (props_format == kPayloadPropertiesFormatKeyValue) { + TEST_AND_RETURN_FALSE(payload_props.GetPropertiesAsKeyValue(&properties)); + } else if (props_format == kPayloadPropertiesFormatJson) { + TEST_AND_RETURN_FALSE(payload_props.GetPropertiesAsJson(&properties)); + } else { + LOG(FATAL) << "Invalid option " << props_format + << " for --properties_format flag."; + } if (props_file == "-") { - printf("%s", properties.SaveToString().c_str()); + printf("%s", properties.c_str()); } else { - properties.Save(base::FilePath(props_file)); + utils::WriteFile( + props_file.c_str(), properties.c_str(), properties.length()); LOG(INFO) << "Generated properties file at " << props_file; } return true; @@ -362,7 +376,11 @@ int Main(int argc, char** argv) { DEFINE_string(properties_file, "", "If passed, dumps the payload properties of the payload passed " - "in --in_file and exits."); + "in --in_file and exits. Look at --properties_format."); + DEFINE_string(properties_format, + kPayloadPropertiesFormatKeyValue, + "Defines the format of the --properties_file. 
The acceptable " + "values are: key-value (default) and json"); DEFINE_int64(max_timestamp, 0, "The maximum timestamp of the OS allowed to apply this " @@ -467,7 +485,10 @@ int Main(int argc, char** argv) { return VerifySignedPayload(FLAGS_in_file, FLAGS_public_key); } if (!FLAGS_properties_file.empty()) { - return ExtractProperties(FLAGS_in_file, FLAGS_properties_file) ? 0 : 1; + return ExtractProperties( + FLAGS_in_file, FLAGS_properties_file, FLAGS_properties_format) + ? 0 + : 1; } // A payload generation was requested. Convert the flags to a diff --git a/payload_generator/payload_properties.cc b/payload_generator/payload_properties.cc new file mode 100644 index 00000000..53e69f38 --- /dev/null +++ b/payload_generator/payload_properties.cc @@ -0,0 +1,143 @@ +// +// Copyright 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_generator/payload_properties.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "update_engine/common/constants.h" +#include "update_engine/common/hash_calculator.h" +#include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/payload_metadata.h" +#include "update_engine/update_metadata.pb.h" + +using std::string; +using std::vector; + +namespace chromeos_update_engine { + +namespace { +// These ones are needed by the GoldenEye. +const char kPayloadPropertyJsonVersion[] = "version"; +const char kPayloadPropertyJsonPayloadHash[] = "sha256_hex"; +const char kPayloadPropertyJsonMetadataSize[] = "metadata_size"; +const char kPayloadPropertyJsonMetadataSignature[] = "metadata_signature"; + +// These are needed by the Nebraska and devserver. 
+const char kPayloadPropertyJsonPayloadSize[] = "size"; +const char kPayloadPropertyJsonIsDelta[] = "is_delta"; +const char kPayloadPropertyJsonTargetVersion[] = "target_version"; +const char kPayloadPropertyJsonSourceVersion[] = "source_version"; +} // namespace + +PayloadProperties::PayloadProperties(const string& payload_path) + : payload_path_(payload_path) {} + +bool PayloadProperties::GetPropertiesAsJson(string* json_str) { + TEST_AND_RETURN_FALSE(LoadFromPayload()); + + base::DictionaryValue properties; + properties.SetInteger(kPayloadPropertyJsonVersion, version_); + properties.SetInteger(kPayloadPropertyJsonMetadataSize, metadata_size_); + properties.SetString(kPayloadPropertyJsonMetadataSignature, + metadata_signatures_); + properties.SetInteger(kPayloadPropertyJsonPayloadSize, payload_size_); + properties.SetString(kPayloadPropertyJsonPayloadHash, payload_hash_); + properties.SetBoolean(kPayloadPropertyJsonIsDelta, is_delta_); + properties.SetString(kPayloadPropertyJsonTargetVersion, target_version_); + if (is_delta_) { + properties.SetString(kPayloadPropertyJsonSourceVersion, source_version_); + } + + return base::JSONWriter::Write(properties, json_str); +} + +bool PayloadProperties::GetPropertiesAsKeyValue(string* key_value_str) { + TEST_AND_RETURN_FALSE(LoadFromPayload()); + + brillo::KeyValueStore properties; + properties.SetString(kPayloadPropertyFileSize, std::to_string(payload_size_)); + properties.SetString(kPayloadPropertyMetadataSize, + std::to_string(metadata_size_)); + properties.SetString(kPayloadPropertyFileHash, payload_hash_); + properties.SetString(kPayloadPropertyMetadataHash, metadata_hash_); + + *key_value_str = properties.SaveToString(); + return true; +} + +bool PayloadProperties::LoadFromPayload() { + PayloadMetadata payload_metadata; + DeltaArchiveManifest manifest; + Signatures metadata_signatures; + TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadFile( + payload_path_, &manifest, &metadata_signatures)); + + metadata_size_ = payload_metadata.GetMetadataSize(); + payload_size_ = utils::FileSize(payload_path_); + + brillo::Blob metadata_hash; + TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile( + payload_path_, metadata_size_, &metadata_hash) == + static_cast(metadata_size_)); + metadata_hash_ = brillo::data_encoding::Base64Encode(metadata_hash); + + brillo::Blob payload_hash; + TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile( + payload_path_, payload_size_, &payload_hash) == + static_cast(payload_size_)); + payload_hash_ = brillo::data_encoding::Base64Encode(payload_hash); + + if (payload_metadata.GetMetadataSignatureSize() > 0) { + TEST_AND_RETURN_FALSE(metadata_signatures.signatures_size() > 0); + vector base64_signatures; + for (const auto& sig : metadata_signatures.signatures()) { + base64_signatures.push_back( + brillo::data_encoding::Base64Encode(sig.data())); + } + metadata_signatures_ = base::JoinString(base64_signatures, ":"); + } + + is_delta_ = manifest.has_old_image_info() || manifest.has_old_kernel_info() || + manifest.has_old_rootfs_info() || + std::any_of(manifest.partitions().begin(), + manifest.partitions().end(), + [](const PartitionUpdate& part) { + return part.has_old_partition_info(); + }); + + if (manifest.has_new_image_info()) { + target_version_ = manifest.new_image_info().version(); + } else { + target_version_ = "99999.0.0"; + } + + // No need to set the source version if it was not a delta payload. 
+ if (is_delta_ && manifest.has_old_image_info()) { + source_version_ = manifest.old_image_info().version(); + } + return true; +} + +} // namespace chromeos_update_engine diff --git a/payload_generator/payload_properties.h b/payload_generator/payload_properties.h new file mode 100644 index 00000000..3b34511c --- /dev/null +++ b/payload_generator/payload_properties.h @@ -0,0 +1,73 @@ +// +// Copyright 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_ +#define UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_ + +#include + +#include +#include + +namespace chromeos_update_engine { + +// A class for extracting information about a payload from the payload file +// itself. Currently the metadata can be exported as a json file or a key/value +// properties file. But more can be added if required. +class PayloadProperties { + public: + explicit PayloadProperties(const std::string& payload_path); + ~PayloadProperties() = default; + + // Get the properties in a json format. The json file will be used in + // autotests, cros flash, etc. Mainly in Chrome OS. + bool GetPropertiesAsJson(std::string* json_str); + + // Get the properties of the payload as a key/value store. This is mainly used + // in Android. + bool GetPropertiesAsKeyValue(std::string* key_value_str); + + private: + // Does the main job of reading the payload and extracting information from + // it. + bool LoadFromPayload(); + + // The path to the payload file. + std::string payload_path_; + + // The version of the metadata json format. If the output json file changes + // format, this needs to be increased. + int version_{2}; + + size_t metadata_size_; + std::string metadata_hash_; + std::string metadata_signatures_; + + size_t payload_size_; + std::string payload_hash_; + + // Whether the payload is a delta (true) or full (false). 
+ bool is_delta_; + + std::string target_version_; + std::string source_version_; + + DISALLOW_COPY_AND_ASSIGN(PayloadProperties); +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_ diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 2d0489a2..2a7021f0 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -439,35 +439,4 @@ bool PayloadSigner::GetMetadataSignature(const void* const metadata, return true; } -bool PayloadSigner::ExtractPayloadProperties( - const string& payload_path, brillo::KeyValueStore* properties) { - brillo::Blob payload; - TEST_AND_RETURN_FALSE( - utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload)); - - PayloadMetadata payload_metadata; - TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload)); - uint64_t metadata_size = payload_metadata.GetMetadataSize(); - - uint64_t file_size = utils::FileSize(payload_path); - properties->SetString(kPayloadPropertyFileSize, std::to_string(file_size)); - properties->SetString(kPayloadPropertyMetadataSize, - std::to_string(metadata_size)); - - brillo::Blob file_hash, metadata_hash; - TEST_AND_RETURN_FALSE( - HashCalculator::RawHashOfFile(payload_path, file_size, &file_hash) == - static_cast(file_size)); - - TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile( - payload_path, metadata_size, &metadata_hash) == - static_cast(metadata_size)); - - properties->SetString(kPayloadPropertyFileHash, - brillo::data_encoding::Base64Encode(file_hash)); - properties->SetString(kPayloadPropertyMetadataHash, - brillo::data_encoding::Base64Encode(metadata_hash)); - return true; -} - } // namespace chromeos_update_engine diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h index b2d6606c..83ddadc1 100644 --- a/payload_generator/payload_signer.h +++ b/payload_generator/payload_signer.h @@ -119,9 +119,6 @@ class PayloadSigner { const std::string& private_key_path, std::string* out_signature); - static bool ExtractPayloadProperties(const std::string& payload_path, - brillo::KeyValueStore* properties); - private: // This should never be constructed DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadSigner); From 6ada591382e2f60146006156a60a81df02e31c6d Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Fri, 14 Jun 2019 10:11:34 -0700 Subject: [PATCH 036/624] update_engine: Make the ChromeOS/AOSP Omaha Client transmit requestid. As per Omaha's protocol specification, ChromeOS/AOSP needs to transmit the 'requestid` attribute in the request. The format of the 'requestid' attribute is sent as GUID version 4. This change will add the 'requestid' attribute in the omaha request. BUG=chromium:940505 TEST=cros_workon_make --board=octopus update_engine --test TEST=/usr/bin/update_engine_client --check_for_update # after bouncing update-engine + check /var/log/update_engine.log. 'requestid' attribute will be in the omaha request. 
Change-Id: I76f1fe82d1e976b5316b4af9148097f1266dea91 Reviewed-on: https://chromium-review.googlesource.com/1653709 Tested-by: Jae Hoon Kim Commit-Ready: Jae Hoon Kim Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- omaha_request_builder_xml.cc | 5 ++- omaha_request_builder_xml_unittest.cc | 47 ++++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index ad7c4249..e335c40a 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -20,6 +20,7 @@ #include +#include #include #include #include @@ -336,8 +337,10 @@ string OmahaRequestBuilderXml::GetRequest() const { string request_xml = base::StringPrintf( "\n" - "\n%s%s\n", + base::GenerateGUID().c_str() /* requestid */, constants::kOmahaUpdaterID, kOmahaUpdaterVersion, params_->interactive() ? "ondemandupdate" : "scheduler", diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 5c37571e..23abebbc 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -20,15 +20,40 @@ #include #include +#include #include +#include "update_engine/fake_system_state.h" + using std::pair; using std::string; using std::vector; namespace chromeos_update_engine { -class OmahaRequestBuilderXmlTest : public ::testing::Test {}; +namespace { +// Helper to find key and extract value from the given string |xml|, instead +// of using a full parser. The attribute key will be followed by "=\"" as xml +// attribute values must be within double quotes (not single quotes). +static string FindAttributeKeyValueInXml(const string& xml, + const string& key, + const size_t val_size) { + string key_with_quotes = key + "=\""; + const size_t val_start_pos = xml.find(key); + if (val_start_pos == string::npos) + return ""; + return xml.substr(val_start_pos + key_with_quotes.size(), val_size); +} +} // namespace + +class OmahaRequestBuilderXmlTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} + + FakeSystemState fake_system_state_; + static constexpr size_t kGuidSize = 36; +}; TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeTest) { string output; @@ -55,4 +80,24 @@ TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) { EXPECT_EQ("", XmlEncodeWithDefault("\xc2", "")); } +TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { + OmahaEvent omaha_event; + OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestBuilderXml omaha_request{&omaha_event, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs()}; + const string request_xml = omaha_request.GetRequest(); + const string key = "requestid"; + const string request_id = + FindAttributeKeyValueInXml(request_xml, key, kGuidSize); + // A valid |request_id| is either a GUID version 4 or empty string. + if (!request_id.empty()) + EXPECT_TRUE(base::IsValidGUID(request_id)); +} + } // namespace chromeos_update_engine From 9d9492ff411c649807a33693e610f05e3ae10595 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 17 Jun 2019 14:52:48 -0700 Subject: [PATCH 037/624] update_engine: Indicate existence of owner instead of owner's email in log. Logging the device owner's email is possibly a PII, so instead log whether the device has a owner as a boolean value. Enterprise devices do not have a device owner. The variable var_has_owner is dependent on DevicePolicy::GetOwner(). 
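Conceptually, the change boils down to the following conversion (a simplified sketch of the ConvertHasOwner() wrapper added to real_device_policy_provider.cc; the real code routes the result through an AsyncCopyVariable):

  // Maps the owner string from device policy to a boolean. An empty owner
  // string (the enterprise-enrolled case) becomes has_owner == false.
  bool ConvertHasOwner(bool* has_owner) const {
    std::string owner;
    if (!policy_provider_->GetDevicePolicy().GetOwner(&owner))
      return false;  // Owner not available; the variable stays unset.
    *has_owner = !owner.empty();
    return true;
  }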
BUG=chromium:973108 TEST=unittest Change-Id: I535f664a4fcf75c6102346b8566605710b062255 Reviewed-on: https://chromium-review.googlesource.com/1660911 Tested-by: Jae Hoon Kim Commit-Ready: Jae Hoon Kim Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- update_manager/chromeos_policy.cc | 5 +- update_manager/chromeos_policy_unittest.cc | 4 +- update_manager/device_policy_provider.h | 6 +-- update_manager/evaluation_context_unittest.cc | 48 +++++++------------ update_manager/fake_device_policy_provider.h | 4 +- update_manager/real_device_policy_provider.cc | 22 ++++++--- update_manager/real_device_policy_provider.h | 44 +++++++++-------- .../real_device_policy_provider_unittest.cc | 22 ++++++++- 8 files changed, 88 insertions(+), 67 deletions(-) diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index bdb88f8e..08c355ea 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -555,8 +555,9 @@ EvalStatus ChromeOSPolicy::P2PEnabled(EvaluationContext* ec, if (policy_au_p2p_enabled_p) { enabled = *policy_au_p2p_enabled_p; } else { - const string* policy_owner_p = ec->GetValue(dp_provider->var_owner()); - if (!policy_owner_p || policy_owner_p->empty()) + const bool* policy_has_owner_p = + ec->GetValue(dp_provider->var_has_owner()); + if (!policy_has_owner_p || !*policy_has_owner_p) enabled = true; } } diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc index fb8c789f..25c91fa2 100644 --- a/update_manager/chromeos_policy_unittest.cc +++ b/update_manager/chromeos_policy_unittest.cc @@ -1365,7 +1365,7 @@ TEST_F(UmChromeOSPolicyTest, // Override specific device policy attributes. fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(nullptr); - fake_state_.device_policy_provider()->var_owner()->reset(nullptr); + fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false)); fake_state_.device_policy_provider()->var_http_downloads_enabled()->reset( new bool(false)); @@ -1610,7 +1610,7 @@ TEST_F(UmChromeOSPolicyTest, P2PEnabledAllowedByUpdater) { TEST_F(UmChromeOSPolicyTest, P2PEnabledAllowedDeviceEnterpriseEnrolled) { fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(nullptr); - fake_state_.device_policy_provider()->var_owner()->reset(nullptr); + fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false)); bool result; ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::P2PEnabled, &result); diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h index 873282ef..b68fe964 100644 --- a/update_manager/device_policy_provider.h +++ b/update_manager/device_policy_provider.h @@ -66,9 +66,9 @@ class DevicePolicyProvider : public Provider { virtual Variable>* var_allowed_connection_types_for_update() = 0; - // Variable stating the name of the device owner. For enterprise enrolled - // devices, this will be an empty string. - virtual Variable* var_owner() = 0; + // Variable stating whether the device has an owner. For enterprise enrolled + // devices, this will be false as the device owner has an empty string. 
+ virtual Variable* var_has_owner() = 0; virtual Variable* var_http_downloads_enabled() = 0; diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc index eb42eb74..151b0b55 100644 --- a/update_manager/evaluation_context_unittest.cc +++ b/update_manager/evaluation_context_unittest.cc @@ -210,13 +210,11 @@ TEST_F(UmEvaluationContextTest, RunOnValueChangeOrTimeoutWithoutVariables) { fake_const_var_.reset(new string("Hello world!")); EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!"); - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); } // Test that reevaluation occurs when an async variable it depends on changes. @@ -286,23 +284,19 @@ TEST_F(UmEvaluationContextTest, RunOnValueChangeOrTimeoutExpires) { EXPECT_TRUE(value); // Ensure that we cannot reschedule an evaluation. - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); // Ensure that we can reschedule an evaluation after resetting expiration. eval_ctx_->ResetExpiration(); - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); } // Test that we clear the events when destroying the EvaluationContext. @@ -348,13 +342,11 @@ TEST_F(UmEvaluationContextTest, ObjectDeletedWithPendingEventsTest) { fake_poll_var_.reset(new string("Polled value")); eval_ctx_->GetValue(&fake_async_var_); eval_ctx_->GetValue(&fake_poll_var_); - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); // TearDown() checks for leaked observers on this async_variable, which means // that our object is still alive after removing its reference. } @@ -446,13 +438,11 @@ TEST_F(UmEvaluationContextTest, // The "false" from IsWallclockTimeGreaterThan means that's not that timestamp // yet, so this should schedule a callback for when that happens. - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); } TEST_F(UmEvaluationContextTest, @@ -462,13 +452,11 @@ TEST_F(UmEvaluationContextTest, // The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp // yet, so this should schedule a callback for when that happens. 
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); } TEST_F(UmEvaluationContextTest, @@ -481,13 +469,11 @@ TEST_F(UmEvaluationContextTest, fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1))); // Callback should not be scheduled. - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); } TEST_F(UmEvaluationContextTest, @@ -500,13 +486,11 @@ TEST_F(UmEvaluationContextTest, fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1))); // Callback should not be scheduled. - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout( #if BASE_VER < 576279 - Bind(&base::DoNothing) + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); #else - base::DoNothing() + EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); #endif - )); } TEST_F(UmEvaluationContextTest, DumpContext) { diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h index 7cd4d7b8..86bdef1e 100644 --- a/update_manager/fake_device_policy_provider.h +++ b/update_manager/fake_device_policy_provider.h @@ -68,7 +68,7 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { return &var_allowed_connection_types_for_update_; } - FakeVariable* var_owner() override { return &var_owner_; } + FakeVariable* var_has_owner() override { return &var_has_owner_; } FakeVariable* var_http_downloads_enabled() override { return &var_http_downloads_enabled_; @@ -110,7 +110,7 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { FakeVariable> var_allowed_connection_types_for_update_{ "allowed_connection_types_for_update", kVariableModePoll}; - FakeVariable var_owner_{"owner", kVariableModePoll}; + FakeVariable var_has_owner_{"owner", kVariableModePoll}; FakeVariable var_http_downloads_enabled_{"http_downloads_enabled", kVariableModePoll}; FakeVariable var_au_p2p_enabled_{"au_p2p_enabled", kVariableModePoll}; diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc index 586ee3e2..781e2acb 100644 --- a/update_manager/real_device_policy_provider.cc +++ b/update_manager/real_device_policy_provider.cc @@ -104,11 +104,12 @@ void RealDevicePolicyProvider::RefreshDevicePolicyAndReschedule() { } template -void RealDevicePolicyProvider::UpdateVariable( - AsyncCopyVariable* var, bool (DevicePolicy::*getter_method)(T*) const) { +void RealDevicePolicyProvider::UpdateVariable(AsyncCopyVariable* var, + bool (DevicePolicy::*getter)(T*) + const) { T new_value; if (policy_provider_->device_policy_is_loaded() && - (policy_provider_->GetDevicePolicy().*getter_method)(&new_value)) { + (policy_provider_->GetDevicePolicy().*getter)(&new_value)) { var->SetValue(new_value); } else { var->UnsetValue(); @@ -118,10 +119,10 @@ void RealDevicePolicyProvider::UpdateVariable( template void RealDevicePolicyProvider::UpdateVariable( AsyncCopyVariable* var, - bool (RealDevicePolicyProvider::*getter_method)(T*) const) { + bool (RealDevicePolicyProvider::*getter)(T*) const) { T new_value; if (policy_provider_->device_policy_is_loaded() && - 
(this->*getter_method)(&new_value)) { + (this->*getter)(&new_value)) { var->SetValue(new_value); } else { var->UnsetValue(); @@ -198,6 +199,15 @@ bool RealDevicePolicyProvider::ConvertDisallowedTimeIntervals( return true; } +bool RealDevicePolicyProvider::ConvertHasOwner(bool* has_owner) const { + string owner; + if (!policy_provider_->GetDevicePolicy().GetOwner(&owner)) { + return false; + } + *has_owner = !owner.empty(); + return true; +} + void RealDevicePolicyProvider::RefreshDevicePolicy() { if (!policy_provider_->Reload()) { LOG(INFO) << "No device policies/settings present."; @@ -225,7 +235,7 @@ void RealDevicePolicyProvider::RefreshDevicePolicy() { UpdateVariable( &var_allowed_connection_types_for_update_, &RealDevicePolicyProvider::ConvertAllowedConnectionTypesForUpdate); - UpdateVariable(&var_owner_, &DevicePolicy::GetOwner); + UpdateVariable(&var_has_owner_, &RealDevicePolicyProvider::ConvertHasOwner); UpdateVariable(&var_http_downloads_enabled_, &DevicePolicy::GetHttpDownloadsEnabled); UpdateVariable(&var_au_p2p_enabled_, &DevicePolicy::GetAuP2PEnabled); diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h index bda4cff1..9da052d8 100644 --- a/update_manager/real_device_policy_provider.h +++ b/update_manager/real_device_policy_provider.h @@ -34,7 +34,7 @@ namespace chromeos_update_manager { -// DevicePolicyProvider concrete implementation. +// |DevicePolicyProvider| concrete implementation. class RealDevicePolicyProvider : public DevicePolicyProvider { public: #if USE_DBUS @@ -89,7 +89,7 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { return &var_allowed_connection_types_for_update_; } - Variable* var_owner() override { return &var_owner_; } + Variable* var_has_owner() override { return &var_has_owner_; } Variable* var_http_downloads_enabled() override { return &var_http_downloads_enabled_; @@ -113,12 +113,13 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest); FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded); FRIEND_TEST(UmRealDevicePolicyProviderTest, ValuesUpdated); + FRIEND_TEST(UmRealDevicePolicyProviderTest, HasOwnerConverted); - // A static handler for the PropertyChangedCompleted signal from the session + // A static handler for the |PropertyChangedCompleted| signal from the session // manager used as a callback. void OnPropertyChangedCompletedSignal(const std::string& success); - // Called when the signal in UpdateEngineLibcrosProxyResolvedInterface is + // Called when the signal in |UpdateEngineLibcrosProxyResolvedInterface| is // connected. void OnSignalConnected(const std::string& interface_name, const std::string& signal_name, @@ -134,36 +135,41 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { // passed, which is a DevicePolicy getter method. template void UpdateVariable(AsyncCopyVariable* var, - bool (policy::DevicePolicy::*getter_method)(T*) const); + bool (policy::DevicePolicy::*getter)(T*) const); // Updates the async variable |var| based on the result value of the getter // method passed, which is a wrapper getter on this class. template void UpdateVariable(AsyncCopyVariable* var, - bool (RealDevicePolicyProvider::*getter_method)(T*) - const); + bool (RealDevicePolicyProvider::*getter)(T*) const); - // Wrapper for DevicePolicy::GetRollbackToTargetVersion() that converts the - // result to RollbackToTargetVersion. 
+ // Wrapper for |DevicePolicy::GetRollbackToTargetVersion()| that converts the + // result to |RollbackToTargetVersion|. bool ConvertRollbackToTargetVersion( RollbackToTargetVersion* rollback_to_target_version) const; - // Wrapper for DevicePolicy::GetScatterFactorInSeconds() that converts the - // result to a base::TimeDelta. It returns the same value as - // GetScatterFactorInSeconds(). + // Wrapper for |DevicePolicy::GetScatterFactorInSeconds()| that converts the + // result to a |base::TimeDelta|. It returns the same value as + // |GetScatterFactorInSeconds()|. bool ConvertScatterFactor(base::TimeDelta* scatter_factor) const; - // Wrapper for DevicePolicy::GetAllowedConnectionTypesForUpdate() that - // converts the result to a set of ConnectionType elements instead of strings. + // Wrapper for |DevicePolicy::GetAllowedConnectionTypesForUpdate()| that + // converts the result to a set of |ConnectionType| elements instead of + // strings. bool ConvertAllowedConnectionTypesForUpdate( std::set* allowed_types) const; - // Wrapper for DevicePolicy::GetUpdateTimeRestrictions() that converts - // the DevicePolicy::WeeklyTimeInterval structs to WeeklyTimeInterval objects, - // which offer more functionality. + // Wrapper for |DevicePolicy::GetUpdateTimeRestrictions()| that converts + // the |DevicePolicy::WeeklyTimeInterval| structs to |WeeklyTimeInterval| + // objects, which offer more functionality. bool ConvertDisallowedTimeIntervals( WeeklyTimeIntervalVector* disallowed_intervals_out) const; + // Wrapper for |DevicePolicy::GetOwner()| that converts the result to a + // boolean of whether the device has an owner. (Enterprise enrolled + // devices do not have an owner). + bool ConvertHasOwner(bool* has_owner) const; + // Used for fetching information about the device policy. policy::PolicyProvider* policy_provider_; @@ -181,7 +187,7 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { AsyncCopyVariable var_device_policy_is_loaded_{"policy_is_loaded", false}; - // Variables mapping the exposed methods from the policy::DevicePolicy. + // Variables mapping the exposed methods from the |policy::DevicePolicy|. 
AsyncCopyVariable var_release_channel_{"release_channel"}; AsyncCopyVariable var_release_channel_delegated_{ "release_channel_delegated"}; @@ -196,7 +202,7 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { AsyncCopyVariable> var_allowed_connection_types_for_update_{ "allowed_connection_types_for_update"}; - AsyncCopyVariable var_owner_{"owner"}; + AsyncCopyVariable var_has_owner_{"owner"}; AsyncCopyVariable var_http_downloads_enabled_{"http_downloads_enabled"}; AsyncCopyVariable var_au_p2p_enabled_{"au_p2p_enabled"}; AsyncCopyVariable var_allow_kiosk_app_control_chrome_version_{ diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc index 0d7b0d0d..8f2c377b 100644 --- a/update_manager/real_device_policy_provider_unittest.cc +++ b/update_manager/real_device_policy_provider_unittest.cc @@ -186,7 +186,7 @@ TEST_F(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyEmptyVariables) { UmTestUtils::ExpectVariableNotSet(provider_->var_scatter_factor()); UmTestUtils::ExpectVariableNotSet( provider_->var_allowed_connection_types_for_update()); - UmTestUtils::ExpectVariableNotSet(provider_->var_owner()); + UmTestUtils::ExpectVariableNotSet(provider_->var_has_owner()); UmTestUtils::ExpectVariableNotSet(provider_->var_http_downloads_enabled()); UmTestUtils::ExpectVariableNotSet(provider_->var_au_p2p_enabled()); UmTestUtils::ExpectVariableNotSet( @@ -230,6 +230,26 @@ TEST_F(UmRealDevicePolicyProviderTest, ValuesUpdated) { string("myapp"), provider_->var_auto_launched_kiosk_app_id()); } +TEST_F(UmRealDevicePolicyProviderTest, HasOwnerConverted) { + SetUpExistentDevicePolicy(); + EXPECT_TRUE(provider_->Init()); + loop_.RunOnce(false); + Mock::VerifyAndClearExpectations(&mock_policy_provider_); + + EXPECT_CALL(mock_device_policy_, GetOwner(_)) + .Times(2) + .WillOnce(DoAll(SetArgPointee<0>(string("")), Return(true))) + .WillOnce(DoAll(SetArgPointee<0>(string("abc@test.org")), Return(true))); + + // Enterprise enrolled device. + provider_->RefreshDevicePolicy(); + UmTestUtils::ExpectVariableHasValue(false, provider_->var_has_owner()); + + // Has a device owner. + provider_->RefreshDevicePolicy(); + UmTestUtils::ExpectVariableHasValue(true, provider_->var_has_owner()); +} + TEST_F(UmRealDevicePolicyProviderTest, RollbackToTargetVersionConverted) { SetUpExistentDevicePolicy(); EXPECT_CALL(mock_device_policy_, GetRollbackToTargetVersion(_)) From fe052b260379127ac4d8cfb951e67071539935fe Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 20 Jun 2019 15:02:53 -0700 Subject: [PATCH 038/624] update_engine: Remove unused binder_service_brillo.* Chrome OS is not using this class. Just remove it. Also cleanup the use cases of USE_OMAHA and USE_BINDER. USE_OMAHA is only used in Chrome OS, and USE_BINDER is only used in Android, So it doesn't make sense to have USE_OMAHA macros inside USE_BINDER. 
BUG=chromium:977320 TEST=unittest Change-Id: I4e302273585b7e105b177da01fe6dab07ad41676 Reviewed-on: https://chromium-review.googlesource.com/1669973 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Sen Jiang --- BUILD.gn | 2 +- binder_service_brillo.cc | 247 --------------------------------------- binder_service_brillo.h | 114 ------------------ daemon.cc | 4 - daemon.h | 11 -- 5 files changed, 1 insertion(+), 377 deletions(-) delete mode 100644 binder_service_brillo.cc delete mode 100644 binder_service_brillo.h diff --git a/BUILD.gn b/BUILD.gn index 3e54eeff..2ee0b9e2 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -69,7 +69,7 @@ pkg_config("target_defaults") { "__CHROMEOS__", "_FILE_OFFSET_BITS=64", "_POSIX_C_SOURCE=199309L", - "USE_BINDER=${use.binder}", + "USE_BINDER=0", "USE_DBUS=${use.dbus}", "USE_FEC=0", "USE_HWID_OVERRIDE=${use.hwid_override}", diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc deleted file mode 100644 index cc747639..00000000 --- a/binder_service_brillo.cc +++ /dev/null @@ -1,247 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/binder_service_brillo.h" - -#include - -#include - -#include -#include - -#include "update_engine/update_status_utils.h" - -using android::sp; -using android::String16; -using android::String8; -using android::binder::Status; -using android::brillo::IUpdateEngineStatusCallback; -using android::brillo::ParcelableUpdateEngineStatus; -using brillo::ErrorPtr; -using std::string; -using update_engine::UpdateEngineStatus; - -namespace chromeos_update_engine { - -namespace { -string NormalString(const String16& in) { - return string{String8{in}.string()}; -} - -Status ToStatus(ErrorPtr* error) { - return Status::fromServiceSpecificError( - 1, String8{error->get()->GetMessage().c_str()}); -} -} // namespace - -template -Status BinderUpdateEngineBrilloService::CallCommonHandler( - bool (UpdateEngineService::*Handler)(ErrorPtr*, Parameters...), - Arguments... 
arguments) { - ErrorPtr error; - if (((common_.get())->*Handler)(&error, arguments...)) - return Status::ok(); - return ToStatus(&error); -} - -Status BinderUpdateEngineBrilloService::SetUpdateAttemptFlags(int flags) { - return CallCommonHandler(&UpdateEngineService::SetUpdateAttemptFlags, flags); -} - -Status BinderUpdateEngineBrilloService::AttemptUpdate( - const String16& app_version, - const String16& omaha_url, - int flags, - bool* out_result) { - return CallCommonHandler(&UpdateEngineService::AttemptUpdate, - NormalString(app_version), - NormalString(omaha_url), - flags, - out_result); -} - -Status BinderUpdateEngineBrilloService::AttemptRollback(bool powerwash) { - return CallCommonHandler(&UpdateEngineService::AttemptRollback, powerwash); -} - -Status BinderUpdateEngineBrilloService::CanRollback(bool* out_can_rollback) { - return CallCommonHandler(&UpdateEngineService::CanRollback, out_can_rollback); -} - -Status BinderUpdateEngineBrilloService::ResetStatus() { - return CallCommonHandler(&UpdateEngineService::ResetStatus); -} - -Status BinderUpdateEngineBrilloService::GetStatus( - ParcelableUpdateEngineStatus* status) { - UpdateEngineStatus update_engine_status; - auto ret = - CallCommonHandler(&UpdateEngineService::GetStatus, &update_engine_status); - - if (ret.isOk()) { - *status = ParcelableUpdateEngineStatus(update_engine_status); - } - - return ret; -} - -Status BinderUpdateEngineBrilloService::RebootIfNeeded() { - return CallCommonHandler(&UpdateEngineService::RebootIfNeeded); -} - -Status BinderUpdateEngineBrilloService::SetChannel( - const String16& target_channel, bool powerwash) { - return CallCommonHandler(&UpdateEngineService::SetChannel, - NormalString(target_channel), - powerwash); -} - -Status BinderUpdateEngineBrilloService::GetChannel(bool get_current_channel, - String16* out_channel) { - string channel_string; - auto ret = CallCommonHandler( - &UpdateEngineService::GetChannel, get_current_channel, &channel_string); - - *out_channel = String16(channel_string.c_str()); - return ret; -} - -Status BinderUpdateEngineBrilloService::SetCohortHint( - const String16& in_cohort_hint) { - return CallCommonHandler(&UpdateEngineService::SetCohortHint, - NormalString(in_cohort_hint)); -} - -Status BinderUpdateEngineBrilloService::GetCohortHint( - String16* out_cohort_hint) { - string cohort_hint; - auto ret = - CallCommonHandler(&UpdateEngineService::GetCohortHint, &cohort_hint); - - *out_cohort_hint = String16(cohort_hint.c_str()); - return ret; -} - -Status BinderUpdateEngineBrilloService::SetP2PUpdatePermission(bool enabled) { - return CallCommonHandler(&UpdateEngineService::SetP2PUpdatePermission, - enabled); -} - -Status BinderUpdateEngineBrilloService::GetP2PUpdatePermission( - bool* out_p2p_permission) { - return CallCommonHandler(&UpdateEngineService::GetP2PUpdatePermission, - out_p2p_permission); -} - -Status BinderUpdateEngineBrilloService::SetUpdateOverCellularPermission( - bool enabled) { - return CallCommonHandler( - &UpdateEngineService::SetUpdateOverCellularPermission, enabled); -} - -Status BinderUpdateEngineBrilloService::SetUpdateOverCellularTarget( - const String16& target_version, int64_t target_size) { - return CallCommonHandler(&UpdateEngineService::SetUpdateOverCellularTarget, - NormalString(target_version), - target_size); -} - -Status BinderUpdateEngineBrilloService::GetUpdateOverCellularPermission( - bool* out_cellular_permission) { - return CallCommonHandler( - &UpdateEngineService::GetUpdateOverCellularPermission, - out_cellular_permission); -} - 
-Status BinderUpdateEngineBrilloService::GetDurationSinceUpdate( - int64_t* out_duration) { - return CallCommonHandler(&UpdateEngineService::GetDurationSinceUpdate, - out_duration); -} - -Status BinderUpdateEngineBrilloService::GetPrevVersion( - String16* out_prev_version) { - string version_string; - auto ret = - CallCommonHandler(&UpdateEngineService::GetPrevVersion, &version_string); - - *out_prev_version = String16(version_string.c_str()); - return ret; -} - -Status BinderUpdateEngineBrilloService::GetRollbackPartition( - String16* out_rollback_partition) { - string partition_string; - auto ret = CallCommonHandler(&UpdateEngineService::GetRollbackPartition, - &partition_string); - - if (ret.isOk()) { - *out_rollback_partition = String16(partition_string.c_str()); - } - - return ret; -} - -Status BinderUpdateEngineBrilloService::RegisterStatusCallback( - const sp& callback) { - callbacks_.emplace_back(callback); - - auto binder_wrapper = android::BinderWrapper::Get(); - - binder_wrapper->RegisterForDeathNotifications( - IUpdateEngineStatusCallback::asBinder(callback), - base::Bind(&BinderUpdateEngineBrilloService::UnregisterStatusCallback, - base::Unretained(this), - base::Unretained(callback.get()))); - - return Status::ok(); -} - -Status BinderUpdateEngineBrilloService::GetLastAttemptError( - int* out_last_attempt_error) { - return CallCommonHandler(&UpdateEngineService::GetLastAttemptError, - out_last_attempt_error); -} - -Status BinderUpdateEngineBrilloService::GetEolStatus(int* out_eol_status) { - return CallCommonHandler(&UpdateEngineService::GetEolStatus, out_eol_status); -} - -void BinderUpdateEngineBrilloService::UnregisterStatusCallback( - IUpdateEngineStatusCallback* callback) { - auto it = callbacks_.begin(); - while (it != callbacks_.end() && it->get() != callback) - it++; - - if (it == callbacks_.end()) { - LOG(ERROR) << "Got death notification for unknown callback."; - return; - } - - LOG(INFO) << "Erasing orphan callback"; - callbacks_.erase(it); -} - -void BinderUpdateEngineBrilloService::SendStatusUpdate( - const UpdateEngineStatus& update_engine_status) { - ParcelableUpdateEngineStatus parcelable_status(update_engine_status); - for (auto& callback : callbacks_) { - callback->HandleStatusUpdate(parcelable_status); - } -} - -} // namespace chromeos_update_engine diff --git a/binder_service_brillo.h b/binder_service_brillo.h deleted file mode 100644 index d0d0dc92..00000000 --- a/binder_service_brillo.h +++ /dev/null @@ -1,114 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#ifndef UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_ -#define UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_ - -#include - -#include -#include -#include - -#include - -#include "update_engine/common_service.h" -#include "update_engine/parcelable_update_engine_status.h" -#include "update_engine/service_observer_interface.h" - -#include "android/brillo/BnUpdateEngine.h" -#include "android/brillo/IUpdateEngineStatusCallback.h" - -namespace chromeos_update_engine { - -class BinderUpdateEngineBrilloService : public android::brillo::BnUpdateEngine, - public ServiceObserverInterface { - public: - explicit BinderUpdateEngineBrilloService(SystemState* system_state) - : common_(new UpdateEngineService(system_state)) {} - virtual ~BinderUpdateEngineBrilloService() = default; - - const char* ServiceName() const { - return "android.brillo.UpdateEngineService"; - } - - // ServiceObserverInterface overrides. - void SendStatusUpdate( - const update_engine::UpdateEngineStatus& update_engine_status) override; - void SendPayloadApplicationComplete(ErrorCode error_code) override {} - - // android::brillo::BnUpdateEngine overrides. - android::binder::Status SetUpdateAttemptFlags(int flags) override; - android::binder::Status AttemptUpdate(const android::String16& app_version, - const android::String16& omaha_url, - int flags, - bool* out_result) override; - android::binder::Status AttemptRollback(bool powerwash) override; - android::binder::Status CanRollback(bool* out_can_rollback) override; - android::binder::Status ResetStatus() override; - android::binder::Status GetStatus( - android::brillo::ParcelableUpdateEngineStatus* status); - android::binder::Status RebootIfNeeded() override; - android::binder::Status SetChannel(const android::String16& target_channel, - bool powerwash) override; - android::binder::Status GetChannel(bool get_current_channel, - android::String16* out_channel) override; - android::binder::Status SetCohortHint( - const android::String16& cohort_hint) override; - android::binder::Status GetCohortHint( - android::String16* out_cohort_hint) override; - android::binder::Status SetP2PUpdatePermission(bool enabled) override; - android::binder::Status GetP2PUpdatePermission( - bool* out_p2p_permission) override; - android::binder::Status SetUpdateOverCellularPermission( - bool enabled) override; - android::binder::Status SetUpdateOverCellularTarget( - const android::String16& target_version, int64_t target_size) override; - android::binder::Status GetUpdateOverCellularPermission( - bool* out_cellular_permission) override; - android::binder::Status GetDurationSinceUpdate( - int64_t* out_duration) override; - android::binder::Status GetPrevVersion( - android::String16* out_prev_version) override; - android::binder::Status GetRollbackPartition( - android::String16* out_rollback_partition) override; - android::binder::Status RegisterStatusCallback( - const android::sp& callback) - override; - android::binder::Status GetLastAttemptError( - int* out_last_attempt_error) override; - android::binder::Status GetEolStatus(int* out_eol_status) override; - - private: - // Generic function for dispatching to the common service. - template - android::binder::Status CallCommonHandler( - bool (UpdateEngineService::*Handler)(brillo::ErrorPtr*, Parameters...), - Arguments... arguments); - - // To be used as a death notification handler only. 
- void UnregisterStatusCallback( - android::brillo::IUpdateEngineStatusCallback* callback); - - std::unique_ptr common_; - - std::vector> - callbacks_; -}; - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_ diff --git a/daemon.cc b/daemon.cc index d42344aa..f370564e 100644 --- a/daemon.cc +++ b/daemon.cc @@ -64,12 +64,8 @@ int UpdateEngineDaemon::OnInit() { #if USE_BINDER // Create the Binder Service. -#if USE_OMAHA - binder_service_ = new BinderUpdateEngineBrilloService{real_system_state}; -#else // !USE_OMAHA binder_service_ = new BinderUpdateEngineAndroidService{ daemon_state_android->service_delegate()}; -#endif // USE_OMAHA auto binder_wrapper = android::BinderWrapper::Get(); if (!binder_wrapper->RegisterService(binder_service_->ServiceName(), binder_service_)) { diff --git a/daemon.h b/daemon.h index c10bb284..3c896bce 100644 --- a/daemon.h +++ b/daemon.h @@ -26,11 +26,7 @@ #include #if USE_BINDER -#if USE_OMAHA -#include "update_engine/binder_service_brillo.h" -#else // !USE_OMAHA #include "update_engine/binder_service_android.h" -#endif // USE_OMAHA #endif // USE_BINDER #include "update_engine/common/subprocess.h" #include "update_engine/daemon_state_interface.h" @@ -65,14 +61,7 @@ class UpdateEngineDaemon : public brillo::Daemon { #if USE_BINDER brillo::BinderWatcher binder_watcher_; -#endif // USE_BINDER - -#if USE_BINDER -#if USE_OMAHA - android::sp binder_service_; -#else // !USE_OMAHA android::sp binder_service_; -#endif // USE_OMAHA #endif // USE_BINDER // The daemon state with all the required daemon classes for the configured From 473d3bc2b991aaa7367112472d2a3282eb3f9f77 Mon Sep 17 00:00:00 2001 From: Keigo Oka Date: Fri, 7 Jun 2019 17:17:05 +0900 Subject: [PATCH 039/624] update_engine: run gn lint on presubmit TEST=checked repo upload runs gnlint.py BUG=chromium:971584 Change-Id: Iab58c1b42aaca5c27421835b4923c880a16a7ba1 Reviewed-on: https://chromium-review.googlesource.com/1648086 Tested-by: Keigo Oka Commit-Ready: Keigo Oka Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- PRESUBMIT.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg index f2c78315..7256e542 100644 --- a/PRESUBMIT.cfg +++ b/PRESUBMIT.cfg @@ -1,6 +1,6 @@ [Hook Scripts] hook0=../../../../chromite/bin/cros lint ${PRESUBMIT_FILES} -hook1=../../../platform2/common-mk/gyplint.py ${PRESUBMIT_FILES} +hook1=../../../platform2/common-mk/gnlint.py ${PRESUBMIT_FILES} [Hook Overrides] clang_format_check: true From edb65502fe728573c508e6ef04d90180b00e7ac8 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Fri, 14 Jun 2019 11:52:17 -0700 Subject: [PATCH 040/624] update_engine: Make the ChromeOS/AOSP Omaha Client transmit sessionid. As per Omaha's protocol specification, ChromeOS/AOSP needs to transmit the 'sessionid` attribute in the request. The format of the 'sessionid' attribute is sent as GUID version 4. The sessionid is kept throughout the entirety of the update flow. 1. When the (pings/download/updates) is done, the pings to Omaha will send empty sessionids. 2. If there is a schedule error/issue and a new update is scheduled, a new sessionid will be applied. 3. During errors/issues, the same sessionid will be used. 4. All new will start with a fresh sessionid. BUG=chromium:940515 TEST=cros_workon_make --board=octopus update_engine --test TEST=/usr/bin/update_engine_client --check_for_update # after bouncing update-engine + check /var/log/update_engine.log. 'sessionid' attribute will be in the omaha request. 
Change-Id: If4d29b630e3ab1b547606ef1c5fb06cc7a9cd61f Reviewed-on: https://chromium-review.googlesource.com/1658422 Tested-by: Jae Hoon Kim Commit-Ready: Jae Hoon Kim Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- omaha_request_action.cc | 11 ++- omaha_request_action.h | 8 +- omaha_request_action_unittest.cc | 28 ++++-- omaha_request_builder_xml.cc | 3 +- omaha_request_builder_xml.h | 7 +- omaha_request_builder_xml_unittest.cc | 27 +++++- update_attempter.cc | 27 ++++-- update_attempter.h | 7 ++ update_attempter_unittest.cc | 120 +++++++++++++++++++++----- 9 files changed, 194 insertions(+), 44 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index f24cd42e..40e52f08 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -111,6 +111,7 @@ constexpr char kValNoUpdate[] = "noupdate"; constexpr char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity"; constexpr char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId"; constexpr char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater"; +constexpr char kXGoogleUpdateSessionId[] = "X-Goog-Update-SessionId"; // updatecheck attributes (without the underscore prefix). constexpr char kAttrEol[] = "eol"; @@ -285,7 +286,8 @@ OmahaRequestAction::OmahaRequestAction( SystemState* system_state, OmahaEvent* event, std::unique_ptr http_fetcher, - bool ping_only) + bool ping_only, + const string& session_id) : system_state_(system_state), params_(system_state->request_params()), event_(event), @@ -293,7 +295,8 @@ OmahaRequestAction::OmahaRequestAction( policy_provider_(std::make_unique()), ping_only_(ping_only), ping_active_days_(0), - ping_roll_call_days_(0) { + ping_roll_call_days_(0), + session_id_(session_id) { policy_provider_->Reload(); } @@ -429,7 +432,8 @@ void OmahaRequestAction::PerformAction() { ping_active_days_, ping_roll_call_days_, GetInstallDate(system_state_), - system_state_->prefs()); + system_state_->prefs(), + session_id_); string request_post = omaha_request.GetRequest(); // Set X-Goog-Update headers. @@ -440,6 +444,7 @@ void OmahaRequestAction::PerformAction() { kXGoogleUpdateUpdater, base::StringPrintf( "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion)); + http_fetcher_->SetHeader(kXGoogleUpdateSessionId, session_id_); http_fetcher_->SetPostData( request_post.data(), request_post.size(), kHttpContentTypeTextXml); diff --git a/omaha_request_action.h b/omaha_request_action.h index f006d69e..8dffb5c0 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -104,7 +104,8 @@ class OmahaRequestAction : public Action, OmahaRequestAction(SystemState* system_state, OmahaEvent* event, std::unique_ptr http_fetcher, - bool ping_only); + bool ping_only, + const std::string& session_id); ~OmahaRequestAction() override; typedef ActionTraits::InputObjectType InputObjectType; typedef ActionTraits::OutputObjectType OutputObjectType; @@ -143,6 +144,9 @@ class OmahaRequestAction : public Action, GetInstallDateWhenOOBECompletedWithValidDate); FRIEND_TEST(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges); + friend class UpdateAttempterTest; + FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha); + FRIEND_TEST(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow); // Enumeration used in PersistInstallDate(). 
enum InstallDateProvisioningSource { @@ -307,6 +311,8 @@ class OmahaRequestAction : public Action, int ping_active_days_; int ping_roll_call_days_; + std::string session_id_; + DISALLOW_COPY_AND_ASSIGN(OmahaRequestAction); }; diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 91de9d4e..e13d10e9 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -84,6 +84,7 @@ const char kCurrentVersion[] = "0.1.0.0"; const char kTestAppId[] = "test-app-id"; const char kTestAppId2[] = "test-app2-id"; const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck"; +const char kTestSessionId[] = "12341234-1234-1234-1234-1234123412341234"; // This is a helper struct to allow unit tests build an update response with the // values they care about. @@ -296,6 +297,8 @@ class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate { fetcher->GetHeader("X-Goog-Update-Interactivity")); EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-Goog-Update-AppId")); EXPECT_NE("", fetcher->GetHeader("X-Goog-Update-Updater")); + EXPECT_EQ(kTestSessionId, + fetcher->GetHeader("X-Goog-Update-SessionId")); } post_data_ = fetcher->post_data(); } else if (action->Type() == @@ -327,6 +330,7 @@ struct TestUpdateCheckParams { metrics::CheckResult expected_check_result; metrics::CheckReaction expected_check_reaction; metrics::DownloadErrorCode expected_download_error_code; + string session_id; }; class OmahaRequestActionTest : public ::testing::Test { @@ -366,6 +370,7 @@ class OmahaRequestActionTest : public ::testing::Test { .expected_check_result = metrics::CheckResult::kUpdateAvailable, .expected_check_reaction = metrics::CheckReaction::kUpdating, .expected_download_error_code = metrics::DownloadErrorCode::kUnset, + .session_id = kTestSessionId, }; } @@ -439,8 +444,12 @@ bool OmahaRequestActionTest::TestUpdateCheck() { // are not using the default request_params_. 
EXPECT_EQ(&request_params_, fake_system_state_.request_params()); - auto omaha_request_action = std::make_unique( - &fake_system_state_, nullptr, std::move(fetcher), tuc_params_.ping_only); + auto omaha_request_action = + std::make_unique(&fake_system_state_, + nullptr, + std::move(fetcher), + tuc_params_.ping_only, + tuc_params_.session_id); auto mock_policy_provider = std::make_unique>(); @@ -510,7 +519,8 @@ void OmahaRequestActionTest::TestEvent(OmahaEvent* event, event, std::make_unique( http_response.data(), http_response.size(), nullptr), - false); + false, + ""); ActionProcessor processor; processor.set_delegate(&delegate_); processor.EnqueueAction(std::move(action)); @@ -1311,7 +1321,8 @@ TEST_F(OmahaRequestActionTest, NoOutputPipeTest) { nullptr, std::make_unique( http_response.data(), http_response.size(), nullptr), - false); + false, + ""); ActionProcessor processor; processor.set_delegate(&delegate_); processor.EnqueueAction(std::move(action)); @@ -1445,7 +1456,8 @@ TEST_F(OmahaRequestActionTest, TerminateTransferTest) { nullptr, std::make_unique( http_response.data(), http_response.size(), nullptr), - false); + false, + ""); TerminateEarlyTestProcessorDelegate delegate; ActionProcessor processor; processor.set_delegate(&delegate); @@ -1580,7 +1592,8 @@ TEST_F(OmahaRequestActionTest, IsEventTest) { nullptr, std::make_unique( http_response.data(), http_response.size(), nullptr), - false); + false, + ""); EXPECT_FALSE(update_check_action.IsEvent()); OmahaRequestAction event_action( @@ -1588,7 +1601,8 @@ TEST_F(OmahaRequestActionTest, IsEventTest) { new OmahaEvent(OmahaEvent::kTypeUpdateComplete), std::make_unique( http_response.data(), http_response.size(), nullptr), - false); + false, + ""); EXPECT_TRUE(event_action.IsEvent()); } diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index e335c40a..3e4a3359 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -337,10 +337,11 @@ string OmahaRequestBuilderXml::GetRequest() const { string request_xml = base::StringPrintf( "\n" - "\n%s%s\n", base::GenerateGUID().c_str() /* requestid */, + session_id_.c_str(), constants::kOmahaUpdaterID, kOmahaUpdaterVersion, params_->interactive() ? 
"ondemandupdate" : "scheduler", diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h index c390b9ef..0ba44b88 100644 --- a/omaha_request_builder_xml.h +++ b/omaha_request_builder_xml.h @@ -121,7 +121,8 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { int ping_active_days, int ping_roll_call_days, int install_date_in_days, - PrefsInterface* prefs) + PrefsInterface* prefs, + const std::string& session_id) : event_(event), params_(params), ping_only_(ping_only), @@ -129,7 +130,8 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { ping_active_days_(ping_active_days), ping_roll_call_days_(ping_roll_call_days), install_date_in_days_(install_date_in_days), - prefs_(prefs) {} + prefs_(prefs), + session_id_(session_id) {} ~OmahaRequestBuilderXml() override = default; @@ -173,6 +175,7 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { int ping_roll_call_days_; int install_date_in_days_; PrefsInterface* prefs_; + std::string session_id_; DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilderXml); }; diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 23abebbc..4375bed3 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -90,7 +90,8 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { 0, 0, 0, - fake_system_state_.prefs()}; + fake_system_state_.prefs(), + ""}; const string request_xml = omaha_request.GetRequest(); const string key = "requestid"; const string request_id = @@ -100,4 +101,28 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { EXPECT_TRUE(base::IsValidGUID(request_id)); } +TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { + const string gen_session_id = base::GenerateGUID(); + OmahaEvent omaha_event; + OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestBuilderXml omaha_request{&omaha_event, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + gen_session_id}; + const string request_xml = omaha_request.GetRequest(); + const string key = "sessionid"; + const string session_id = + FindAttributeKeyValueInXml(request_xml, key, kGuidSize); + // A valid |session_id| is either a GUID version 4 or empty string. + if (!session_id.empty()) { + EXPECT_TRUE(base::IsValidGUID(session_id)); + } + EXPECT_EQ(gen_session_id, session_id); +} + } // namespace chromeos_update_engine diff --git a/update_attempter.cc b/update_attempter.cc index fcafd56f..73785bfe 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -648,6 +648,10 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { CHECK(!processor_->IsRunning()); processor_->set_delegate(this); + // The session ID needs to be kept throughout the update flow. The value + // of the session ID will reset/update only when it is a new update flow. + session_id_ = base::GenerateGUID(); + // Actions: auto update_check_fetcher = std::make_unique( GetProxyResolver(), system_state_->hardware()); @@ -656,8 +660,12 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { // See comment in libcurl_http_fetcher.cc. update_check_fetcher->set_no_network_max_retries(interactive ? 
1 : 3); update_check_fetcher->set_is_update_check(true); - auto update_check_action = std::make_unique( - system_state_, nullptr, std::move(update_check_fetcher), false); + auto update_check_action = + std::make_unique(system_state_, + nullptr, + std::move(update_check_fetcher), + false, + session_id_); auto response_handler_action = std::make_unique(system_state_); auto update_boot_flags_action = @@ -667,7 +675,8 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted), std::make_unique(GetProxyResolver(), system_state_->hardware()), - false); + false, + session_id_); LibcurlHttpFetcher* download_fetcher = new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware()); @@ -688,7 +697,8 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished), std::make_unique(GetProxyResolver(), system_state_->hardware()), - false); + false, + session_id_); auto filesystem_verifier_action = std::make_unique(); auto update_complete_action = std::make_unique( @@ -696,7 +706,8 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { new OmahaEvent(OmahaEvent::kTypeUpdateComplete), std::make_unique(GetProxyResolver(), system_state_->hardware()), - false); + false, + session_id_); auto postinstall_runner_action = std::make_unique( system_state_->boot_control(), system_state_->hardware()); @@ -1450,7 +1461,8 @@ bool UpdateAttempter::ScheduleErrorEventAction() { error_event_.release(), // Pass ownership. std::make_unique(GetProxyResolver(), system_state_->hardware()), - false); + false, + session_id_); processor_->EnqueueAction(std::move(error_event_action)); SetStatusAndNotify(UpdateStatus::REPORTING_ERROR_EVENT); processor_->StartProcessing(); @@ -1493,7 +1505,8 @@ void UpdateAttempter::PingOmaha() { nullptr, std::make_unique(GetProxyResolver(), system_state_->hardware()), - true); + true, + "" /* session_id */); processor_->set_delegate(nullptr); processor_->EnqueueAction(std::move(ping_action)); // Call StartProcessing() synchronously here to avoid any race conditions diff --git a/update_attempter.h b/update_attempter.h index 82b81cee..59c96865 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -26,6 +26,7 @@ #include #include +#include #include #include // for FRIEND_TEST @@ -268,6 +269,9 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess); FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest); FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest); + FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnUpdateCheck); + FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha); + FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnOmahaRequestActions); FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback); FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback); FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset); @@ -523,6 +527,9 @@ class UpdateAttempter : public ActionProcessorDelegate, base::TimeDelta staging_wait_time_; chromeos_update_manager::StagingSchedule staging_schedule_; + // This is the session ID used to track update flow to Omaha. 
+ std::string session_id_; + DISALLOW_COPY_AND_ASSIGN(UpdateAttempter); }; diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index e246e1bc..fb33011a 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -59,11 +60,13 @@ using chromeos_update_manager::UpdateCheckParams; using policy::DevicePolicy; using std::string; using std::unique_ptr; +using std::unordered_set; using std::vector; using testing::_; using testing::DoAll; using testing::Field; using testing::InSequence; +using testing::Invoke; using testing::Ne; using testing::NiceMock; using testing::Pointee; @@ -109,13 +112,13 @@ class UpdateAttempterUnderTest : public UpdateAttempter { } return true; } - void EnableScheduleUpdates() { do_schedule_updates_ = true; } + void DisableScheduleUpdates() { do_schedule_updates_ = false; } - // Indicates whether ScheduleUpdates() was called. + // Indicates whether |ScheduleUpdates()| was called. bool schedule_updates_called() const { return schedule_updates_called_; } - // Need to expose forced_omaha_url_ so we can test it. + // Need to expose |forced_omaha_url_| so we can test it. const string& forced_omaha_url() const { return forced_omaha_url_; } private: @@ -144,6 +147,7 @@ class UpdateAttempterTest : public ::testing::Test { void SetUp() override { EXPECT_NE(nullptr, attempter_.system_state_); + EXPECT_NE(nullptr, attempter_.system_state_->update_manager()); EXPECT_EQ(0, attempter_.http_response_code_); EXPECT_EQ(UpdateStatus::IDLE, attempter_.status_); EXPECT_EQ(0.0, attempter_.download_progress_); @@ -154,7 +158,7 @@ class UpdateAttempterTest : public ::testing::Test { attempter_.processor_.reset(processor_); // Transfers ownership. prefs_ = fake_system_state_.mock_prefs(); - // Set up store/load semantics of P2P properties via the mock PayloadState. + // Setup store/load semantics of P2P properties via the mock |PayloadState|. actual_using_p2p_for_downloading_ = false; EXPECT_CALL(*fake_system_state_.mock_payload_state(), SetUsingP2PForDownloading(_)) @@ -188,6 +192,9 @@ class UpdateAttempterTest : public ::testing::Test { void P2PEnabledInteractiveStart(); void P2PEnabledStartingFailsStart(); void P2PEnabledHousekeepingFailsStart(); + void SessionIdTestChange(); + void SessionIdTestEnforceEmptyStrPingOmaha(); + void SessionIdTestConsistencyInUpdateFlow(); void UpdateToQuickFixBuildStart(bool set_token); void ResetRollbackHappenedStart(bool is_consumer, bool is_policy_available, @@ -214,7 +221,8 @@ class UpdateAttempterTest : public ::testing::Test { MockDlcService mock_dlcservice_; NiceMock* processor_; - NiceMock* prefs_; // Shortcut to fake_system_state_->mock_prefs(). + NiceMock* + prefs_; // Shortcut to |fake_system_state_->mock_prefs()|. 
NiceMock mock_connection_manager; bool actual_using_p2p_for_downloading_; @@ -228,6 +236,75 @@ void UpdateAttempterTest::ScheduleQuitMainLoop() { base::Unretained(&loop_))); } +void UpdateAttempterTest::SessionIdTestChange() { + EXPECT_NE(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status()); + const auto old_session_id = attempter_.session_id_; + attempter_.Update("", "", "", "", false, false, 0, false, false); + EXPECT_NE(old_session_id, attempter_.session_id_); + ScheduleQuitMainLoop(); +} + +TEST_F(UpdateAttempterTest, SessionIdTestChange) { + loop_.PostTask(FROM_HERE, + base::Bind(&UpdateAttempterTest::SessionIdTestChange, + base::Unretained(this))); + loop_.Run(); +} + +void UpdateAttempterTest::SessionIdTestEnforceEmptyStrPingOmaha() { + // The |session_id_| should not be changed and should remain as an empty + // string when |status_| is |UPDATED_NEED_REBOOT| (only for consistency) + // and |PingOmaha()| is called. + attempter_.DisableScheduleUpdates(); + attempter_.status_ = UpdateStatus::UPDATED_NEED_REBOOT; + const auto old_session_id = attempter_.session_id_; + auto CheckIfEmptySessionId = [](AbstractAction* aa) { + if (aa->Type() == OmahaRequestAction::StaticType()) { + EXPECT_TRUE(static_cast(aa)->session_id_.empty()); + } + }; + EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))) + .WillRepeatedly(Invoke(CheckIfEmptySessionId)); + EXPECT_CALL(*processor_, StartProcessing()); + attempter_.PingOmaha(); + EXPECT_EQ(old_session_id, attempter_.session_id_); + EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status_); + ScheduleQuitMainLoop(); +} + +TEST_F(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha) { + loop_.PostTask( + FROM_HERE, + base::Bind(&UpdateAttempterTest::SessionIdTestEnforceEmptyStrPingOmaha, + base::Unretained(this))); + loop_.Run(); +} + +void UpdateAttempterTest::SessionIdTestConsistencyInUpdateFlow() { + // All session IDs passed into |OmahaRequestActions| should be enforced to + // have the same value in |BuildUpdateActions()|. + unordered_set session_ids; + // Gather all the session IDs being passed to |OmahaRequestActions|. + auto CheckSessionId = [&session_ids](AbstractAction* aa) { + if (aa->Type() == OmahaRequestAction::StaticType()) + session_ids.insert(static_cast(aa)->session_id_); + }; + EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))) + .WillRepeatedly(Invoke(CheckSessionId)); + attempter_.BuildUpdateActions(false); + // Validate that all the session IDs are the same. + EXPECT_EQ(1, session_ids.size()); + ScheduleQuitMainLoop(); +} + +TEST_F(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow) { + loop_.PostTask( + FROM_HERE, + base::Bind(&UpdateAttempterTest::SessionIdTestConsistencyInUpdateFlow, + base::Unretained(this))); + loop_.Run(); +} + TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) { unique_ptr fetcher(new MockHttpFetcher("", 0, nullptr)); fetcher->FailTransfer(503); // Sets the HTTP response code. @@ -269,7 +346,7 @@ TEST_F(UpdateAttempterTest, DownloadProgressAccumulationTest) { EXPECT_EQ(0.0, attempter_.download_progress_); // This is set via inspecting the InstallPlan payloads when the - // OmahaResponseAction is completed + // |OmahaResponseAction| is completed. 
attempter_.new_payload_size_ = bytes_total; NiceMock observer; EXPECT_CALL(observer, @@ -293,14 +370,14 @@ TEST_F(UpdateAttempterTest, DownloadProgressAccumulationTest) { } TEST_F(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest) { - // The transition into UpdateStatus::DOWNLOADING happens when the + // The transition into |UpdateStatus::DOWNLOADING| happens when the // first bytes are received. uint64_t bytes_progressed = 1024 * 1024; // 1MB uint64_t bytes_received = 2 * 1024 * 1024; // 2MB uint64_t bytes_total = 20 * 1024 * 1024; // 300MB attempter_.status_ = UpdateStatus::CHECKING_FOR_UPDATE; // This is set via inspecting the InstallPlan payloads when the - // OmahaResponseAction is completed + // |OmahaResponseAction| is completed. attempter_.new_payload_size_ = bytes_total; EXPECT_EQ(0.0, attempter_.download_progress_); NiceMock observer; @@ -315,8 +392,7 @@ TEST_F(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest) { TEST_F(UpdateAttempterTest, BroadcastCompleteDownloadTest) { // There is a special case to ensure that at 100% downloaded, - // download_progress_ is updated and that value broadcast. This test confirms - // that. + // |download_progress_| is updated and broadcastest. uint64_t bytes_progressed = 0; // ignored uint64_t bytes_received = 5 * 1024 * 1024; // ignored uint64_t bytes_total = 5 * 1024 * 1024; // 300MB @@ -338,7 +414,7 @@ TEST_F(UpdateAttempterTest, ActionCompletedOmahaRequestTest) { unique_ptr fetcher(new MockHttpFetcher("", 0, nullptr)); fetcher->FailTransfer(500); // Sets the HTTP response code. OmahaRequestAction action( - &fake_system_state_, nullptr, std::move(fetcher), false); + &fake_system_state_, nullptr, std::move(fetcher), false, ""); ObjectCollectorAction collector_action; BondActions(&action, &collector_action); OmahaResponse response; @@ -368,7 +444,7 @@ TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) { FakeSystemState fake_system_state; OmahaRequestAction omaha_request_action( - &fake_system_state, nullptr, nullptr, false); + &fake_system_state, nullptr, nullptr, false, ""); EXPECT_EQ(ErrorCode::kOmahaRequestError, GetErrorCodeForAction(&omaha_request_action, ErrorCode::kError)); OmahaResponseHandlerAction omaha_response_handler_action(&fake_system_state_); @@ -488,8 +564,8 @@ const StagingSchedule kValidStagingSchedule = { void UpdateAttempterTest::UpdateTestStart() { attempter_.set_http_response_code(200); - // Expect that the device policy is loaded by the UpdateAttempter at some - // point by calling RefreshDevicePolicy. + // Expect that the device policy is loaded by the |UpdateAttempter| at some + // point by calling |RefreshDevicePolicy()|. auto device_policy = std::make_unique(); EXPECT_CALL(*device_policy, LoadPolicy()) .Times(testing::AtLeast(1)) @@ -628,7 +704,7 @@ void UpdateAttempterTest::PingOmahaTestStart() { TEST_F(UpdateAttempterTest, PingOmahaTest) { EXPECT_FALSE(attempter_.waiting_for_scheduled_check_); EXPECT_FALSE(attempter_.schedule_updates_called()); - // Disable scheduling of subsequnet checks; we're using the DefaultPolicy in + // Disable scheduling of subsequnet checks; we're using the |DefaultPolicy| in // testing, which is more permissive than we want to handle here. attempter_.DisableScheduleUpdates(); loop_.PostTask(FROM_HERE, @@ -702,7 +778,7 @@ TEST_F(UpdateAttempterTest, P2PNotEnabled) { void UpdateAttempterTest::P2PNotEnabledStart() { // If P2P is not enabled, check that we do not attempt housekeeping - // and do not convey that p2p is to be used. 
+ // and do not convey that P2P is to be used. MockP2PManager mock_p2p_manager; fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); @@ -721,8 +797,8 @@ TEST_F(UpdateAttempterTest, P2PEnabledStartingFails) { } void UpdateAttempterTest::P2PEnabledStartingFailsStart() { - // If p2p is enabled, but starting it fails ensure we don't do - // any housekeeping and do not convey that p2p should be used. + // If P2P is enabled, but starting it fails ensure we don't do + // any housekeeping and do not convey that P2P should be used. MockP2PManager mock_p2p_manager; fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(true); @@ -744,8 +820,8 @@ TEST_F(UpdateAttempterTest, P2PEnabledHousekeepingFails) { } void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { - // If p2p is enabled, starting it works but housekeeping fails, ensure - // we do not convey p2p is to be used. + // If P2P is enabled, starting it works but housekeeping fails, ensure + // we do not convey P2P is to be used. MockP2PManager mock_p2p_manager; fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(true); @@ -769,7 +845,7 @@ void UpdateAttempterTest::P2PEnabledStart() { MockP2PManager mock_p2p_manager; fake_system_state_.set_p2p_manager(&mock_p2p_manager); // If P2P is enabled and starting it works, check that we performed - // housekeeping and that we convey p2p should be used. + // housekeeping and that we convey P2P should be used. mock_p2p_manager.fake().SetP2PEnabled(true); mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); @@ -792,7 +868,7 @@ void UpdateAttempterTest::P2PEnabledInteractiveStart() { fake_system_state_.set_p2p_manager(&mock_p2p_manager); // For an interactive check, if P2P is enabled and starting it // works, check that we performed housekeeping and that we convey - // p2p should be used for sharing but NOT for downloading. + // P2P should be used for sharing but NOT for downloading. mock_p2p_manager.fake().SetP2PEnabled(true); mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); From ecb60d31efccf3ac4b02c8750e79b03fd5c10ff9 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 17 Jun 2019 18:09:10 -0700 Subject: [PATCH 041/624] update_engine: Reset forced update flag before sending Omaha ping Lack of resetting the forced update flag causes InteractiveUpdatePolicyImpl to always return true for (non-)interactive updates and this can cause the update check to fall into an infinite loop because the policy will never reach to NextUpdateCheckTimePolicyImpl: CheckForUpdate -> ScheduleUpdates -> OnUpdateScheduled -> Update -> PingOmaha -> ScheduleUpdates BUG=chromium:960828 TEST=unitest TEST=updated device using this flow: - update_engine_client --interactive=false --update - wait till the state changed to wait_reboot - update_engine_client --interactive=false --update and no infinite loop was entered. 
Change-Id: Ie8f8308d8af79f56cd71324bcb8679897f6823e7 Reviewed-on: https://chromium-review.googlesource.com/1666252 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Sen Jiang --- update_attempter.cc | 28 +++++++++++++++++----------- update_attempter.h | 3 +++ 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/update_attempter.cc b/update_attempter.cc index 73785bfe..3f778868 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -835,7 +835,7 @@ bool UpdateAttempter::CheckForUpdate(const string& app_version, if (interactive && status_ != UpdateStatus::IDLE) { // An update check is either in-progress, or an update has completed and the // system is in UPDATED_NEED_REBOOT. Either way, don't do an interactive - // update at this time + // update at this time. LOG(INFO) << "Refusing to do an interactive update with an update already " "in progress"; return false; @@ -1031,17 +1031,12 @@ void UpdateAttempter::ProcessingDone(const ActionProcessor* processor, // Reset cpu shares back to normal. cpu_limiter_.StopLimiter(); - // reset the state that's only valid for a single update pass - current_update_attempt_flags_ = UpdateAttemptFlags::kNone; - - if (forced_update_pending_callback_.get()) - // Clear prior interactive requests once the processor is done. - forced_update_pending_callback_->Run(false, false); + ResetInteractivityFlags(); if (status_ == UpdateStatus::REPORTING_ERROR_EVENT) { LOG(INFO) << "Error event sent."; - // Inform scheduler of new status; + // Inform scheduler of new status. SetStatusAndNotify(UpdateStatus::IDLE); ScheduleUpdates(); @@ -1138,9 +1133,9 @@ void UpdateAttempter::ProcessingStopped(const ActionProcessor* processor) { // Reset cpu shares back to normal. cpu_limiter_.StopLimiter(); download_progress_ = 0.0; - if (forced_update_pending_callback_.get()) - // Clear prior interactive requests once the processor is done. - forced_update_pending_callback_->Run(false, false); + + ResetInteractivityFlags(); + SetStatusAndNotify(UpdateStatus::IDLE); ScheduleUpdates(); error_event_.reset(nullptr); @@ -1294,6 +1289,15 @@ void UpdateAttempter::ProgressUpdate(double progress) { } } +void UpdateAttempter::ResetInteractivityFlags() { + // Reset the state that's only valid for a single update pass. + current_update_attempt_flags_ = UpdateAttemptFlags::kNone; + + if (forced_update_pending_callback_.get()) + // Clear prior interactive requests once the processor is done. + forced_update_pending_callback_->Run(false, false); +} + bool UpdateAttempter::ResetStatus() { LOG(INFO) << "Attempting to reset state from " << UpdateStatusToString(status_) << " to UpdateStatus::IDLE"; @@ -1500,6 +1504,8 @@ void UpdateAttempter::MarkDeltaUpdateFailure() { void UpdateAttempter::PingOmaha() { if (!processor_->IsRunning()) { + ResetInteractivityFlags(); + auto ping_action = std::make_unique( system_state_, nullptr, diff --git a/update_attempter.h b/update_attempter.h index 59c96865..c429076e 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -417,6 +417,9 @@ class UpdateAttempter : public ActionProcessorDelegate, // will only be reported for enterprise enrolled devices. void ReportTimeToUpdateAppliedMetric(); + // Resets interactivity and forced update flags. + void ResetInteractivityFlags(); + // Last status notification timestamp used for throttling. Use monotonic // TimeTicks to ensure that notifications are sent even if the system clock is // set back in the middle of an update. 
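
The session-ID contract set up by the two patches above is simple: BuildUpdateActions() mints one GUID per update flow and hands the same value to every OmahaRequestAction it enqueues, while PingOmaha() deliberately sends an empty session ID. A minimal sketch of that contract follows; it relies on the libchrome base::GenerateGUID() call used in the diffs, but the helper names below are illustrative and do not appear in the code above.

    #include <string>

    #include <base/guid.h>

    // One GUID (version 4) is generated per update flow; every
    // OmahaRequestAction built for that flow reuses the same value so Omaha
    // can correlate the requests of a single update attempt.
    std::string NewUpdateFlowSessionId() {
      return base::GenerateGUID();
    }

    // Requests issued outside an update flow (the PingOmaha path) carry an
    // empty session ID on purpose.
    constexpr char kNoSessionId[] = "";
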
From 4b6d1261f989dfad74ff1c10bc73fd28da074171 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 20 Jun 2019 10:41:18 -0700 Subject: [PATCH 042/624] update_engine: Validate payload properties. Add in unit tests to validate payload properties for JSON and KeyValue string generation. The JSON properties string should have valid corresponding "sha256_hex" based on the hash of the entire payload file. The KeyValue properties string should have valid corresponding payload and metadata hashes based on payload file size and metadata size respectively. BUG=None TEST=unittests Change-Id: I0399f420d8ee3dc1e4a103841046b91a0319cdb3 Reviewed-on: https://chromium-review.googlesource.com/1669812 Tested-by: Jae Hoon Kim Commit-Ready: Jae Hoon Kim Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- Android.bp | 1 + BUILD.gn | 1 + .../payload_properties_unittest.cc | 144 ++++++++++++++++++ 3 files changed, 146 insertions(+) create mode 100644 payload_generator/payload_properties_unittest.cc diff --git a/Android.bp b/Android.bp index 0c3cac14..db33a5e7 100644 --- a/Android.bp +++ b/Android.bp @@ -695,6 +695,7 @@ cc_test { "payload_generator/payload_file_unittest.cc", "payload_generator/payload_generation_config_android_unittest.cc", "payload_generator/payload_generation_config_unittest.cc", + "payload_generator/payload_properties_unittest.cc", "payload_generator/payload_signer_unittest.cc", "payload_generator/squashfs_filesystem_unittest.cc", "payload_generator/tarjan_unittest.cc", diff --git a/BUILD.gn b/BUILD.gn index 2ee0b9e2..f2bddc9e 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -498,6 +498,7 @@ if (use.test) { "payload_generator/mapfile_filesystem_unittest.cc", "payload_generator/payload_file_unittest.cc", "payload_generator/payload_generation_config_unittest.cc", + "payload_generator/payload_properties_unittest.cc", "payload_generator/payload_signer_unittest.cc", "payload_generator/squashfs_filesystem_unittest.cc", "payload_generator/tarjan_unittest.cc", diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc new file mode 100644 index 00000000..db3902ce --- /dev/null +++ b/payload_generator/payload_properties_unittest.cc @@ -0,0 +1,144 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/payload_generator/payload_properties.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "update_engine/common/hash_calculator.h" +#include "update_engine/common/test_utils.h" +#include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/payload_generator/delta_diff_generator.h" +#include "update_engine/payload_generator/delta_diff_utils.h" +#include "update_engine/payload_generator/full_update_generator.h" +#include "update_engine/payload_generator/operations_generator.h" +#include "update_engine/payload_generator/payload_file.h" +#include "update_engine/payload_generator/payload_generation_config.h" + +using chromeos_update_engine::test_utils::ScopedTempFile; +using std::string; +using std::unique_ptr; +using std::vector; + +namespace chromeos_update_engine { + +// TODO(kimjae): current implementation is very specific to a static way of +// producing a deterministic test. It would definitely be beneficial to +// extend the |PayloadPropertiesTest::SetUp()| into a generic helper or +// seperate class that can handle creation of different |PayloadFile|s. +class PayloadPropertiesTest : public ::testing::Test { + protected: + void SetUp() override { + PayloadGenerationConfig config; + config.version.major = kBrilloMajorPayloadVersion; + config.version.minor = kSourceMinorPayloadVersion; + config.source.image_info.set_version("123.0.0"); + config.target.image_info.set_version("456.7.8"); + PayloadFile payload; + EXPECT_TRUE(payload.Init(config)); + + const string kTempFileTemplate = "temp_data.XXXXXX"; + int data_file_fd; + string temp_file_path; + EXPECT_TRUE( + utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &data_file_fd)); + ScopedPathUnlinker temp_file_unlinker(temp_file_path); + EXPECT_LE(0, data_file_fd); + + const auto SetupPartitionConfig = + [](PartitionConfig* config, const string& path, size_t size) { + config->path = path; + config->size = size; + }; + const auto WriteZerosToFile = [](const char path[], size_t size) { + string zeros(size, '\0'); + EXPECT_TRUE(utils::WriteFile(path, zeros.c_str(), zeros.size())); + }; + ScopedTempFile old_part_file; + ScopedTempFile new_part_file; + PartitionConfig old_part(kPartitionNameRoot); + PartitionConfig new_part(kPartitionNameRoot); + SetupPartitionConfig(&old_part, old_part_file.path(), 0); + SetupPartitionConfig(&new_part, new_part_file.path(), 10000); + WriteZerosToFile(old_part_file.path().c_str(), old_part.size); + WriteZerosToFile(new_part_file.path().c_str(), new_part.size); + + // Select payload generation strategy based on the config. + unique_ptr strategy(new FullUpdateGenerator()); + + vector aops; + off_t data_file_size = 0; + BlobFileWriter blob_file_writer(data_file_fd, &data_file_size); + // Generate the operations using the strategy we selected above. + EXPECT_TRUE(strategy->GenerateOperations( + config, old_part, new_part, &blob_file_writer, &aops)); + + payload.AddPartition(old_part, new_part, aops); + + uint64_t metadata_size; + EXPECT_TRUE(payload.WritePayload( + payload_file.path(), temp_file_path, "", &metadata_size)); + } + + ScopedTempFile payload_file; +}; + +// Validate the hash of file exists within the output. 
+TEST_F(PayloadPropertiesTest, GetPropertiesAsJsonTestHash) { + constexpr char kJsonProperties[] = + "{" + R"("is_delta":true,)" + R"("metadata_signature":"",)" + R"("metadata_size":187,)" + R"("sha256_hex":"Rtrj9v3xXhrAi1741HAojtGxAQEOZ7mDyhzskIF4PJc=",)" + R"("size":233,)" + R"("source_version":"123.0.0",)" + R"("target_version":"456.7.8",)" + R"("version":2)" + "}"; + string json; + EXPECT_TRUE( + PayloadProperties(payload_file.path()).GetPropertiesAsJson(&json)); + EXPECT_EQ(kJsonProperties, json) << "JSON contents:\n" << json; +} + +// Validate the hash of file and metadata are within the output. +TEST_F(PayloadPropertiesTest, GetPropertiesAsKeyValueTestHash) { + constexpr char kKeyValueProperties[] = + "FILE_HASH=Rtrj9v3xXhrAi1741HAojtGxAQEOZ7mDyhzskIF4PJc=\n" + "FILE_SIZE=233\n" + "METADATA_HASH=kiXTexy/s2aPttf4+r8KRZWYZ6FYvwhU6rJGcnnI+U0=\n" + "METADATA_SIZE=187\n"; + string key_value; + EXPECT_TRUE(PayloadProperties{payload_file.path()}.GetPropertiesAsKeyValue( + &key_value)); + EXPECT_EQ(kKeyValueProperties, key_value) << "Key Value contents:\n" + << key_value; +} + +} // namespace chromeos_update_engine From 0787497df1ee8c7b89c5f326630fa6180184b9ba Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 26 Jun 2019 17:22:34 -0700 Subject: [PATCH 043/624] update_engine: Fuzzer builders failing to build update_engine |OmahaRequestAction| takes within it's constructor another arguement for the session ID. Updates to arguements passed must be made at the callsite within omaha_request_action_fuzzer.cc. BUG=chromium:979057 TEST=USE="asan fuzzer" emerge-$BOARD update_engine Change-Id: I0a8ed7cd9e0ec8ee6b165617d2816d1f1753edd3 --- omaha_request_action_fuzzer.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/omaha_request_action_fuzzer.cc b/omaha_request_action_fuzzer.cc index 6c2f7ca4..6c41b121 100644 --- a/omaha_request_action_fuzzer.cc +++ b/omaha_request_action_fuzzer.cc @@ -38,7 +38,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { nullptr, std::make_unique( data, size, nullptr), - false); + false, + "" /* session_id */); auto collector_action = std::make_unique>(); From c80d2d8867d9ef659b5bdad5b66f77f58a1af2e4 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 21 Jun 2019 17:43:32 -0700 Subject: [PATCH 044/624] update_engine: Remove brillo binder library Brillo binder library is not used anymor. Neither in Chrome OS nor Android. This CL deprecates it. 
BUG=chromium:978672 TEST=unittest Change-Id: I45cbff7561ffd8d24b94782676695e00a131c451 Reviewed-on: https://chromium-review.googlesource.com/1672023 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Sen Jiang --- Android.bp | 40 --- BUILD.gn | 1 - .../android/brillo/IUpdateEngine.aidl | 46 --- .../brillo/IUpdateEngineStatusCallback.aidl | 24 -- .../brillo/ParcelableUpdateEngineStatus.aidl | 20 -- client_library/client.cc | 46 --- client_library/client_binder.cc | 264 ------------------ client_library/client_binder.h | 117 -------- client_library/client_dbus.cc | 12 + parcelable_update_engine_status.cc | 122 -------- parcelable_update_engine_status.h | 63 ----- parcelable_update_engine_status_unittest.cc | 92 ------ 12 files changed, 12 insertions(+), 835 deletions(-) delete mode 100644 binder_bindings/android/brillo/IUpdateEngine.aidl delete mode 100644 binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl delete mode 100644 binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl delete mode 100644 client_library/client.cc delete mode 100644 client_library/client_binder.cc delete mode 100644 client_library/client_binder.h delete mode 100644 parcelable_update_engine_status.cc delete mode 100644 parcelable_update_engine_status.h delete mode 100644 parcelable_update_engine_status_unittest.cc diff --git a/Android.bp b/Android.bp index db33a5e7..9031913b 100644 --- a/Android.bp +++ b/Android.bp @@ -369,46 +369,6 @@ cc_binary { required: ["android.hardware.boot@1.0-impl-wrapper.recovery"], } -// libupdate_engine_client (type: shared_library) -// ======================================================== -cc_library_shared { - name: "libupdate_engine_client", - - cflags: [ - "-Wall", - "-Werror", - "-Wno-unused-parameter", - "-DUSE_BINDER=1", - ], - export_include_dirs: ["client_library/include"], - include_dirs: [ - // TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used. - "external/cros/system_api/dbus", - "system", - ], - - aidl: { - local_include_dirs: ["binder_bindings"], - }, - - shared_libs: [ - "libchrome", - "libbrillo", - "libbinder", - "libbrillo-binder", - "libutils", - ], - - srcs: [ - "binder_bindings/android/brillo/IUpdateEngine.aidl", - "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl", - "client_library/client.cc", - "client_library/client_binder.cc", - "parcelable_update_engine_status.cc", - "update_status_utils.cc", - ], -} - // update_engine_client (type: executable) // ======================================================== // update_engine console client. diff --git a/BUILD.gn b/BUILD.gn index f2bddc9e..12799380 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -299,7 +299,6 @@ executable("update_engine") { # update_engine client library. static_library("libupdate_engine_client") { sources = [ - "client_library/client.cc", "client_library/client_dbus.cc", "update_status_utils.cc", ] diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl deleted file mode 100644 index 56e15246..00000000 --- a/binder_bindings/android/brillo/IUpdateEngine.aidl +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package android.brillo; - -import android.brillo.IUpdateEngineStatusCallback; -import android.brillo.ParcelableUpdateEngineStatus; - -interface IUpdateEngine { - void SetUpdateAttemptFlags(in int flags); - boolean AttemptUpdate(in String app_version, in String omaha_url, in int flags); - void AttemptRollback(in boolean powerwash); - boolean CanRollback(); - void ResetStatus(); - ParcelableUpdateEngineStatus GetStatus(); - void RebootIfNeeded(); - void SetChannel(in String target_channel, in boolean powewash); - String GetChannel(in boolean get_current_channel); - void SetCohortHint(in String cohort_hint); - String GetCohortHint(); - void SetP2PUpdatePermission(in boolean enabled); - boolean GetP2PUpdatePermission(); - void SetUpdateOverCellularPermission(in boolean enabled); - void SetUpdateOverCellularTarget(in String target_version, - in long target_size); - boolean GetUpdateOverCellularPermission(); - long GetDurationSinceUpdate(); - String GetPrevVersion(); - String GetRollbackPartition(); - void RegisterStatusCallback(in IUpdateEngineStatusCallback callback); - int GetLastAttemptError(); - int GetEolStatus(); -} diff --git a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl b/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl deleted file mode 100644 index 837d44d5..00000000 --- a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package android.brillo; - -import android.brillo.ParcelableUpdateEngineStatus; - -interface IUpdateEngineStatusCallback { - oneway - void HandleStatusUpdate(in ParcelableUpdateEngineStatus status); -} diff --git a/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl b/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl deleted file mode 100644 index fc10505b..00000000 --- a/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package android.brillo; - -parcelable ParcelableUpdateEngineStatus cpp_header - "update_engine/parcelable_update_engine_status.h"; diff --git a/client_library/client.cc b/client_library/client.cc deleted file mode 100644 index b05df90b..00000000 --- a/client_library/client.cc +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/client_library/include/update_engine/client.h" - -#include - -#if USE_BINDER -#include "update_engine/client_library/client_binder.h" -#else // !USE_BINDER -#include "update_engine/client_library/client_dbus.h" -#endif // USE_BINDER - -using std::unique_ptr; - -namespace update_engine { - -unique_ptr UpdateEngineClient::CreateInstance() { -#if USE_BINDER - auto update_engine_client_impl = new internal::BinderUpdateEngineClient{}; -#else // !USE_BINDER - auto update_engine_client_impl = new internal::DBusUpdateEngineClient{}; -#endif // USE_BINDER - auto ret = unique_ptr{update_engine_client_impl}; - - if (!update_engine_client_impl->Init()) { - ret.reset(); - } - - return ret; -} - -} // namespace update_engine diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc deleted file mode 100644 index 588bc64b..00000000 --- a/client_library/client_binder.cc +++ /dev/null @@ -1,264 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "update_engine/client_library/client_binder.h" - -#include - -#include -#include - -#include "update_engine/common_service.h" -#include "update_engine/parcelable_update_engine_status.h" -#include "update_engine/update_status_utils.h" - -using android::getService; -using android::OK; -using android::String16; -using android::String8; -using android::binder::Status; -using android::brillo::ParcelableUpdateEngineStatus; -using chromeos_update_engine::StringToUpdateStatus; -using std::string; -using update_engine::UpdateAttemptFlags; - -namespace update_engine { -namespace internal { - -bool BinderUpdateEngineClient::Init() { - if (!binder_watcher_.Init()) - return false; - - return getService(String16{"android.brillo.UpdateEngineService"}, - &service_) == OK; -} - -bool BinderUpdateEngineClient::AttemptUpdate(const string& in_app_version, - const string& in_omaha_url, - bool at_user_request) { - bool started; - return service_ - ->AttemptUpdate( - String16{in_app_version.c_str()}, - String16{in_omaha_url.c_str()}, - at_user_request ? 0 : UpdateAttemptFlags::kFlagNonInteractive, - &started) - .isOk(); -} - -bool BinderUpdateEngineClient::AttemptInstall( - const string& omaha_url, const std::vector& dlc_module_ids) { - return false; -} - -bool BinderUpdateEngineClient::GetStatus(int64_t* out_last_checked_time, - double* out_progress, - UpdateStatus* out_update_status, - string* out_new_version, - int64_t* out_new_size) const { - ParcelableUpdateEngineStatus status; - - if (!service_->GetStatus(&status).isOk()) - return false; - - *out_last_checked_time = status.last_checked_time_; - *out_progress = status.progress_; - StringToUpdateStatus(String8{status.current_operation_}.string(), - out_update_status); - *out_new_version = String8{status.new_version_}.string(); - *out_new_size = status.new_size_; - return true; -} - -bool BinderUpdateEngineClient::SetCohortHint(const string& in_cohort_hint) { - return service_->SetCohortHint(String16{in_cohort_hint.c_str()}).isOk(); -} - -bool BinderUpdateEngineClient::GetCohortHint(string* out_cohort_hint) const { - String16 out_as_string16; - - if (!service_->GetCohortHint(&out_as_string16).isOk()) - return false; - - *out_cohort_hint = String8{out_as_string16}.string(); - return true; -} - -bool BinderUpdateEngineClient::SetUpdateOverCellularPermission(bool allowed) { - return service_->SetUpdateOverCellularPermission(allowed).isOk(); -} - -bool BinderUpdateEngineClient::GetUpdateOverCellularPermission( - bool* allowed) const { - return service_->GetUpdateOverCellularPermission(allowed).isOk(); -} - -bool BinderUpdateEngineClient::SetP2PUpdatePermission(bool enabled) { - return service_->SetP2PUpdatePermission(enabled).isOk(); -} - -bool BinderUpdateEngineClient::GetP2PUpdatePermission(bool* enabled) const { - return service_->GetP2PUpdatePermission(enabled).isOk(); -} - -bool BinderUpdateEngineClient::Rollback(bool powerwash) { - return service_->AttemptRollback(powerwash).isOk(); -} - -bool BinderUpdateEngineClient::GetRollbackPartition( - string* rollback_partition) const { - String16 out_as_string16; - - if (!service_->GetRollbackPartition(&out_as_string16).isOk()) - return false; - - *rollback_partition = String8{out_as_string16}.string(); - return true; -} - -bool BinderUpdateEngineClient::GetPrevVersion(string* prev_version) const { - String16 out_as_string16; - - if (!service_->GetPrevVersion(&out_as_string16).isOk()) - return false; - - *prev_version = String8{out_as_string16}.string(); - return true; -} - -void 
BinderUpdateEngineClient::RebootIfNeeded() { - if (!service_->RebootIfNeeded().isOk()) { - // Reboot error code doesn't necessarily mean that a reboot - // failed. For example, D-Bus may be shutdown before we receive the - // result. - LOG(INFO) << "RebootIfNeeded() failure ignored."; - } -} - -bool BinderUpdateEngineClient::ResetStatus() { - return service_->ResetStatus().isOk(); -} - -Status BinderUpdateEngineClient::StatusUpdateCallback::HandleStatusUpdate( - const ParcelableUpdateEngineStatus& status) { - UpdateStatus update_status; - - StringToUpdateStatus(String8{status.current_operation_}.string(), - &update_status); - - for (auto& handler : client_->handlers_) { - handler->HandleStatusUpdate(status.last_checked_time_, - status.progress_, - update_status, - String8{status.new_version_}.string(), - status.new_size_); - } - - return Status::ok(); -} - -bool BinderUpdateEngineClient::RegisterStatusUpdateHandler( - StatusUpdateHandler* handler) { - if (!status_callback_.get()) { - status_callback_ = new BinderUpdateEngineClient::StatusUpdateCallback(this); - if (!service_->RegisterStatusCallback(status_callback_).isOk()) { - return false; - } - } - - handlers_.push_back(handler); - - int64_t last_checked_time; - double progress; - UpdateStatus update_status; - string new_version; - int64_t new_size; - - if (!GetStatus(&last_checked_time, - &progress, - &update_status, - &new_version, - &new_size)) { - handler->IPCError("Could not get status from binder service"); - } - - handler->HandleStatusUpdate( - last_checked_time, progress, update_status, new_version, new_size); - - return true; -} - -bool BinderUpdateEngineClient::UnregisterStatusUpdateHandler( - StatusUpdateHandler* handler) { - auto it = std::find(handlers_.begin(), handlers_.end(), handler); - if (it != handlers_.end()) { - handlers_.erase(it); - return true; - } - - return false; -} - -bool BinderUpdateEngineClient::SetTargetChannel(const string& in_target_channel, - bool allow_powerwash) { - return service_ - ->SetChannel(String16{in_target_channel.c_str()}, allow_powerwash) - .isOk(); -} - -bool BinderUpdateEngineClient::GetTargetChannel(string* out_channel) const { - String16 out_as_string16; - - if (!service_->GetChannel(false, &out_as_string16).isOk()) - return false; - - *out_channel = String8{out_as_string16}.string(); - return true; -} - -bool BinderUpdateEngineClient::GetChannel(string* out_channel) const { - String16 out_as_string16; - - if (!service_->GetChannel(true, &out_as_string16).isOk()) - return false; - - *out_channel = String8{out_as_string16}.string(); - return true; -} - -bool BinderUpdateEngineClient::GetLastAttemptError( - int32_t* last_attempt_error) const { - int out_as_int; - - if (!service_->GetLastAttemptError(&out_as_int).isOk()) - return false; - - *last_attempt_error = out_as_int; - return true; -} - -bool BinderUpdateEngineClient::GetEolStatus(int32_t* eol_status) const { - int out_as_int; - - if (!service_->GetEolStatus(&out_as_int).isOk()) - return false; - - *eol_status = out_as_int; - return true; -} - -} // namespace internal -} // namespace update_engine diff --git a/client_library/client_binder.h b/client_library/client_binder.h deleted file mode 100644 index f3e41026..00000000 --- a/client_library/client_binder.h +++ /dev/null @@ -1,117 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_ -#define UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_ - -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include "android/brillo/BnUpdateEngineStatusCallback.h" -#include "android/brillo/IUpdateEngine.h" - -#include "update_engine/client_library/include/update_engine/client.h" - -namespace update_engine { -namespace internal { - -class BinderUpdateEngineClient : public UpdateEngineClient { - public: - BinderUpdateEngineClient() = default; - bool Init(); - - virtual ~BinderUpdateEngineClient() = default; - - bool AttemptUpdate(const std::string& app_version, - const std::string& omaha_url, - bool at_user_request) override; - - bool AttemptInstall(const std::string& omaha_url, - const std::vector& dlc_module_ids) override; - - bool GetStatus(int64_t* out_last_checked_time, - double* out_progress, - UpdateStatus* out_update_status, - std::string* out_new_version, - int64_t* out_new_size) const override; - - bool SetCohortHint(const std::string& in_cohort_hint) override; - bool GetCohortHint(std::string* out_cohort_hint) const override; - - bool SetUpdateOverCellularPermission(bool allowed) override; - bool GetUpdateOverCellularPermission(bool* allowed) const override; - - bool SetP2PUpdatePermission(bool enabled) override; - bool GetP2PUpdatePermission(bool* enabled) const override; - - bool Rollback(bool powerwash) override; - - bool GetRollbackPartition(std::string* rollback_partition) const override; - - void RebootIfNeeded() override; - - bool GetPrevVersion(std::string* prev_version) const override; - - bool ResetStatus() override; - - bool SetTargetChannel(const std::string& target_channel, - bool allow_powerwash) override; - - bool GetTargetChannel(std::string* out_channel) const override; - - bool GetChannel(std::string* out_channel) const override; - - bool RegisterStatusUpdateHandler(StatusUpdateHandler* handler) override; - bool UnregisterStatusUpdateHandler(StatusUpdateHandler* handler) override; - - bool GetLastAttemptError(int32_t* last_attempt_error) const override; - - bool GetEolStatus(int32_t* eol_status) const override; - - private: - class StatusUpdateCallback - : public android::brillo::BnUpdateEngineStatusCallback { - public: - explicit StatusUpdateCallback(BinderUpdateEngineClient* client) - : client_(client) {} - - android::binder::Status HandleStatusUpdate( - const android::brillo::ParcelableUpdateEngineStatus& status) override; - - private: - BinderUpdateEngineClient* client_; - }; - - android::sp service_; - android::sp status_callback_; - std::vector handlers_; - brillo::BinderWatcher binder_watcher_; - - DISALLOW_COPY_AND_ASSIGN(BinderUpdateEngineClient); -}; // class BinderUpdateEngineClient - -} // namespace internal -} // namespace update_engine - -#endif // UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_ diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 3ffb0886..809ad13a 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -18,6 +18,8 @@ #include +#include + #include #include #include 
@@ -28,9 +30,19 @@ using chromeos_update_engine::StringToUpdateStatus; using dbus::Bus; using org::chromium::UpdateEngineInterfaceProxy; using std::string; +using std::unique_ptr; using std::vector; namespace update_engine { + +unique_ptr UpdateEngineClient::CreateInstance() { + auto ret = std::make_unique(); + if (!ret->Init()) { + ret.reset(); + } + return ret; +} + namespace internal { bool DBusUpdateEngineClient::Init() { diff --git a/parcelable_update_engine_status.cc b/parcelable_update_engine_status.cc deleted file mode 100644 index 8a2dbeba..00000000 --- a/parcelable_update_engine_status.cc +++ /dev/null @@ -1,122 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/parcelable_update_engine_status.h" -#include "update_engine/update_status_utils.h" - -#include - -using update_engine::UpdateEngineStatus; - -namespace android { -namespace brillo { - -ParcelableUpdateEngineStatus::ParcelableUpdateEngineStatus( - const UpdateEngineStatus& status) - : last_checked_time_(status.last_checked_time), - current_operation_( - chromeos_update_engine::UpdateStatusToString(status.status)), - progress_(status.progress), - current_version_(String16{status.current_version.c_str()}), - current_system_version_(String16{status.current_system_version.c_str()}), - new_size_(status.new_size_bytes), - new_version_(String16{status.new_version.c_str()}), - new_system_version_(String16{status.new_system_version.c_str()}) {} - -status_t ParcelableUpdateEngineStatus::writeToParcel(Parcel* parcel) const { - status_t status; - - status = parcel->writeInt64(last_checked_time_); - if (status != OK) { - return status; - } - - status = parcel->writeString16(current_operation_); - if (status != OK) { - return status; - } - - status = parcel->writeDouble(progress_); - if (status != OK) { - return status; - } - - status = parcel->writeString16(current_version_); - if (status != OK) { - return status; - } - - status = parcel->writeString16(current_system_version_); - if (status != OK) { - return status; - } - - status = parcel->writeInt64(new_size_); - if (status != OK) { - return status; - } - - status = parcel->writeString16(new_version_); - if (status != OK) { - return status; - } - - return parcel->writeString16(new_system_version_); -} - -status_t ParcelableUpdateEngineStatus::readFromParcel(const Parcel* parcel) { - status_t status; - - status = parcel->readInt64(&last_checked_time_); - if (status != OK) { - return status; - } - - status = parcel->readString16(¤t_operation_); - if (status != OK) { - return status; - } - - status = parcel->readDouble(&progress_); - if (status != OK) { - return status; - } - - status = parcel->readString16(¤t_version_); - if (status != OK) { - return status; - } - - status = parcel->readString16(¤t_system_version_); - if (status != OK) { - return status; - } - - status = parcel->readInt64(&new_size_); - if (status != OK) { - return status; - } - - status = 
parcel->readString16(&new_version_); - if (status != OK) { - return status; - } - - return parcel->readString16(&new_system_version_); -} - -} // namespace brillo -} // namespace android diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h deleted file mode 100644 index 3feac769..00000000 --- a/parcelable_update_engine_status.h +++ /dev/null @@ -1,63 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_ -#define UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_ - -#include -#include - -#include "update_engine/client_library/include/update_engine/update_status.h" - -namespace android { -namespace brillo { - -// Parcelable object containing the current status of update engine, to be sent -// over binder to clients from the server. -class ParcelableUpdateEngineStatus : public Parcelable { - public: - ParcelableUpdateEngineStatus() = default; - explicit ParcelableUpdateEngineStatus( - const update_engine::UpdateEngineStatus& status); - virtual ~ParcelableUpdateEngineStatus() = default; - - status_t writeToParcel(Parcel* parcel) const override; - status_t readFromParcel(const Parcel* parcel) override; - - // This list is kept in the Parcelable serialization order. - - // When the update_engine last checked for updates (seconds since unix Epoch) - int64_t last_checked_time_; - // The current status/operation of the update_engine. - android::String16 current_operation_; - // The current progress (0.0f-1.0f). - double progress_; - // The current product version. - android::String16 current_version_; - // The current system version. - android::String16 current_system_version_; - // The size of the update (bytes). This is int64_t for java compatibility. - int64_t new_size_; - // The new product version. - android::String16 new_version_; - // The new system version, if there is one (empty, otherwise). - android::String16 new_system_version_; -}; - -} // namespace brillo -} // namespace android - -#endif // UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_ diff --git a/parcelable_update_engine_status_unittest.cc b/parcelable_update_engine_status_unittest.cc deleted file mode 100644 index 20decb6b..00000000 --- a/parcelable_update_engine_status_unittest.cc +++ /dev/null @@ -1,92 +0,0 @@ -// -// Copyright (C) 2017 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "update_engine/parcelable_update_engine_status.h" -#include "update_engine/update_status_utils.h" - -#include -#include - -using android::Parcel; -using android::status_t; -using android::String16; -using android::brillo::ParcelableUpdateEngineStatus; -using update_engine::UpdateEngineStatus; -using update_engine::UpdateStatus; - -TEST(ParcelableUpdateEngineStatusTest, TestCreationFromUpdateEngineStatus) { - // This test creates an object and verifies that all the UpdateEngineStatus - // values are properly reflected in the Parcelable version of the class. - - UpdateEngineStatus ue_status = {123456789, - UpdateStatus::DOWNLOADING, - "0.1.2.3", - "1.2.3.4", - 0.5f, - 34567, - "2.3.4.5", - "3.4.5.6"}; - ParcelableUpdateEngineStatus parcelable_status(ue_status); - EXPECT_EQ(ue_status.last_checked_time, parcelable_status.last_checked_time_); - EXPECT_EQ( - String16{chromeos_update_engine::UpdateStatusToString(ue_status.status)}, - parcelable_status.current_operation_); - EXPECT_EQ(String16{ue_status.current_version.c_str()}, - parcelable_status.current_version_); - EXPECT_EQ(String16{ue_status.current_system_version.c_str()}, - parcelable_status.current_system_version_); - EXPECT_EQ(ue_status.progress, parcelable_status.progress_); - EXPECT_EQ(static_cast(ue_status.new_size_bytes), - parcelable_status.new_size_); - EXPECT_EQ(String16{ue_status.new_version.c_str()}, - parcelable_status.new_version_); - EXPECT_EQ(String16{ue_status.new_system_version.c_str()}, - parcelable_status.new_system_version_); -} - -TEST(ParcelableUpdateEngineStatusTest, TestParceling) { - // This tests the writeToParcel and readFromParcel methods for being correctly - // matched. - UpdateEngineStatus ue_status = {123456789, - UpdateStatus::DOWNLOADING, - "0.1.2.3", - "1.2.3.4", - 0.5f, - 34567, - "2.3.4.5", - "3.4.5.6"}; - ParcelableUpdateEngineStatus source_status(ue_status); - Parcel parcel_source, parcel_target; - status_t status = source_status.writeToParcel(&parcel_source); - EXPECT_EQ(::android::OK, status); - size_t parcel_len = parcel_source.dataSize(); - status = parcel_target.setData(parcel_source.data(), parcel_len); - EXPECT_EQ(::android::OK, status); - ParcelableUpdateEngineStatus target_status; - status = target_status.readFromParcel(&parcel_target); - EXPECT_EQ(::android::OK, status); - - EXPECT_EQ(source_status.last_checked_time_, target_status.last_checked_time_); - EXPECT_EQ(source_status.current_operation_, target_status.current_operation_); - EXPECT_EQ(source_status.current_version_, target_status.current_version_); - EXPECT_EQ(source_status.current_system_version_, - target_status.current_system_version_); - EXPECT_EQ(source_status.progress_, target_status.progress_); - EXPECT_EQ(source_status.new_size_, target_status.new_size_); - EXPECT_EQ(source_status.new_version_, target_status.new_version_); - EXPECT_EQ(source_status.new_system_version_, - target_status.new_system_version_); -} From 565331e3db3cca0746535a4f06dbf4de50032c5f Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 24 Jun 2019 14:11:29 -0700 Subject: [PATCH 045/624] update_engine: Break UpdateEngineDaemon into their own implementations It looks like Android and ChromeOS have completely different implementation of the daemon. So instead of polluting the source with USE_* flags, just break it into their own implementation files. At the very least this allows deprecating USE_BINDER and USE_OMAHA flag completely. BUG=chromium:978672 TEST=unittest, cros flash two times. 
Change-Id: Ia5c4f9274e275a2c1ba9334111b694514914a475 Reviewed-on: https://chromium-review.googlesource.com/1674583 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- Android.bp | 4 +-- BUILD.gn | 4 +-- daemon_android.cc | 64 +++++++++++++++++++++++++++++++++ daemon_android.h | 56 +++++++++++++++++++++++++++++ daemon_base.h | 40 +++++++++++++++++++++ daemon.cc => daemon_chromeos.cc | 55 +++++++--------------------- daemon.h => daemon_chromeos.h | 31 ++++------------ libcurl_http_fetcher.cc | 4 +-- main.cc | 7 ++-- 9 files changed, 187 insertions(+), 78 deletions(-) create mode 100644 daemon_android.cc create mode 100644 daemon_android.h create mode 100644 daemon_base.h rename daemon.cc => daemon_chromeos.cc (61%) rename daemon.h => daemon_chromeos.h (70%) diff --git a/Android.bp b/Android.bp index 9031913b..d9f3524e 100644 --- a/Android.bp +++ b/Android.bp @@ -29,12 +29,10 @@ cc_defaults { cflags: [ "-DBASE_VER=576279", - "-DUSE_BINDER=1", "-DUSE_CHROME_NETWORK_PROXY=0", "-DUSE_CHROME_KIOSK_APP=0", "-DUSE_HWID_OVERRIDE=0", "-DUSE_MTD=0", - "-DUSE_OMAHA=0", "-D_FILE_OFFSET_BITS=64", "-D_POSIX_C_SOURCE=199309L", "-Wa,--noexecstack", @@ -269,7 +267,7 @@ cc_library_static { ":libupdate_engine_aidl", "binder_service_android.cc", "certificate_checker.cc", - "daemon.cc", + "daemon_android.cc", "daemon_state_android.cc", "hardware_android.cc", "libcurl_http_fetcher.cc", diff --git a/BUILD.gn b/BUILD.gn index 12799380..14c7a924 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -69,14 +69,12 @@ pkg_config("target_defaults") { "__CHROMEOS__", "_FILE_OFFSET_BITS=64", "_POSIX_C_SOURCE=199309L", - "USE_BINDER=0", "USE_DBUS=${use.dbus}", "USE_FEC=0", "USE_HWID_OVERRIDE=${use.hwid_override}", "USE_CHROME_KIOSK_APP=${use.chrome_kiosk_app}", "USE_CHROME_NETWORK_PROXY=${use.chrome_network_proxy}", "USE_MTD=${use.mtd}", - "USE_OMAHA=1", "USE_SHILL=1", ] include_dirs = [ @@ -194,7 +192,7 @@ static_library("libupdate_engine") { "common_service.cc", "connection_manager.cc", "connection_utils.cc", - "daemon.cc", + "daemon_chromeos.cc", "dbus_connection.cc", "dbus_service.cc", "hardware_chromeos.cc", diff --git a/daemon_android.cc b/daemon_android.cc new file mode 100644 index 00000000..1aa921f8 --- /dev/null +++ b/daemon_android.cc @@ -0,0 +1,64 @@ +// +// Copyright (C) 2015 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/daemon_android.h" + +#include + +#include + +#include "update_engine/daemon_state_android.h" + +using std::unique_ptr; + +namespace chromeos_update_engine { + +unique_ptr DaemonBase::CreateInstance() { + return std::make_unique(); +} + +int DaemonAndroid::OnInit() { + // Register the |subprocess_| singleton with this Daemon as the signal + // handler. 
+ subprocess_.Init(this); + + int exit_code = brillo::Daemon::OnInit(); + if (exit_code != EX_OK) + return exit_code; + + android::BinderWrapper::Create(); + binder_watcher_.Init(); + + DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid(); + daemon_state_.reset(daemon_state_android); + LOG_IF(ERROR, !daemon_state_android->Initialize()) + << "Failed to initialize system state."; + + // Create the Binder Service. + binder_service_ = new BinderUpdateEngineAndroidService{ + daemon_state_android->service_delegate()}; + auto binder_wrapper = android::BinderWrapper::Get(); + if (!binder_wrapper->RegisterService(binder_service_->ServiceName(), + binder_service_)) { + LOG(ERROR) << "Failed to register binder service."; + } + + daemon_state_->AddObserver(binder_service_.get()); + daemon_state_->StartUpdater(); + return EX_OK; +} + +} // namespace chromeos_update_engine diff --git a/daemon_android.h b/daemon_android.h new file mode 100644 index 00000000..baead373 --- /dev/null +++ b/daemon_android.h @@ -0,0 +1,56 @@ +// +// Copyright (C) 2015 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_DAEMON_ANDROID_H_ +#define UPDATE_ENGINE_DAEMON_ANDROID_H_ + +#include + +#include + +#include "update_engine/binder_service_android.h" +#include "update_engine/common/subprocess.h" +#include "update_engine/daemon_base.h" +#include "update_engine/daemon_state_interface.h" + +namespace chromeos_update_engine { + +class DaemonAndroid : public DaemonBase { + public: + DaemonAndroid() = default; + + protected: + int OnInit() override; + + private: + // The Subprocess singleton class requires a |brillo::MessageLoop| in the + // current thread, so we need to initialize it from this class instead of + // the main() function. + Subprocess subprocess_; + + brillo::BinderWatcher binder_watcher_; + android::sp binder_service_; + + // The daemon state with all the required daemon classes for the configured + // platform. + std::unique_ptr daemon_state_; + + DISALLOW_COPY_AND_ASSIGN(DaemonAndroid); +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_DAEMON_ANDROID_H_ diff --git a/daemon_base.h b/daemon_base.h new file mode 100644 index 00000000..742a0ba2 --- /dev/null +++ b/daemon_base.h @@ -0,0 +1,40 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_DAEMON_BASE_H_ +#define UPDATE_ENGINE_DAEMON_BASE_H_ + +#include + +#include + +namespace chromeos_update_engine { + +class DaemonBase : public brillo::Daemon { + public: + DaemonBase() = default; + virtual ~DaemonBase() = default; + + // Creates an instance of the daemon. + static std::unique_ptr CreateInstance(); + + private: + DISALLOW_COPY_AND_ASSIGN(DaemonBase); +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_DAEMON_BASE_H_ diff --git a/daemon.cc b/daemon_chromeos.cc similarity index 61% rename from daemon.cc rename to daemon_chromeos.cc index f370564e..21740d81 100644 --- a/daemon.cc +++ b/daemon_chromeos.cc @@ -14,25 +14,25 @@ // limitations under the License. // -#include "update_engine/daemon.h" +#include "update_engine/daemon_chromeos.h" #include #include #include -#if USE_BINDER -#include -#endif // USE_BINDER -#if USE_OMAHA #include "update_engine/real_system_state.h" -#else // !USE_OMAHA -#include "update_engine/daemon_state_android.h" -#endif // USE_OMAHA + +using brillo::Daemon; +using std::unique_ptr; namespace chromeos_update_engine { -int UpdateEngineDaemon::OnInit() { +unique_ptr DaemonBase::CreateInstance() { + return std::make_unique(); +} + +int DaemonChromeOS::OnInit() { // Register the |subprocess_| singleton with this Daemon as the signal // handler. subprocess_.Init(this); @@ -41,12 +41,6 @@ int UpdateEngineDaemon::OnInit() { if (exit_code != EX_OK) return exit_code; -#if USE_BINDER - android::BinderWrapper::Create(); - binder_watcher_.Init(); -#endif // USE_BINDER - -#if USE_OMAHA // Initialize update engine global state but continue if something fails. // TODO(deymo): Move the daemon_state_ initialization to a factory method // avoiding the explicit re-usage of the |bus| instance, shared between @@ -55,42 +49,18 @@ int UpdateEngineDaemon::OnInit() { daemon_state_.reset(real_system_state); LOG_IF(ERROR, !real_system_state->Initialize()) << "Failed to initialize system state."; -#else // !USE_OMAHA - DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid(); - daemon_state_.reset(daemon_state_android); - LOG_IF(ERROR, !daemon_state_android->Initialize()) - << "Failed to initialize system state."; -#endif // USE_OMAHA -#if USE_BINDER - // Create the Binder Service. - binder_service_ = new BinderUpdateEngineAndroidService{ - daemon_state_android->service_delegate()}; - auto binder_wrapper = android::BinderWrapper::Get(); - if (!binder_wrapper->RegisterService(binder_service_->ServiceName(), - binder_service_)) { - LOG(ERROR) << "Failed to register binder service."; - } - - daemon_state_->AddObserver(binder_service_.get()); -#endif // USE_BINDER - -#if USE_DBUS // Create the DBus service. 
dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state)); daemon_state_->AddObserver(dbus_adaptor_.get()); - dbus_adaptor_->RegisterAsync(base::Bind(&UpdateEngineDaemon::OnDBusRegistered, - base::Unretained(this))); + dbus_adaptor_->RegisterAsync( + base::Bind(&DaemonChromeOS::OnDBusRegistered, base::Unretained(this))); LOG(INFO) << "Waiting for DBus object to be registered."; -#else // !USE_DBUS - daemon_state_->StartUpdater(); -#endif // USE_DBUS return EX_OK; } -#if USE_DBUS -void UpdateEngineDaemon::OnDBusRegistered(bool succeeded) { +void DaemonChromeOS::OnDBusRegistered(bool succeeded) { if (!succeeded) { LOG(ERROR) << "Registering the UpdateEngineAdaptor"; QuitWithExitCode(1); @@ -108,6 +78,5 @@ void UpdateEngineDaemon::OnDBusRegistered(bool succeeded) { } daemon_state_->StartUpdater(); } -#endif // USE_DBUS } // namespace chromeos_update_engine diff --git a/daemon.h b/daemon_chromeos.h similarity index 70% rename from daemon.h rename to daemon_chromeos.h index 3c896bce..657e7971 100644 --- a/daemon.h +++ b/daemon_chromeos.h @@ -14,37 +14,26 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DAEMON_H_ -#define UPDATE_ENGINE_DAEMON_H_ +#ifndef UPDATE_ENGINE_DAEMON_CHROMEOS_H_ +#define UPDATE_ENGINE_DAEMON_CHROMEOS_H_ #include -#include -#if USE_BINDER -#include -#endif // USE_BINDER -#include - -#if USE_BINDER -#include "update_engine/binder_service_android.h" -#endif // USE_BINDER #include "update_engine/common/subprocess.h" +#include "update_engine/daemon_base.h" #include "update_engine/daemon_state_interface.h" -#if USE_DBUS #include "update_engine/dbus_service.h" -#endif // USE_DBUS namespace chromeos_update_engine { -class UpdateEngineDaemon : public brillo::Daemon { +class DaemonChromeOS : public DaemonBase { public: - UpdateEngineDaemon() = default; + DaemonChromeOS() = default; protected: int OnInit() override; private: -#if USE_DBUS // Run from the main loop when the |dbus_adaptor_| object is registered. At // this point we can request ownership of the DBus service name and continue // initialization. @@ -52,25 +41,19 @@ class UpdateEngineDaemon : public brillo::Daemon { // Main D-Bus service adaptor. std::unique_ptr dbus_adaptor_; -#endif // USE_DBUS // The Subprocess singleton class requires a brillo::MessageLoop in the // current thread, so we need to initialize it from this class instead of // the main() function. Subprocess subprocess_; -#if USE_BINDER - brillo::BinderWatcher binder_watcher_; - android::sp binder_service_; -#endif // USE_BINDER - // The daemon state with all the required daemon classes for the configured // platform. std::unique_ptr daemon_state_; - DISALLOW_COPY_AND_ASSIGN(UpdateEngineDaemon); + DISALLOW_COPY_AND_ASSIGN(DaemonChromeOS); }; } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DAEMON_H_ +#endif // UPDATE_ENGINE_DAEMON_CHROMEOS_H_ diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index f4694357..d39351c6 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -269,11 +269,11 @@ void LibcurlHttpFetcher::ResumeTransfer(const string& url) { } else if (base::StartsWith( url_, "https://", base::CompareCase::INSENSITIVE_ASCII)) { SetCurlOptionsForHttps(); -#if !USE_OMAHA +#ifdef __ANDROID__ } else if (base::StartsWith( url_, "file://", base::CompareCase::INSENSITIVE_ASCII)) { SetCurlOptionsForFile(); -#endif +#endif // __ANDROID__ } else { LOG(ERROR) << "Received invalid URI: " << url_; // Lock down to no protocol supported for the transfer. 
diff --git a/main.cc b/main.cc index 26f9efbb..b4354673 100644 --- a/main.cc +++ b/main.cc @@ -33,9 +33,10 @@ #include #include +#include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" -#include "update_engine/daemon.h" +#include "update_engine/daemon_base.h" using std::string; @@ -190,8 +191,8 @@ int main(int argc, char** argv) { // Done _after_ log file creation. umask(S_IRWXG | S_IRWXO); - chromeos_update_engine::UpdateEngineDaemon update_engine_daemon; - int exit_code = update_engine_daemon.Run(); + auto daemon = chromeos_update_engine::DaemonBase::CreateInstance(); + int exit_code = daemon->Run(); chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit(); From 7dc8cd51dfa12622e0c14c797a1126a414deb6ce Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 24 Jun 2019 14:41:14 -0700 Subject: [PATCH 046/624] update_engine: Remove unused variable new_system_version It seems like new_system_version and current_system_version values are not being used anywhere. Deprecate them. BUG=chromium:978672 TEST=unittest Change-Id: I3e554bafa59d9759dca13acffb360697c63df815 Reviewed-on: https://chromium-review.googlesource.com/1674804 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- client_library/include/update_engine/update_status.h | 4 ---- update_attempter.cc | 3 --- update_attempter.h | 1 - update_attempter_unittest.cc | 4 ---- 4 files changed, 12 deletions(-) diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index 5a3dccf8..4b86df37 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -64,16 +64,12 @@ struct UpdateEngineStatus { UpdateStatus status; // the current product version (oem bundle id) std::string current_version; - // the current system version - std::string current_system_version; // The current progress (0.0f-1.0f). 
double progress; // the size of the update (bytes) uint64_t new_size_bytes; // the new product version std::string new_version; - // the new system version, if there is one (empty, otherwise) - std::string new_system_version; }; } // namespace update_engine diff --git a/update_attempter.cc b/update_attempter.cc index 3f778868..50aa9f42 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1203,7 +1203,6 @@ void UpdateAttempter::ActionCompleted(ActionProcessor* processor, new InstallPlan(omaha_response_handler_action->install_plan())); UpdateLastCheckedTime(); new_version_ = install_plan_->version; - new_system_version_ = install_plan_->system_version; new_payload_size_ = 0; for (const auto& payload : install_plan_->payloads) new_payload_size_ += payload.size; @@ -1351,11 +1350,9 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { out_status->last_checked_time = last_checked_time_; out_status->status = status_; out_status->current_version = omaha_request_params_->app_version(); - out_status->current_system_version = omaha_request_params_->system_version(); out_status->progress = download_progress_; out_status->new_size_bytes = new_payload_size_; out_status->new_version = new_version_; - out_status->new_system_version = new_system_version_; return true; } diff --git a/update_attempter.h b/update_attempter.h index c429076e..1c3abe11 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -472,7 +472,6 @@ class UpdateAttempter : public ActionProcessorDelegate, int64_t last_checked_time_ = 0; std::string prev_version_; std::string new_version_ = "0.0.0.0"; - std::string new_system_version_; uint64_t new_payload_size_ = 0; // Flags influencing all periodic update checks UpdateAttemptFlags update_attempt_flags_ = UpdateAttemptFlags::kNone; diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index fb33011a..8a896dae 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -1378,8 +1378,6 @@ TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status); EXPECT_TRUE(attempter_.install_plan_); EXPECT_EQ(attempter_.install_plan_->version, status.new_version); - EXPECT_EQ(attempter_.install_plan_->system_version, - status.new_system_version); EXPECT_EQ(attempter_.install_plan_->payloads[0].size, status.new_size_bytes); } @@ -1400,8 +1398,6 @@ TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { attempter_.GetStatus(&status); EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status); EXPECT_EQ(response_action.install_plan_.version, status.new_version); - EXPECT_EQ(response_action.install_plan_.system_version, - status.new_system_version); EXPECT_EQ(response_action.install_plan_.payloads[0].size, status.new_size_bytes); } From 165d4b4eac9691108ee3425b16f30e4b2456bca9 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 27 Jun 2019 11:04:14 -0700 Subject: [PATCH 047/624] update_engine: disable linter complaints about the unittest.cc file names BUG=chromium:979268 TEST=none Change-Id: I35bb6f83d5bf25f4eb2f384c4ca41a1501858379 Reviewed-on: https://chromium-review.googlesource.com/1680857 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Keigo Oka --- BUILD.gn | 3 +++ 1 file changed, 3 insertions(+) diff --git a/BUILD.gn b/BUILD.gn index 14c7a924..9ff26e03 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -14,6 +14,9 @@ # limitations under the License. # +# Stop linter from complaining XXX_unittest.cc naming. 
+# gnlint: disable=GnLintSourceFileNames + import("//common-mk/generate-dbus-adaptors.gni") import("//common-mk/generate-dbus-proxies.gni") import("//common-mk/openssl_pem.gni") From ec39a220c4122305f92dbede8e7f0154bfe4c7b1 Mon Sep 17 00:00:00 2001 From: Keigo Oka Date: Thu, 27 Jun 2019 13:51:07 +0900 Subject: [PATCH 048/624] update_engine: fix heap-buffer-overflow ASAN error happens depending on library ordering in compile flags (crbug/887845). This change updates BUILD.gn so that correct library ordering is generated. BUG=chromium:976843 TEST=setup_board --board=amd64-generic --profile=asan TEST=./build_packages --board=amd64-generic update_engine TEST=FEATURES=test emerge-amd64-generic update_engine # Some test fail, but they also fail without ASAN and this modification. Change-Id: I40bb72f25e7012105eecac5c53d01662d68bf3d7 Reviewed-on: https://chromium-review.googlesource.com/1679810 Tested-by: Manoj Gupta Commit-Ready: Manoj Gupta Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- BUILD.gn | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 9ff26e03..2d4e9a44 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -396,8 +396,14 @@ if (use.test || use.fuzzer) { "payload_generator/fake_filesystem.cc", "update_manager/umtest_utils.cc", ] - all_dependent_configs = [ "//common-mk:test" ] - configs += [ ":target_defaults" ] + + # TODO(crbug.com/887845): After library odering issue is fixed, + # //common-mk:test can be moved in all_dependent_configs and + # //common-mk:test in each test configs can be removed. + configs += [ + "//common-mk:test", + ":target_defaults", + ] pkg_deps = [ "libshill-client-test" ] deps = [ ":libupdate_engine", @@ -430,7 +436,12 @@ if (use.test) { "common/http_common.cc", "test_http_server.cc", ] - configs += [ ":target_defaults" ] + # //common-mk:test should be on the top. + # TODO(crbug.com/887845): Remove this after library odering issue is fixed. + configs += [ + "//common-mk:test", + ":target_defaults", + ] } # Test subprocess helper. @@ -438,7 +449,12 @@ if (use.test) { sources = [ "test_subprocess.cc", ] - configs += [ ":target_defaults" ] + # //common-mk:test should be on the top. + # TODO(crbug.com/887845): Remove this after library odering issue is fixed. + configs += [ + "//common-mk:test", + ":target_defaults", + ] } # Main unittest file. @@ -525,7 +541,12 @@ if (use.test) { "update_manager/variable_unittest.cc", "update_manager/weekly_time_unittest.cc", ] - configs += [ ":target_defaults" ] + # //common-mk:test should be on the top. + # TODO(crbug.com/887845): Remove this after library odering issue is fixed. 
+ configs += [ + "//common-mk:test", + ":target_defaults", + ] pkg_deps = [ "libbrillo-test-${libbase_ver}", "libchrome-test-${libbase_ver}", From 8ecb65f57ba65c6fc415e40c0dd3f3730088a55d Mon Sep 17 00:00:00 2001 From: Justin Yun Date: Mon, 1 Jul 2019 11:04:13 +0900 Subject: [PATCH 049/624] Rename product_services to system_ext Bug: 134359158 Test: run tests for update_engine_unittests Change-Id: I5d5b7f33f2b106f23152049f07e4b44c0cdcdc7e --- boot_control_android_unittest.cc | 14 ++++++-------- .../payload_generation_config_unittest.cc | 12 ++++++------ 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index b2885a3f..65c2381f 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -784,12 +784,11 @@ TEST_P(BootControlAndroidGroupTestP, GroupTooBig) { TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) { ExpectStoreMetadata(PartitionMetadata{ - .groups = { - {.name = T("android"), - .size = 3_GiB, - .partitions = {{.name = T("system"), .size = 2_GiB}, - {.name = T("product_services"), .size = 1_GiB}}}}}); - ExpectUnmap({T("system"), T("vendor"), T("product_services")}); + .groups = {{.name = T("android"), + .size = 3_GiB, + .partitions = {{.name = T("system"), .size = 2_GiB}, + {.name = T("system_ext"), .size = 1_GiB}}}}}); + ExpectUnmap({T("system"), T("vendor"), T("system_ext")}); EXPECT_TRUE(bootctl_.InitPartitionMetadata( target(), @@ -797,8 +796,7 @@ TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) { .groups = {{.name = "android", .size = 3_GiB, .partitions = {{.name = "system", .size = 2_GiB}, - {.name = "product_services", - .size = 1_GiB}}}, + {.name = "system_ext", .size = 1_GiB}}}, SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}, true)); } diff --git a/payload_generator/payload_generation_config_unittest.cc b/payload_generator/payload_generation_config_unittest.cc index 70a3df31..aca9655d 100644 --- a/payload_generator/payload_generation_config_unittest.cc +++ b/payload_generator/payload_generation_config_unittest.cc @@ -59,7 +59,7 @@ TEST_F(PayloadGenerationConfigTest, LoadDynamicPartitionMetadataTest) { ASSERT_TRUE( store.LoadFromString("super_partition_groups=group_a group_b\n" "group_a_size=3221225472\n" - "group_a_partition_list=system product_services\n" + "group_a_partition_list=system system_ext\n" "group_b_size=2147483648\n" "group_b_partition_list=vendor\n")); EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store)); @@ -72,7 +72,7 @@ TEST_F(PayloadGenerationConfigTest, LoadDynamicPartitionMetadataTest) { EXPECT_EQ(3221225472u, group_a.size()); ASSERT_EQ(2, group_a.partition_names_size()); EXPECT_EQ("system", group_a.partition_names(0)); - EXPECT_EQ("product_services", group_a.partition_names(1)); + EXPECT_EQ("system_ext", group_a.partition_names(1)); const auto& group_b = image_config.dynamic_partition_metadata->groups(1); EXPECT_EQ("group_b", group_b.name()); @@ -108,17 +108,17 @@ TEST_F(PayloadGenerationConfigTest, ValidateDynamicPartitionMetadata) { PartitionConfig system("system"); system.size = 2147483648u; - PartitionConfig product_services("product_services"); - product_services.size = 1073741824u; + PartitionConfig system_ext("system_ext"); + system_ext.size = 1073741824u; image_config.partitions.push_back(std::move(system)); - image_config.partitions.push_back(std::move(product_services)); + image_config.partitions.push_back(std::move(system_ext)); brillo::KeyValueStore store; ASSERT_TRUE( 
store.LoadFromString("super_partition_groups=foo\n" "foo_size=3221225472\n" - "foo_partition_list=system product_services\n")); + "foo_partition_list=system system_ext\n")); EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store)); EXPECT_NE(nullptr, image_config.dynamic_partition_metadata); From 6fc0b88c3139144fb44c89b3ed892e987050ea86 Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Wed, 26 Jun 2019 09:51:49 -0700 Subject: [PATCH 050/624] update_engine: add COMPONENT to OWNERS file update_engine bugs should be filed against Internals>Installer component, add that information to the OWNERS file to make it easier to file bugs against the correct component. Also add comments showing this COMPONENT is Chromium OS only since the OWNERS file is shared by Android. BUG=chromium:977646 TEST=None Change-Id: Ic918378bb786c9b79790bb155dcf142497fcae17 Reviewed-on: https://chromium-review.googlesource.com/1678727 Tested-by: Xiaochu Liu Commit-Ready: Xiaochu Liu Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- OWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/OWNERS b/OWNERS index 07ee38ea..6709d7ab 100644 --- a/OWNERS +++ b/OWNERS @@ -8,3 +8,6 @@ senj@google.com benchan@google.com ahassani@google.com xiaochu@google.com + +# Chromium OS only: +# COMPONENT: Internals>Installer From 0ae8fe18ee8d0da7dce92d6a4c2c0b3ff2075b7c Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 26 Jun 2019 14:32:50 -0700 Subject: [PATCH 051/624] update_engine: Attach session ID to HTTP header of binary download In order for Omaha to correlate Omaha Client requests with the actual binary download, the session ID must be attached to the HTTP header of the binary download in the X-Goog-Update-SessionId. Also, remove the HTTP header of X-Goog-Update-SessionId added into the Omaha requests. 
BUG=chromium:940515 TEST=unittests # new unittests Change-Id: I0759562f2d1c8c003064ad976ca1ae6ce039b960 --- Android.bp | 1 + BUILD.gn | 1 + common/constants.cc | 6 +++ common/constants.h | 6 +++ common/file_fetcher.h | 6 +++ common/http_fetcher.h | 8 +++ common/mock_http_fetcher.h | 6 +++ common/multi_range_http_fetcher.h | 5 ++ libcurl_http_fetcher.cc | 32 ++++++++++++ libcurl_http_fetcher.h | 3 ++ libcurl_http_fetcher_unittest.cc | 81 +++++++++++++++++++++++++++++++ omaha_request_action.cc | 7 --- omaha_request_action_unittest.cc | 4 -- update_attempter.cc | 1 + update_attempter.h | 1 - update_attempter_unittest.cc | 29 +++++++++++ 16 files changed, 185 insertions(+), 12 deletions(-) create mode 100644 libcurl_http_fetcher_unittest.cc diff --git a/Android.bp b/Android.bp index d9f3524e..b91e8839 100644 --- a/Android.bp +++ b/Android.bp @@ -622,6 +622,7 @@ cc_test { "common/terminator_unittest.cc", "common/test_utils.cc", "common/utils_unittest.cc", + "libcurl_http_fetcher_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", diff --git a/BUILD.gn b/BUILD.gn index 2d4e9a44..224ad450 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -478,6 +478,7 @@ if (use.test) { "connection_manager_unittest.cc", "hardware_chromeos_unittest.cc", "image_properties_chromeos_unittest.cc", + "libcurl_http_fetcher_unittest.cc", "metrics_reporter_omaha_unittest.cc", "metrics_utils_unittest.cc", "omaha_request_action_unittest.cc", diff --git a/common/constants.cc b/common/constants.cc index 5ab96b0f..87bdf911 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -126,4 +126,10 @@ const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL"; const char kOmahaUpdaterVersion[] = "0.1.0.0"; +// X-Goog-Update headers. +const char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity"; +const char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId"; +const char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater"; +const char kXGoogleUpdateSessionId[] = "X-Goog-SessionId"; + } // namespace chromeos_update_engine diff --git a/common/constants.h b/common/constants.h index 9b4623f5..d95a56a1 100644 --- a/common/constants.h +++ b/common/constants.h @@ -110,6 +110,12 @@ extern const char kPayloadPropertyRunPostInstall[]; extern const char kOmahaUpdaterVersion[]; +// X-Goog-Update headers. +extern const char kXGoogleUpdateInteractivity[]; +extern const char kXGoogleUpdateAppId[]; +extern const char kXGoogleUpdateUpdater[]; +extern const char kXGoogleUpdateSessionId[]; + // A download source is any combination of protocol and server (that's of // interest to us when looking at UMA metrics) using which we may download // the payload. diff --git a/common/file_fetcher.h b/common/file_fetcher.h index fbdfc327..bd390074 100644 --- a/common/file_fetcher.h +++ b/common/file_fetcher.h @@ -59,6 +59,12 @@ class FileFetcher : public HttpFetcher { void SetHeader(const std::string& header_name, const std::string& header_value) override {} + bool GetHeader(const std::string& header_name, + std::string* header_value) const override { + header_value->clear(); + return false; + } + // Suspend the asynchronous file read. 
void Pause() override; diff --git a/common/http_fetcher.h b/common/http_fetcher.h index 93b0e249..94f31d75 100644 --- a/common/http_fetcher.h +++ b/common/http_fetcher.h @@ -100,6 +100,14 @@ class HttpFetcher { virtual void SetHeader(const std::string& header_name, const std::string& header_value) = 0; + // Only used for testing. + // If |header_name| is set, the value will be set into |header_value|. + // On success the boolean true will be returned, hoewever on failture to find + // the |header_name| in the header the return value will be false. The state + // in which |header_value| is left in for failures is an empty string. + virtual bool GetHeader(const std::string& header_name, + std::string* header_value) const = 0; + // If data is coming in too quickly, you can call Pause() to pause the // transfer. The delegate will not have ReceivedBytes() called while // an HttpFetcher is paused. diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h index 492e6cee..0f043190 100644 --- a/common/mock_http_fetcher.h +++ b/common/mock_http_fetcher.h @@ -89,6 +89,12 @@ class MockHttpFetcher : public HttpFetcher { void SetHeader(const std::string& header_name, const std::string& header_value) override; + bool GetHeader(const std::string& header_name, + std::string* header_value) const override { + header_value->clear(); + return false; + } + // Return the value of the header |header_name| or the empty string if not // set. std::string GetHeader(const std::string& header_name) const; diff --git a/common/multi_range_http_fetcher.h b/common/multi_range_http_fetcher.h index f57ea7f6..ef32f0d5 100644 --- a/common/multi_range_http_fetcher.h +++ b/common/multi_range_http_fetcher.h @@ -83,6 +83,11 @@ class MultiRangeHttpFetcher : public HttpFetcher, public HttpFetcherDelegate { base_fetcher_->SetHeader(header_name, header_value); } + bool GetHeader(const std::string& header_name, + std::string* header_value) const override { + return base_fetcher_->GetHeader(header_name, header_value); + } + void Pause() override { base_fetcher_->Pause(); } void Unpause() override { base_fetcher_->Unpause(); } diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index d39351c6..06722fd6 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -392,6 +393,37 @@ void LibcurlHttpFetcher::SetHeader(const string& header_name, extra_headers_[base::ToLowerASCII(header_name)] = header_line; } +// Inputs: header_name, header_value +// Example: +// extra_headers_ = { {"foo":"foo: 123"}, {"bar":"bar:"} } +// string tmp = "gibberish"; +// Case 1: +// GetHeader("foo", &tmp) -> tmp = "123", return true. +// Case 2: +// GetHeader("bar", &tmp) -> tmp = "", return true. +// Case 3: +// GetHeader("moo", &tmp) -> tmp = "", return false. +bool LibcurlHttpFetcher::GetHeader(const string& header_name, + string* header_value) const { + // Initially clear |header_value| to handle both success and failures without + // leaving |header_value| in a unclear state. + header_value->clear(); + auto header_key = base::ToLowerASCII(header_name); + auto header_line_itr = extra_headers_.find(header_key); + // If the |header_name| was never set, indicate so by returning false. + if (header_line_itr == extra_headers_.end()) + return false; + // From |SetHeader()| the check for |header_name| to not include ":" is + // verified, so finding the first index of ":" is a safe operation. 
+ auto header_line = header_line_itr->second; + *header_value = header_line.substr(header_line.find(':') + 1); + // The following is neccessary to remove the leading ' ' before the header + // value that was place only if |header_value| passed to |SetHeader()| was + // a non-empty string. + header_value->erase(0, 1); + return true; +} + void LibcurlHttpFetcher::CurlPerformOnce() { CHECK(transfer_in_progress_); int running_handles = 0; diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h index 24103de1..3978b70d 100644 --- a/libcurl_http_fetcher.h +++ b/libcurl_http_fetcher.h @@ -61,6 +61,9 @@ class LibcurlHttpFetcher : public HttpFetcher { void SetHeader(const std::string& header_name, const std::string& header_value) override; + bool GetHeader(const std::string& header_name, + std::string* header_value) const override; + // Suspend the transfer by calling curl_easy_pause(CURLPAUSE_ALL). void Pause() override; diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc new file mode 100644 index 00000000..88e48fa4 --- /dev/null +++ b/libcurl_http_fetcher_unittest.cc @@ -0,0 +1,81 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/libcurl_http_fetcher.h" + +#include + +#include +#include + +#include "update_engine/common/fake_hardware.h" +#include "update_engine/common/mock_proxy_resolver.h" + +using std::string; + +namespace chromeos_update_engine { + +namespace { +constexpr char kHeaderName[] = "X-Goog-Test-Header"; +} + +class LibcurlHttpFetcherTest : public ::testing::Test { + protected: + void SetUp() override { + loop_.SetAsCurrent(); + fake_hardware_.SetIsOfficialBuild(true); + fake_hardware_.SetIsOOBEEnabled(false); + } + + brillo::FakeMessageLoop loop_{nullptr}; + FakeHardware fake_hardware_; + LibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_}; +}; + +TEST_F(LibcurlHttpFetcherTest, GetEmptyHeaderValueTest) { + const string header_value = ""; + string actual_header_value; + libcurl_fetcher_.SetHeader(kHeaderName, header_value); + EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value)); + EXPECT_EQ("", actual_header_value); +} + +TEST_F(LibcurlHttpFetcherTest, GetHeaderTest) { + const string header_value = "This-is-value 123"; + string actual_header_value; + libcurl_fetcher_.SetHeader(kHeaderName, header_value); + EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value)); + EXPECT_EQ(header_value, actual_header_value); +} + +TEST_F(LibcurlHttpFetcherTest, GetNonExistentHeaderValueTest) { + string actual_header_value; + // Skip |SetHeaader()| call. + EXPECT_FALSE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value)); + // Even after a failed |GetHeaderValue()|, enforce that the passed pointer to + // modifiable string was cleared to be empty. 
+ EXPECT_EQ("", actual_header_value); +} + +TEST_F(LibcurlHttpFetcherTest, GetHeaderEdgeCaseTest) { + const string header_value = "\a\b\t\v\f\r\\ edge:-case: \a\b\t\v\f\r\\"; + string actual_header_value; + libcurl_fetcher_.SetHeader(kHeaderName, header_value); + EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value)); + EXPECT_EQ(header_value, actual_header_value); +} + +} // namespace chromeos_update_engine diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 40e52f08..6c67a3b7 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -107,12 +107,6 @@ constexpr char kAttrStatus[] = "status"; constexpr char kValPostInstall[] = "postinstall"; constexpr char kValNoUpdate[] = "noupdate"; -// X-Goog-Update headers. -constexpr char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity"; -constexpr char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId"; -constexpr char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater"; -constexpr char kXGoogleUpdateSessionId[] = "X-Goog-Update-SessionId"; - // updatecheck attributes (without the underscore prefix). constexpr char kAttrEol[] = "eol"; constexpr char kAttrRollback[] = "rollback"; @@ -444,7 +438,6 @@ void OmahaRequestAction::PerformAction() { kXGoogleUpdateUpdater, base::StringPrintf( "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion)); - http_fetcher_->SetHeader(kXGoogleUpdateSessionId, session_id_); http_fetcher_->SetPostData( request_post.data(), request_post.size(), kHttpContentTypeTextXml); diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index e13d10e9..41b2520b 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -84,7 +84,6 @@ const char kCurrentVersion[] = "0.1.0.0"; const char kTestAppId[] = "test-app-id"; const char kTestAppId2[] = "test-app2-id"; const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck"; -const char kTestSessionId[] = "12341234-1234-1234-1234-1234123412341234"; // This is a helper struct to allow unit tests build an update response with the // values they care about. 
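Note that only the constant definitions disappear from omaha_request_action.cc here; the Omaha request still sends the interactivity, app-id and updater headers, while the session-id header now travels with the payload fetcher instead (see the update_attempter.cc hunk further down). The constants themselves presumably move to a shared header such as common/constants.h, which the unittest below starts including; that location is an inference from the includes, not something this hunk shows. The resulting flow, sketched with the names used by this patch:

  // In UpdateAttempter::BuildUpdateActions(): tag the payload fetcher so every
  // download request carries the same session ID as the update check.
  download_fetcher->SetHeader(kXGoogleUpdateSessionId, session_id_);

  // Anyone holding the fetcher (e.g. a unit test) can read it back through the
  // new accessor instead of poking at fetcher internals.
  std::string session_id_header;
  EXPECT_TRUE(fetcher->GetHeader(kXGoogleUpdateSessionId, &session_id_header));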
@@ -297,8 +296,6 @@ class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate { fetcher->GetHeader("X-Goog-Update-Interactivity")); EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-Goog-Update-AppId")); EXPECT_NE("", fetcher->GetHeader("X-Goog-Update-Updater")); - EXPECT_EQ(kTestSessionId, - fetcher->GetHeader("X-Goog-Update-SessionId")); } post_data_ = fetcher->post_data(); } else if (action->Type() == @@ -370,7 +367,6 @@ class OmahaRequestActionTest : public ::testing::Test { .expected_check_result = metrics::CheckResult::kUpdateAvailable, .expected_check_reaction = metrics::CheckReaction::kUpdating, .expected_download_error_code = metrics::DownloadErrorCode::kUnset, - .session_id = kTestSessionId, }; } diff --git a/update_attempter.cc b/update_attempter.cc index 50aa9f42..b3cb3c36 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -683,6 +683,7 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { download_fetcher->set_server_to_check(ServerToCheck::kDownload); if (interactive) download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive); + download_fetcher->SetHeader(kXGoogleUpdateSessionId, session_id_); auto download_action = std::make_unique(prefs_, system_state_->boot_control(), diff --git a/update_attempter.h b/update_attempter.h index 1c3abe11..b0654d84 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -269,7 +269,6 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess); FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest); FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest); - FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnUpdateCheck); FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha); FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnOmahaRequestActions); FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 8a896dae..8b8e3902 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -31,6 +31,7 @@ #include #include +#include "update_engine/common/constants.h" #include "update_engine/common/dlcservice_interface.h" #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_prefs.h" @@ -43,6 +44,7 @@ #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" #include "update_engine/fake_system_state.h" +#include "update_engine/libcurl_http_fetcher.h" #include "update_engine/mock_p2p_manager.h" #include "update_engine/mock_payload_state.h" #include "update_engine/mock_service_observer.h" @@ -195,6 +197,7 @@ class UpdateAttempterTest : public ::testing::Test { void SessionIdTestChange(); void SessionIdTestEnforceEmptyStrPingOmaha(); void SessionIdTestConsistencyInUpdateFlow(); + void SessionIdTestInDownloadAction(); void UpdateToQuickFixBuildStart(bool set_token); void ResetRollbackHappenedStart(bool is_consumer, bool is_policy_available, @@ -305,6 +308,32 @@ TEST_F(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow) { loop_.Run(); } +void UpdateAttempterTest::SessionIdTestInDownloadAction() { + // The session ID passed into |DownloadAction|'s |LibcurlHttpFetcher| should + // be enforced to be included in the HTTP header as X-Goog-Update-SessionId. 
+ string header_value; + auto CheckSessionIdInDownloadAction = [&header_value](AbstractAction* aa) { + if (aa->Type() == DownloadAction::StaticType()) { + DownloadAction* da = static_cast(aa); + EXPECT_TRUE(da->http_fetcher()->GetHeader(kXGoogleUpdateSessionId, + &header_value)); + } + }; + EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))) + .WillRepeatedly(Invoke(CheckSessionIdInDownloadAction)); + attempter_.BuildUpdateActions(false); + // Validate that X-Goog-Update_SessionId is set correctly in HTTP Header. + EXPECT_EQ(attempter_.session_id_, header_value); + ScheduleQuitMainLoop(); +} + +TEST_F(UpdateAttempterTest, SessionIdTestInDownloadAction) { + loop_.PostTask(FROM_HERE, + base::Bind(&UpdateAttempterTest::SessionIdTestInDownloadAction, + base::Unretained(this))); + loop_.Run(); +} + TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) { unique_ptr fetcher(new MockHttpFetcher("", 0, nullptr)); fetcher->FailTransfer(503); // Sets the HTTP response code. From 18fff84dd7192e2454593ee94eac1cdc92dcc67d Mon Sep 17 00:00:00 2001 From: Askar Aitzhan Date: Fri, 21 Jun 2019 23:24:37 +0200 Subject: [PATCH 052/624] update_engine: move autoupdate_token to cohorthint attribute If policy DeviceQuickFixBuildToken is set and is not empty then set "cohorthint" attribute to the value of DeviceQuickFixBuildToken in update request. Takes precedence over kPrefsOmahaCohortHint pref. BUG=chromium:932465 TEST=./build_packages --board=amd64-generic && \ cros_run_unit_tests --board=amd64-generic --packages update_engine Change-Id: Ia4143b0854742ec22a535ce75b3b54a937a47b5a --- omaha_request_action_unittest.cc | 37 ++++++++++++++++++++++++++------ omaha_request_builder_xml.cc | 33 ++++++++++++++++------------ omaha_request_builder_xml.h | 3 ++- 3 files changed, 51 insertions(+), 22 deletions(-) diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 41b2520b..8008e008 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -1471,7 +1471,6 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { request_params_.set_current_channel("unittest_track<"); request_params_.set_target_channel("unittest_track<"); request_params_.set_hwid(""); - request_params_.set_autoupdate_token("autoupdate_token>"); fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring"); fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\"); fake_prefs_.SetString( @@ -1498,8 +1497,6 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { // Values from Prefs that are too big are removed from the XML instead of // encoded. EXPECT_EQ(string::npos, post_str.find("cohortname=")); - EXPECT_NE(string::npos, post_str.find("autoupdate_token>")); - EXPECT_EQ(string::npos, post_str.find("autoupdate_token>")); } TEST_F(OmahaRequestActionTest, XmlDecodeTest) { @@ -1706,18 +1703,44 @@ TEST_F(OmahaRequestActionTest, OmahaEventTest) { } TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsSetTest) { - constexpr char autoupdate_token[] = "autoupdate_token"; + // If DeviceQuickFixBuildToken value is set it takes precedence over pref + // value. 
+ constexpr char autoupdate_token[] = "autoupdate_token>"; + constexpr char xml_encoded_autoupdate_token[] = "autoupdate_token>"; + constexpr char omaha_cohort_hint[] = "cohort_hint"; tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; request_params_.set_autoupdate_token(autoupdate_token); + fake_prefs_.SetString(kPrefsOmahaCohortHint, omaha_cohort_hint); ASSERT_TRUE(TestUpdateCheck()); - EXPECT_NE(post_str.find(" \n"), - string::npos); + EXPECT_NE(string::npos, + post_str.find("cohorthint=\"" + + string(xml_encoded_autoupdate_token) + "\"")); + EXPECT_EQ(string::npos, post_str.find(autoupdate_token)); + EXPECT_EQ(string::npos, post_str.find(omaha_cohort_hint)); +} + +TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsNotSetTest) { + // If DeviceQuickFixBuildToken is not set, pref value will be provided in + // cohorthint attribute. + constexpr char omaha_cohort_hint[] = "evil_string>"; + constexpr char xml_encoded_cohort_hint[] = "evil_string>"; + + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + fake_prefs_.SetString(kPrefsOmahaCohortHint, omaha_cohort_hint); + + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_NE( + string::npos, + post_str.find("cohorthint=\"" + string(xml_encoded_cohort_hint) + "\"")); + EXPECT_EQ(string::npos, post_str.find(omaha_cohort_hint)); } void OmahaRequestActionTest::PingTest(bool ping_only) { diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 3e4a3359..95fb1837 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -118,12 +118,6 @@ string OmahaRequestBuilderXml::GetAppBody(bool skip_updatecheck) const { app_body += " rollback_allowed=\"true\""; } } - string autoupdate_token = params_->autoupdate_token(); - if (!autoupdate_token.empty()) { - app_body += base::StringPrintf( - " token=\"%s\"", XmlEncodeWithDefault(autoupdate_token).c_str()); - } - app_body += ">\n"; } @@ -172,14 +166,20 @@ string OmahaRequestBuilderXml::GetAppBody(bool skip_updatecheck) const { } string OmahaRequestBuilderXml::GetCohortArg(const string arg_name, - const string prefs_key) const { - // There's nothing wrong with not having a given cohort setting, so we check - // existence first to avoid the warning log message. - if (!prefs_->Exists(prefs_key)) - return ""; + const string prefs_key, + const string override_value) const { string cohort_value; - if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty()) - return ""; + if (!override_value.empty()) { + // |override_value| take precedence over pref value. + cohort_value = override_value; + } else { + // There's nothing wrong with not having a given cohort setting, so we check + // existence first to avoid the warning log message. + if (!prefs_->Exists(prefs_key)) + return ""; + if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty()) + return ""; + } // This is a sanity check to avoid sending a huge XML file back to Ohama due // to a compromised stateful partition making the update check fail in low // network environments envent after a reboot. 
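The net effect on the request XML is easiest to see with concrete values; the following is a worked example using the hypothetical strings from the tests above (assuming the usual XML escaping turns '>' into &gt;):

  // DeviceQuickFixBuildToken set: the policy token wins and is XML-encoded.
  request_params_.set_autoupdate_token("autoupdate_token>");
  fake_prefs_.SetString(kPrefsOmahaCohortHint, "cohort_hint");
  //   => <app ... cohorthint="autoupdate_token&gt;" ...>

  // No policy token: the kPrefsOmahaCohortHint pref is used as before.
  request_params_.set_autoupdate_token("");
  fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil_string>");
  //   => <app ... cohorthint="evil_string&gt;" ...>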
@@ -246,9 +246,14 @@ string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { string app_cohort_args; app_cohort_args += GetCohortArg("cohort", kPrefsOmahaCohort); - app_cohort_args += GetCohortArg("cohorthint", kPrefsOmahaCohortHint); app_cohort_args += GetCohortArg("cohortname", kPrefsOmahaCohortName); + // Policy provided value overrides pref. + string autoupdate_token = params_->autoupdate_token(); + app_cohort_args += GetCohortArg("cohorthint", + kPrefsOmahaCohortHint, + autoupdate_token /* override_value */); + string fingerprint_arg; if (!params_->os_build_fingerprint().empty()) { fingerprint_arg = "fingerprint=\"" + diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h index 0ba44b88..495ddd7c 100644 --- a/omaha_request_builder_xml.h +++ b/omaha_request_builder_xml.h @@ -161,7 +161,8 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { // |arg_name| and |prefs_key|, if any. The return value is suitable to // concatenate to the list of arguments and includes a space at the end. std::string GetCohortArg(const std::string arg_name, - const std::string prefs_key) const; + const std::string prefs_key, + const std::string override_value = "") const; // Returns an XML ping element if any of the elapsed days need to be // sent, or an empty string otherwise. From 7999ef8089454c51e8ed84e7d58db0f6592d1227 Mon Sep 17 00:00:00 2001 From: Curtis Malainey Date: Tue, 2 Jul 2019 15:25:12 -0700 Subject: [PATCH 053/624] update_engine: increase logging on config detection failure In the event the partition version cannot be detected we were logging it as if it had been detected but defaulting back to a fallback value. We should log the failure to find the config. BUG=chromium:971174 TEST=run cros_generate_update_payload --image chromiumos_base_image.bin --src_image chromiumos_base_image.bin --output output on a known bad image Change-Id: I5389134e809217e5134bdfb8d16b32a43aeb3f9f --- payload_generator/generate_delta_main.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 10ae2a05..ddb9a355 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -645,17 +645,25 @@ int Main(int argc, char** argv) { payload_config.version.minor = kInPlaceMinorPayloadVersion; brillo::KeyValueStore store; uint32_t minor_version; + bool minor_version_found = false; for (const PartitionConfig& part : payload_config.source.partitions) { if (part.fs_interface && part.fs_interface->LoadSettings(&store) && utils::GetMinorVersion(store, &minor_version)) { payload_config.version.minor = minor_version; + minor_version_found = true; + LOG(INFO) << "Auto-detected minor_version=" + << payload_config.version.minor; break; } } + LOG_IF(WARNING, !minor_version_found) + << "Failed to detect minor version defaulting to minor_version=" + << payload_config.version.minor; } else { payload_config.version.minor = kFullPayloadMinorVersion; + LOG(INFO) << "Using non-delta minor_version=" + << payload_config.version.minor; } - LOG(INFO) << "Auto-detected minor_version=" << payload_config.version.minor; } else { payload_config.version.minor = FLAGS_minor_version; LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version; From eb463eef3769467d0a09d99aebb3300109ec912c Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 20 Jun 2019 19:23:03 -0700 Subject: [PATCH 054/624] update_engine: Add newer DBus method and signal for GetStatus The 
current GetStatus function is pretty non-extendable and there has been use cases where we wanted to add arguments to it but it was quite hard to do specially changes in Chrome. This CL adds a new DBus Method GetStatusAdvanced and Signal UpdateStatusAdvanced which use a protobuf for communicating dbus messages. This allows us to extend the protobuf without much effort in the future. BUG=chromium:977320 TEST=unittests, precq Cq-Depend: chromium:1672684, chrome-internal:1424559 Change-Id: Ia93ed189e7561ca18c63b5ded81826bc9b1cff12 Reviewed-on: https://chromium-review.googlesource.com/1669974 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- UpdateEngine.conf | 24 ++++++ client_library/client_dbus.cc | 69 ++++++++------- client_library/client_dbus.h | 9 +- client_library/include/update_engine/client.h | 3 + .../update_engine/status_update_handler.h | 9 +- ...rg.chromium.UpdateEngineInterface.dbus-xml | 35 ++++++++ dbus_service.cc | 42 ++++++++-- dbus_service.h | 6 ++ update_engine_client.cc | 83 ++++++------------- 9 files changed, 174 insertions(+), 106 deletions(-) diff --git a/UpdateEngine.conf b/UpdateEngine.conf index 94900969..42f73fc3 100644 --- a/UpdateEngine.conf +++ b/UpdateEngine.conf @@ -1,5 +1,20 @@ + @@ -27,6 +42,9 @@ + @@ -75,11 +93,17 @@ + + diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 809ad13a..48a563b9 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -45,6 +45,19 @@ unique_ptr UpdateEngineClient::CreateInstance() { namespace internal { +namespace { +// This converts the status from Protobuf |StatusResult| to The internal +// |UpdateEngineStatus| struct. +bool ConvertToUpdateEngineStatus(const StatusResult& status, + UpdateEngineStatus* out_status) { + out_status->last_checked_time = status.last_checked_time(); + out_status->progress = status.progress(); + out_status->new_version = status.new_version(); + out_status->new_size_bytes = status.new_size(); + return StringToUpdateStatus(status.current_operation(), &out_status->status); +} +} // namespace + bool DBusUpdateEngineClient::Init() { Bus::Options options; options.bus_type = Bus::SYSTEM; @@ -93,18 +106,25 @@ bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time, UpdateStatus* out_update_status, string* out_new_version, int64_t* out_new_size) const { - string status_as_string; - const bool success = proxy_->GetStatus(out_last_checked_time, - out_progress, - &status_as_string, - out_new_version, - out_new_size, - nullptr); - if (!success) { + StatusResult status; + if (!proxy_->GetStatusAdvanced(&status, nullptr)) { + return false; + } + + *out_last_checked_time = status.last_checked_time(); + *out_progress = status.progress(); + *out_new_version = status.new_version(); + *out_new_size = status.new_size(); + return StringToUpdateStatus(status.current_operation(), out_update_status); +} + +bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const { + StatusResult status; + if (!proxy_->GetStatusAdvanced(&status, nullptr)) { return false; } - return StringToUpdateStatus(status_as_string, out_update_status); + return ConvertToUpdateEngineStatus(status, out_status); } bool DBusUpdateEngineClient::SetCohortHint(const string& cohort_hint) { @@ -173,40 +193,25 @@ void DBusUpdateEngineClient::DBusStatusHandlersRegistered( void DBusUpdateEngineClient::StatusUpdateHandlersRegistered( StatusUpdateHandler* handler) const { - int64_t last_checked_time; - double progress; - 
UpdateStatus update_status; - string new_version; - int64_t new_size; - - if (!GetStatus(&last_checked_time, - &progress, - &update_status, - &new_version, - &new_size)) { + UpdateEngineStatus status; + if (!GetStatus(&status)) { handler->IPCError("Could not query current status"); return; } std::vector just_handler = {handler}; for (auto h : handler ? just_handler : handlers_) { - h->HandleStatusUpdate( - last_checked_time, progress, update_status, new_version, new_size); + h->HandleStatusUpdate(status); } } void DBusUpdateEngineClient::RunStatusUpdateHandlers( - int64_t last_checked_time, - double progress, - const string& current_operation, - const string& new_version, - int64_t new_size) { - UpdateStatus status; - StringToUpdateStatus(current_operation, &status); + const StatusResult& status) { + UpdateEngineStatus ue_status; + ConvertToUpdateEngineStatus(status, &ue_status); for (auto handler : handlers_) { - handler->HandleStatusUpdate( - last_checked_time, progress, status, new_version, new_size); + handler->HandleStatusUpdate(ue_status); } } @@ -235,7 +240,7 @@ bool DBusUpdateEngineClient::RegisterStatusUpdateHandler( return true; } - proxy_->RegisterStatusUpdateSignalHandler( + proxy_->RegisterStatusUpdateAdvancedSignalHandler( base::Bind(&DBusUpdateEngineClient::RunStatusUpdateHandlers, base::Unretained(this)), base::Bind(&DBusUpdateEngineClient::DBusStatusHandlersRegistered, diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index a186d45d..1b127e32 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -23,6 +23,7 @@ #include #include +#include #include "update_engine/client_library/include/update_engine/client.h" #include "update_engine/dbus-proxies.h" @@ -50,6 +51,8 @@ class DBusUpdateEngineClient : public UpdateEngineClient { std::string* out_new_version, int64_t* out_new_size) const override; + bool GetStatus(UpdateEngineStatus* out_status) const override; + bool SetCohortHint(const std::string& cohort_hint) override; bool GetCohortHint(std::string* cohort_hint) const override; @@ -93,11 +96,7 @@ class DBusUpdateEngineClient : public UpdateEngineClient { // registered handlers receive the event. void StatusUpdateHandlersRegistered(StatusUpdateHandler* handler) const; - void RunStatusUpdateHandlers(int64_t last_checked_time, - double progress, - const std::string& current_operation, - const std::string& new_version, - int64_t new_size); + void RunStatusUpdateHandlers(const StatusResult& status); std::unique_ptr proxy_; std::vector handlers_; diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index 1bc61116..89f36af6 100644 --- a/client_library/include/update_engine/client.h +++ b/client_library/include/update_engine/client.h @@ -80,6 +80,9 @@ class UpdateEngineClient { std::string* out_new_version, int64_t* out_new_size) const = 0; + // Same as above but return the entire struct instead. + virtual bool GetStatus(UpdateEngineStatus* out_status) const = 0; + // Getter and setter for the cohort hint. virtual bool SetCohortHint(const std::string& cohort_hint) = 0; virtual bool GetCohortHint(std::string* cohort_hint) const = 0; diff --git a/client_library/include/update_engine/status_update_handler.h b/client_library/include/update_engine/status_update_handler.h index d2fad34c..238f6bda 100644 --- a/client_library/include/update_engine/status_update_handler.h +++ b/client_library/include/update_engine/status_update_handler.h @@ -14,7 +14,9 @@ // limitations under the License. 
// +// NOLINTNEXTLINE(whitespace/line_length) #ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_ +// NOLINTNEXTLINE(whitespace/line_length) #define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_ #include @@ -35,13 +37,10 @@ class StatusUpdateHandler { virtual void IPCError(const std::string& error) = 0; // Runs every time update_engine reports a status change. - virtual void HandleStatusUpdate(int64_t last_checked_time, - double progress, - UpdateStatus current_operation, - const std::string& new_version, - int64_t new_size) = 0; + virtual void HandleStatusUpdate(const UpdateEngineStatus& status) = 0; }; } // namespace update_engine +// NOLINTNEXTLINE(whitespace/line_length) #endif // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_ diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index f81d4ed1..ef7bea75 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -1,4 +1,19 @@ + + + + + + The current status serialized in a protobuf. + + + + @@ -81,12 +106,22 @@ + + + + + The current status serialized in a protobuf. + + + + diff --git a/dbus_service.cc b/dbus_service.cc index 2a5662f3..105d5816 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -23,6 +23,7 @@ #include #include "update_engine/dbus_connection.h" +#include "update_engine/proto_bindings/update_engine.pb.h" #include "update_engine/update_status_utils.h" namespace chromeos_update_engine { @@ -31,8 +32,21 @@ using brillo::ErrorPtr; using chromeos_update_engine::UpdateEngineService; using std::string; using std::vector; +using update_engine::StatusResult; using update_engine::UpdateEngineStatus; +namespace { +// Converts the internal |UpdateEngineStatus| to the protobuf |StatusResult|. +void ConvertToStatusResult(const UpdateEngineStatus& ue_status, + StatusResult* out_status) { + out_status->set_last_checked_time(ue_status.last_checked_time); + out_status->set_progress(ue_status.progress); + out_status->set_current_operation(UpdateStatusToString(ue_status.status)); + out_status->set_new_version(ue_status.new_version); + out_status->set_new_size(ue_status.new_size_bytes); +} +} // namespace + DBusUpdateEngineService::DBusUpdateEngineService(SystemState* system_state) : common_(new UpdateEngineService{system_state}) {} @@ -116,6 +130,17 @@ bool DBusUpdateEngineService::GetStatus(ErrorPtr* error, return true; } +bool DBusUpdateEngineService::GetStatusAdvanced(ErrorPtr* error, + StatusResult* out_status) { + UpdateEngineStatus status; + if (!common_->GetStatus(error, &status)) { + return false; + } + + ConvertToStatusResult(status, out_status); + return true; +} + bool DBusUpdateEngineService::RebootIfNeeded(ErrorPtr* error) { return common_->RebootIfNeeded(error); } @@ -216,11 +241,18 @@ bool UpdateEngineAdaptor::RequestOwnership() { void UpdateEngineAdaptor::SendStatusUpdate( const UpdateEngineStatus& update_engine_status) { - SendStatusUpdateSignal(update_engine_status.last_checked_time, - update_engine_status.progress, - UpdateStatusToString(update_engine_status.status), - update_engine_status.new_version, - update_engine_status.new_size_bytes); + StatusResult status; + ConvertToStatusResult(update_engine_status, &status); + + // TODO(crbug.com/977320): Deprecate |StatusUpdate| signal. 
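From a client's point of view the protobuf-based surface looks roughly like this (a sketch assembled from the conversion helpers and proxy calls in this CL, not a verbatim excerpt; error handling is trimmed):

  update_engine::StatusResult status;
  if (proxy_->GetStatusAdvanced(&status, nullptr /* brillo::ErrorPtr* */)) {
    LOG(INFO) << "operation=" << status.current_operation()
              << " progress=" << status.progress()
              << " new_version=" << status.new_version()
              << " new_size=" << status.new_size();
  }
  // New fields only need to be added to the StatusResult message; neither the
  // D-Bus method signature nor the signal signature has to change again.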
+ SendStatusUpdateSignal(status.last_checked_time(), + status.progress(), + status.current_operation(), + status.new_version(), + status.new_size()); + + // Send |StatusUpdateAdvanced| signal. + SendStatusUpdateAdvancedSignal(status); } } // namespace chromeos_update_engine diff --git a/dbus_service.h b/dbus_service.h index 134461bd..71a6d2b6 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -24,6 +24,7 @@ #include #include +#include #include "update_engine/common_service.h" #include "update_engine/service_observer_interface.h" @@ -72,6 +73,11 @@ class DBusUpdateEngineService std::string* out_new_version, int64_t* out_new_size) override; + // Similar to Above, but returns a protobuffer instead. In the future it will + // have more features and is easily extendable. + bool GetStatusAdvanced(brillo::ErrorPtr* error, + update_engine::StatusResult* out_status) override; + // Reboots the device if an update is applied and a reboot is required. bool RebootIfNeeded(brillo::ErrorPtr* error) override; diff --git a/update_engine_client.cc b/update_engine_client.cc index d1b22678..1b680d12 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -46,6 +46,7 @@ using chromeos_update_engine::utils::ErrorCodeToString; using std::string; using std::unique_ptr; using std::vector; +using update_engine::UpdateEngineStatus; using update_engine::UpdateStatus; namespace { @@ -132,42 +133,24 @@ class WatchingStatusUpdateHandler : public ExitingStatusUpdateHandler { public: ~WatchingStatusUpdateHandler() override = default; - void HandleStatusUpdate(int64_t last_checked_time, - double progress, - UpdateStatus current_operation, - const string& new_version, - int64_t new_size) override; + void HandleStatusUpdate(const UpdateEngineStatus& status) override; }; void WatchingStatusUpdateHandler::HandleStatusUpdate( - int64_t last_checked_time, - double progress, - UpdateStatus current_operation, - const string& new_version, - int64_t new_size) { + const UpdateEngineStatus& status) { LOG(INFO) << "Got status update:"; - LOG(INFO) << " last_checked_time: " << last_checked_time; - LOG(INFO) << " progress: " << progress; - LOG(INFO) << " current_operation: " - << UpdateStatusToString(current_operation); - LOG(INFO) << " new_version: " << new_version; - LOG(INFO) << " new_size: " << new_size; + LOG(INFO) << " last_checked_time: " << status.last_checked_time; + LOG(INFO) << " progress: " << status.progress; + LOG(INFO) << " current_operation: " << UpdateStatusToString(status.status); + LOG(INFO) << " new_version: " << status.new_version; + LOG(INFO) << " new_size: " << status.new_size_bytes; } bool UpdateEngineClient::ShowStatus() { - int64_t last_checked_time = 0; - double progress = 0.0; - UpdateStatus current_op; - string new_version; - int64_t new_size = 0; - + UpdateEngineStatus status; int retry_count = kShowStatusRetryCount; while (retry_count > 0) { - if (client_->GetStatus(&last_checked_time, - &progress, - ¤t_op, - &new_version, - &new_size)) { + if (client_->GetStatus(&status)) { break; } if (--retry_count == 0) { @@ -181,31 +164,22 @@ bool UpdateEngineClient::ShowStatus() { printf("LAST_CHECKED_TIME=%" PRIi64 "\nPROGRESS=%f\nCURRENT_OP=%s\n" "NEW_VERSION=%s\nNEW_SIZE=%" PRIi64 "\n", - last_checked_time, - progress, - UpdateStatusToString(current_op), - new_version.c_str(), - new_size); + status.last_checked_time, + status.progress, + UpdateStatusToString(status.status), + status.new_version.c_str(), + status.new_size_bytes); return true; } int UpdateEngineClient::GetNeedReboot() { - int64_t 
last_checked_time = 0; - double progress = 0.0; - UpdateStatus current_op; - string new_version; - int64_t new_size = 0; - - if (!client_->GetStatus(&last_checked_time, - &progress, - ¤t_op, - &new_version, - &new_size)) { + UpdateEngineStatus status; + if (!client_->GetStatus(&status)) { return 1; } - if (current_op == UpdateStatus::UPDATED_NEED_REBOOT) { + if (status.status == UpdateStatus::UPDATED_NEED_REBOOT) { return 0; } @@ -220,35 +194,26 @@ class UpdateWaitHandler : public ExitingStatusUpdateHandler { ~UpdateWaitHandler() override = default; - void HandleStatusUpdate(int64_t last_checked_time, - double progress, - UpdateStatus current_operation, - const string& new_version, - int64_t new_size) override; + void HandleStatusUpdate(const UpdateEngineStatus& status) override; private: bool exit_on_error_; update_engine::UpdateEngineClient* client_; }; -void UpdateWaitHandler::HandleStatusUpdate(int64_t /* last_checked_time */, - double /* progress */, - UpdateStatus current_operation, - const string& /* new_version */, - int64_t /* new_size */) { - if (exit_on_error_ && current_operation == UpdateStatus::IDLE) { +void UpdateWaitHandler::HandleStatusUpdate(const UpdateEngineStatus& status) { + if (exit_on_error_ && status.status == UpdateStatus::IDLE) { int last_attempt_error; ErrorCode code = ErrorCode::kSuccess; if (client_ && client_->GetLastAttemptError(&last_attempt_error)) code = static_cast(last_attempt_error); LOG(ERROR) << "Update failed, current operation is " - << UpdateStatusToString(current_operation) - << ", last error code is " << ErrorCodeToString(code) << "(" - << last_attempt_error << ")"; + << UpdateStatusToString(status.status) << ", last error code is " + << ErrorCodeToString(code) << "(" << last_attempt_error << ")"; exit(1); } - if (current_operation == UpdateStatus::UPDATED_NEED_REBOOT) { + if (status.status == UpdateStatus::UPDATED_NEED_REBOOT) { LOG(INFO) << "Update succeeded -- reboot needed."; exit(0); } From 3bab177b61d37254c332c4febb11e30bd46aae71 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 21 Jun 2019 14:58:25 -0700 Subject: [PATCH 055/624] update_engine: Use DBus protobuf capabilities in dlcservice API methods DBus have the capability to pass protobuf message directly in the method without the need for serializing it manually. This CL uses those types of arguments which is almost used everywhere in the platform2 code base. 
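As a concrete illustration (a sketch of the client-side call after this change; the DlcModuleList message and the generated proxy method are the ones referenced in the diffs below):

  dlcservice::DlcModuleList dlc_parameters;
  dlc_parameters.set_omaha_url(omaha_url);
  for (const auto& dlc_id : dlc_ids)
    dlc_parameters.add_dlc_module_infos()->set_dlc_id(dlc_id);
  // The bindings marshal the protobuf themselves; no SerializeToString() /
  // ParseFromString() round trip and no "parameters are invalid" error path.
  proxy_->AttemptInstall(dlc_parameters, nullptr /* brillo::ErrorPtr* */);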
BUG=chromium:978672 TEST=precq Cq-Depend: chromium:1672684 Change-Id: I6e34ee76ede0c7b0b8cd1bba603a5836743d67fc Reviewed-on: https://chromium-review.googlesource.com/1672021 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- client_library/client_dbus.cc | 9 ++------- client_library/client_dbus.h | 1 + .../org.chromium.UpdateEngineInterface.dbus-xml | 8 +++++++- dbus_service.cc | 17 +++++------------ dbus_service.h | 3 ++- dlcservice_chromeos.cc | 14 ++++++-------- 6 files changed, 23 insertions(+), 29 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 48a563b9..18ae23b7 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -92,13 +92,8 @@ bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url, dlc_parameters.add_dlc_module_infos(); dlc_module_info->set_dlc_id(dlc_id); } - string dlc_request; - if (dlc_parameters.SerializeToString(&dlc_request)) { - return proxy_->AttemptInstall(dlc_request, nullptr /* brillo::ErrorPtr* */); - } else { - LOG(ERROR) << "Fail to serialize a protobuf to a string."; - return false; - } + return proxy_->AttemptInstall(dlc_parameters, + nullptr /* brillo::ErrorPtr* */); } bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time, diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index 1b127e32..c9631cf7 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -23,6 +23,7 @@ #include #include +#include #include #include "update_engine/client_library/include/update_engine/client.h" diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index ef7bea75..a1831476 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -35,7 +35,13 @@ - + + + The information about DLC modules that needs to be installed. + + + diff --git a/dbus_service.cc b/dbus_service.cc index 105d5816..4e372212 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -30,6 +30,7 @@ namespace chromeos_update_engine { using brillo::ErrorPtr; using chromeos_update_engine::UpdateEngineService; +using dlcservice::DlcModuleList; using std::string; using std::vector; using update_engine::StatusResult; @@ -77,25 +78,17 @@ bool DBusUpdateEngineService::AttemptUpdateWithFlags( } bool DBusUpdateEngineService::AttemptInstall(ErrorPtr* error, - const string& dlc_request) { - // Parse the raw parameters into protobuf. - dlcservice::DlcModuleList dlc_parameters; - if (!dlc_parameters.ParseFromString(dlc_request)) { - *error = brillo::Error::Create( - FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid."); - return false; - } - // Extract fields from the protobuf. 
+ const DlcModuleList& request) { vector dlc_ids; - for (const auto& dlc_module_info : dlc_parameters.dlc_module_infos()) { + for (const auto& dlc_module_info : request.dlc_module_infos()) { if (dlc_module_info.dlc_id().empty()) { *error = brillo::Error::Create( - FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid."); + FROM_HERE, "update_engine", "INTERNAL", "Empty DLC ID passed."); return false; } dlc_ids.push_back(dlc_module_info.dlc_id()); } - return common_->AttemptInstall(error, dlc_parameters.omaha_url(), dlc_ids); + return common_->AttemptInstall(error, request.omaha_url(), dlc_ids); } bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error, diff --git a/dbus_service.h b/dbus_service.h index 71a6d2b6..2babf8c7 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -24,6 +24,7 @@ #include #include +#include #include #include "update_engine/common_service.h" @@ -51,7 +52,7 @@ class DBusUpdateEngineService int32_t in_flags_as_int) override; bool AttemptInstall(brillo::ErrorPtr* error, - const std::string& dlc_request) override; + const dlcservice::DlcModuleList& request) override; bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash) override; diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc index e95f08ff..b7dee360 100644 --- a/dlcservice_chromeos.cc +++ b/dlcservice_chromeos.cc @@ -16,11 +16,13 @@ #include "update_engine/dlcservice_chromeos.h" -#include #include +// NOLINTNEXTLINE(build/include_alpha) "dbus-proxies.h" needs "dlcservice.pb.h" +#include #include "update_engine/dbus_connection.h" +using dlcservice::DlcModuleList; using std::string; using std::vector; @@ -35,14 +37,10 @@ bool DlcServiceChromeOS::GetInstalled(vector* dlc_module_ids) { return false; org::chromium::DlcServiceInterfaceProxy dlcservice_proxy( DBusConnection::Get()->GetDBus()); - string dlc_module_list_str; - if (!dlcservice_proxy.GetInstalled(&dlc_module_list_str, nullptr)) { - LOG(ERROR) << "dlcservice does not return installed DLC module list."; - return false; - } + dlcservice::DlcModuleList dlc_module_list; - if (!dlc_module_list.ParseFromString(dlc_module_list_str)) { - LOG(ERROR) << "Errors parsing DlcModuleList protobuf."; + if (!dlcservice_proxy.GetInstalled(&dlc_module_list, nullptr)) { + LOG(ERROR) << "dlcservice does not return installed DLC module list."; return false; } for (const auto& dlc_module_info : dlc_module_list.dlc_module_infos()) { From 0ffa71f7f7feede3d881cfb377895df956ebb621 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 8 Jul 2019 10:25:54 -0700 Subject: [PATCH 056/624] update_engine: fix BUILD.gn lint errors Via: gn format BUILD.gn BUG=none TEST=presubmit Change-Id: I64b028bfffe5c9aaac6086466d08602785d979e3 --- BUILD.gn | 3 +++ 1 file changed, 3 insertions(+) diff --git a/BUILD.gn b/BUILD.gn index 224ad450..51a4ae00 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -436,6 +436,7 @@ if (use.test) { "common/http_common.cc", "test_http_server.cc", ] + # //common-mk:test should be on the top. # TODO(crbug.com/887845): Remove this after library odering issue is fixed. configs += [ @@ -449,6 +450,7 @@ if (use.test) { sources = [ "test_subprocess.cc", ] + # //common-mk:test should be on the top. # TODO(crbug.com/887845): Remove this after library odering issue is fixed. configs += [ @@ -542,6 +544,7 @@ if (use.test) { "update_manager/variable_unittest.cc", "update_manager/weekly_time_unittest.cc", ] + # //common-mk:test should be on the top. # TODO(crbug.com/887845): Remove this after library odering issue is fixed. 
configs += [ From 53a42e847ca184c5caca4e39445bd0bb26b4d1a6 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 8 Jul 2019 10:28:29 -0700 Subject: [PATCH 057/624] update_engine: prettify presubmit hooks We can name these, so it's clearer what hook is running when. 'cros' should also be on everyone's ${PATH}, so no need for the relative paths. BUG=none TEST=presubmit Change-Id: If900a293db7ebd775ffe15403df0a0f2b05dfd8c --- PRESUBMIT.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg index 7256e542..42156b3e 100644 --- a/PRESUBMIT.cfg +++ b/PRESUBMIT.cfg @@ -1,6 +1,6 @@ [Hook Scripts] -hook0=../../../../chromite/bin/cros lint ${PRESUBMIT_FILES} -hook1=../../../platform2/common-mk/gnlint.py ${PRESUBMIT_FILES} +cros lint = cros lint ${PRESUBMIT_FILES} +gnlint = ../../../platform2/common-mk/gnlint.py ${PRESUBMIT_FILES} [Hook Overrides] clang_format_check: true From 504c3cb7e37d0ad1adbe6c08d4156a0fadb7aa07 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 2 Jul 2019 11:17:24 -0700 Subject: [PATCH 058/624] update_engine: Template specialized UpdateManager interface for mockability This change is to create an interface that is for testing |UpdateManager|. Currently the |UpdateMaanger| exists by having two main member functions that are templatized allowing for a generic set of policy methods to be passed in taking a abitrary set of arguments. The downside of this design is the difficulty when testing such a class. Next steps are to refactor |Policy| and |UpdateManager| to exist together without the need for templatized member function within |UpdateManager| as the whole set of policy methods that can be passed in are already determined (UpdateCheckAllowed, UpdateCanBeApplied, UpdateCanStart, UpdateDownloadAllowed, P2PEnabled, P2PEnabledChanged). The issue is that these functions each take a different set of arguments and can probably be combined into one generic set of arguments making the |UpdateManager| much simpler to manage. BUG=chromium:924165 TEST=unittests TEST=FEATURES="test" emerge-${BOARD} update_engine Change-Id: Ia8091495079f9324bccf5e717d5f26ea7ef24514 --- update_attempter.cc | 3 +- update_attempter_unittest.cc | 4 +++ update_manager/mock_update_manager.h | 44 ++++++++++++++++++++++++++++ update_manager/update_manager.cc | 7 +++++ update_manager/update_manager.h | 29 +++++++++++++++++- 5 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 update_manager/mock_update_manager.h diff --git a/update_attempter.cc b/update_attempter.cc index b3cb3c36..8df8b614 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -163,7 +163,8 @@ bool UpdateAttempter::ScheduleUpdates() { Bind(&UpdateAttempter::OnUpdateScheduled, base::Unretained(this)); // We limit the async policy request to a reasonably short time, to avoid a // starvation due to a transient bug. 
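The call right below is the one being rerouted: gmock cannot mock a templatized member function, so tests previously had no seam here. With the new non-template virtual wrapper, a test can stub the scheduling path along these lines (expectation shape only, using the mock and fake-system-state hookup added later in this CL):

  MockUpdateManager mock_update_manager;
  fake_system_state_.set_update_manager(&mock_update_manager);
  EXPECT_CALL(mock_update_manager,
              AsyncPolicyRequestUpdateCheckAllowed(testing::_, testing::_));
  attempter_.ScheduleUpdates();  // Now hits the virtual wrapper, not the template.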
- update_manager->AsyncPolicyRequest(callback, &Policy::UpdateCheckAllowed); + update_manager->AsyncPolicyRequestUpdateCheckAllowed( + callback, &Policy::UpdateCheckAllowed); waiting_for_scheduled_check_ = true; return true; } diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 8b8e3902..9421c19f 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -53,10 +53,12 @@ #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/postinstall_runner_action.h" #include "update_engine/update_boot_flags_action.h" +#include "update_engine/update_manager/mock_update_manager.h" using base::Time; using base::TimeDelta; using chromeos_update_manager::EvalStatus; +using chromeos_update_manager::MockUpdateManager; using chromeos_update_manager::StagingSchedule; using chromeos_update_manager::UpdateCheckParams; using policy::DevicePolicy; @@ -137,6 +139,7 @@ class UpdateAttempterTest : public ::testing::Test { fake_system_state_.set_connection_manager(&mock_connection_manager); fake_system_state_.set_update_attempter(&attempter_); fake_system_state_.set_dlcservice(&mock_dlcservice_); + fake_system_state_.set_update_manager(&mock_update_manager_); loop_.SetAsCurrent(); certificate_checker_.Init(); @@ -222,6 +225,7 @@ class UpdateAttempterTest : public ::testing::Test { OpenSSLWrapper openssl_wrapper_; CertificateChecker certificate_checker_; MockDlcService mock_dlcservice_; + MockUpdateManager mock_update_manager_; NiceMock* processor_; NiceMock* diff --git a/update_manager/mock_update_manager.h b/update_manager/mock_update_manager.h new file mode 100644 index 00000000..07e4689a --- /dev/null +++ b/update_manager/mock_update_manager.h @@ -0,0 +1,44 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_MOCK_UPDATE_MANAGER_H +#define UPDATE_ENGINE_MOCK_UPDATE_MANAGER_H + +#include + +#include "update_engine/update_manager/update_manager.h" + +#include + +namespace chromeos_update_manager { + +class MockUpdateManager : public UpdateManager { + public: + MockUpdateManager() + : UpdateManager(nullptr, base::TimeDelta(), base::TimeDelta(), nullptr) {} + + MOCK_METHOD2( + AsyncPolicyRequestUpdateCheckAllowed, + void(base::Callback + callback, + EvalStatus (Policy::*policy_method)( + EvaluationContext*, State*, std::string*, UpdateCheckParams*) + const)); +}; + +} // namespace chromeos_update_manager + +#endif // UPDATE_ENGINE_MOCK_UPDATE_MANAGER_H diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc index 5dfc09cf..00694969 100644 --- a/update_manager/update_manager.cc +++ b/update_manager/update_manager.cc @@ -50,6 +50,13 @@ UpdateManager::~UpdateManager() { ec->RemoveObserversAndTimeout(); } +void UpdateManager::AsyncPolicyRequestUpdateCheckAllowed( + base::Callback callback, + EvalStatus (Policy::*policy_method)( + EvaluationContext*, State*, std::string*, UpdateCheckParams*) const) { + AsyncPolicyRequest(callback, policy_method); +} + void UpdateManager::UnregisterEvalContext(EvaluationContext* ec) { if (!ec_repo_.erase(ec)) { LOG(ERROR) << "Unregistering an unknown evaluation context, this is a bug."; diff --git a/update_manager/update_manager.h b/update_manager/update_manager.h index b0fd97fa..732175fe 100644 --- a/update_manager/update_manager.h +++ b/update_manager/update_manager.h @@ -42,8 +42,27 @@ struct ScopedRefPtrLess { } }; +// Please do not move this class into a new file for simplicity. +// This pure virtual class is purely created for purpose of testing. The reason +// was that |UpdateManager|'s member functions are templatized, which does not +// play nicely when testing (mocking + faking). Whenever a specialized member of +// |UpdateManager| must be tested, please add a specialized template member +// function within this class for testing. +class SpecializedPolicyRequestInterface { + public: + virtual ~SpecializedPolicyRequestInterface() = default; + + virtual void AsyncPolicyRequestUpdateCheckAllowed( + base::Callback + callback, + EvalStatus (Policy::*policy_method)(EvaluationContext*, + State*, + std::string*, + UpdateCheckParams*) const) = 0; +}; + // The main Update Manager singleton class. -class UpdateManager { +class UpdateManager : public SpecializedPolicyRequestInterface { public: // Creates the UpdateManager instance, assuming ownership on the provided // |state|. @@ -91,6 +110,14 @@ class UpdateManager { EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const, ActualArgs... args); + void AsyncPolicyRequestUpdateCheckAllowed( + base::Callback + callback, + EvalStatus (Policy::*policy_method)(EvaluationContext*, + State*, + std::string*, + UpdateCheckParams*) const) override; + protected: // The UpdateManager receives ownership of the passed Policy instance. void set_policy(const Policy* policy) { policy_.reset(policy); } From 500ca135b771d41700c91ae716bf9c9179d29bef Mon Sep 17 00:00:00 2001 From: "Kyeongkab.Nam" Date: Wed, 26 Jun 2019 13:48:07 +0900 Subject: [PATCH 059/624] Enable update_engine to access OTA package via file descriptor Due to the restriction of Treble, update_engine cannot access to OTA packages located on non-core domain area. (e.g. 
/data/vendor/upgrade/xxx.zip) To solve such problem, update_engine needs to have a new interface which accepts a file descriptor (FD) of OTA package file instead of its URI and to read package file while updating via FD. Test: Manual update Bug: 130209137 Change-Id: Ieb7173dc958ba3eb21af708e616ef7078cd17b3e --- binder_bindings/android/os/IUpdateEngine.aidl | 5 ++++ binder_service_android.cc | 29 +++++++++++++++++-- binder_service_android.h | 5 ++++ common/file_fetcher.cc | 26 +++++++++++------ common/utils.cc | 10 +++++++ common/utils.h | 3 ++ common/utils_unittest.cc | 9 ++++++ payload_consumer/install_plan.cc | 10 ++++++- service_delegate_android_interface.h | 7 +++++ update_attempter_android.cc | 16 ++++++++++ update_attempter_android.h | 8 +++++ 11 files changed, 115 insertions(+), 13 deletions(-) diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl index c0e29f50..cde05bed 100644 --- a/binder_bindings/android/os/IUpdateEngine.aidl +++ b/binder_bindings/android/os/IUpdateEngine.aidl @@ -26,6 +26,11 @@ interface IUpdateEngine { in long payload_size, in String[] headerKeyValuePairs); /** @hide */ + void applyPayloadFd(in FileDescriptor fd, + in long payload_offset, + in long payload_size, + in String[] headerKeyValuePairs); + /** @hide */ boolean bind(IUpdateEngineCallback callback); /** @hide */ boolean unbind(IUpdateEngineCallback callback); diff --git a/binder_service_android.cc b/binder_service_android.cc index 137694a7..1799438e 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -16,14 +16,18 @@ #include "update_engine/binder_service_android.h" +#include #include #include #include #include #include +using android::base::unique_fd; using android::binder::Status; using android::os::IUpdateEngineCallback; +using std::string; +using std::vector; using update_engine::UpdateEngineStatus; namespace { @@ -94,9 +98,9 @@ Status BinderUpdateEngineAndroidService::applyPayload( const android::String16& url, int64_t payload_offset, int64_t payload_size, - const std::vector& header_kv_pairs) { - const std::string payload_url{android::String8{url}.string()}; - std::vector str_headers; + const vector& header_kv_pairs) { + const string payload_url{android::String8{url}.string()}; + vector str_headers; str_headers.reserve(header_kv_pairs.size()); for (const auto& header : header_kv_pairs) { str_headers.emplace_back(android::String8{header}.string()); @@ -110,6 +114,25 @@ Status BinderUpdateEngineAndroidService::applyPayload( return Status::ok(); } +Status BinderUpdateEngineAndroidService::applyPayloadFd( + const ::android::base::unique_fd& fd, + int64_t payload_offset, + int64_t payload_size, + const vector& header_kv_pairs) { + vector str_headers; + str_headers.reserve(header_kv_pairs.size()); + for (const auto& header : header_kv_pairs) { + str_headers.emplace_back(android::String8{header}.string()); + } + + brillo::ErrorPtr error; + if (!service_delegate_->ApplyPayload( + fd.get(), payload_offset, payload_size, str_headers, &error)) { + return ErrorPtrToStatus(error); + } + return Status::ok(); +} + Status BinderUpdateEngineAndroidService::suspend() { brillo::ErrorPtr error; if (!service_delegate_->SuspendUpdate(&error)) diff --git a/binder_service_android.h b/binder_service_android.h index d8c4e9c3..ec4a93ba 100644 --- a/binder_service_android.h +++ b/binder_service_android.h @@ -53,6 +53,11 @@ class BinderUpdateEngineAndroidService : public android::os::BnUpdateEngine, int64_t payload_offset, int64_t payload_size, const 
std::vector& header_kv_pairs) override; + android::binder::Status applyPayloadFd( + const ::android::base::unique_fd& fd, + int64_t payload_offset, + int64_t payload_size, + const std::vector& header_kv_pairs) override; android::binder::Status bind( const android::sp& callback, bool* return_value) override; diff --git a/common/file_fetcher.cc b/common/file_fetcher.cc index 3836e548..7134fd69 100644 --- a/common/file_fetcher.cc +++ b/common/file_fetcher.cc @@ -43,8 +43,9 @@ namespace chromeos_update_engine { // static bool FileFetcher::SupportedUrl(const string& url) { // Note that we require the file path to start with a "/". - return base::StartsWith( - url, "file:///", base::CompareCase::INSENSITIVE_ASCII); + return ( + base::StartsWith(url, "file:///", base::CompareCase::INSENSITIVE_ASCII) || + base::StartsWith(url, "fd://", base::CompareCase::INSENSITIVE_ASCII)); } FileFetcher::~FileFetcher() { @@ -67,12 +68,20 @@ void FileFetcher::BeginTransfer(const string& url) { return; } - string file_path = url.substr(strlen("file://")); - stream_ = - brillo::FileStream::Open(base::FilePath(file_path), - brillo::Stream::AccessMode::READ, - brillo::FileStream::Disposition::OPEN_EXISTING, - nullptr); + string file_path; + + if (base::StartsWith(url, "fd://", base::CompareCase::INSENSITIVE_ASCII)) { + int fd = std::stoi(url.substr(strlen("fd://"))); + file_path = url; + stream_ = brillo::FileStream::FromFileDescriptor(fd, false, nullptr); + } else { + file_path = url.substr(strlen("file://")); + stream_ = + brillo::FileStream::Open(base::FilePath(file_path), + brillo::Stream::AccessMode::READ, + brillo::FileStream::Disposition::OPEN_EXISTING, + nullptr); + } if (!stream_) { LOG(ERROR) << "Couldn't open " << file_path; @@ -183,5 +192,4 @@ void FileFetcher::CleanUp() { transfer_in_progress_ = false; transfer_paused_ = false; } - } // namespace chromeos_update_engine diff --git a/common/utils.cc b/common/utils.cc index 34d97a2a..e7b69750 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -1064,6 +1064,16 @@ void ParseRollbackKeyVersion(const string& raw_version, } } +string GetFilePath(int fd) { + base::FilePath proc("/proc/self/fd/" + std::to_string(fd)); + base::FilePath file_name; + + if (!base::ReadSymbolicLink(proc, &file_name)) { + return "not found"; + } + return file_name.value(); +} + } // namespace utils } // namespace chromeos_update_engine diff --git a/common/utils.h b/common/utils.h index 9160d9f8..9dca9e87 100644 --- a/common/utils.h +++ b/common/utils.h @@ -304,6 +304,9 @@ bool ReadExtents(const std::string& path, // reboot. Returns whether it succeeded getting the boot_id. bool GetBootId(std::string* boot_id); +// This function gets the file path of the file pointed to by FileDiscriptor. +std::string GetFilePath(int fd); + // Divide |x| by |y| and round up to the nearest integer. 
constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) { return (x + y - 1) / y; diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc index 7d1c59ed..b4ac2f53 100644 --- a/common/utils_unittest.cc +++ b/common/utils_unittest.cc @@ -507,4 +507,13 @@ TEST(UtilsTest, ParseDottedVersion) { ExpectInvalidParseRollbackKeyVersion("1.99999"); } +TEST(UtilsTest, GetFilePathTest) { + test_utils::ScopedTempFile file; + int fd = HANDLE_EINTR(open(file.path().c_str(), O_RDONLY)); + EXPECT_GE(fd, 0); + EXPECT_EQ(file.path(), utils::GetFilePath(fd)); + EXPECT_EQ("not found", utils::GetFilePath(-1)); + IGNORE_EINTR(close(fd)); +} + } // namespace chromeos_update_engine diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc index 2e7b6d44..766b27cf 100644 --- a/payload_consumer/install_plan.cc +++ b/payload_consumer/install_plan.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "update_engine/common/utils.h" @@ -80,11 +81,18 @@ void InstallPlan::Dump() const { base::StringPrintf(", system_version: %s", system_version.c_str()); } + string url_str = download_url; + if (base::StartsWith( + url_str, "fd://", base::CompareCase::INSENSITIVE_ASCII)) { + int fd = std::stoi(url_str.substr(strlen("fd://"))); + url_str = utils::GetFilePath(fd); + } + LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update") << version_str << ", source_slot: " << BootControlInterface::SlotName(source_slot) << ", target_slot: " << BootControlInterface::SlotName(target_slot) - << ", url: " << download_url << payloads_str << partitions_str + << ", url: " << url_str << payloads_str << partitions_str << ", hash_checks_mandatory: " << utils::ToString(hash_checks_mandatory) << ", powerwash_required: " << utils::ToString(powerwash_required) diff --git a/service_delegate_android_interface.h b/service_delegate_android_interface.h index 5267bb06..6bd75b62 100644 --- a/service_delegate_android_interface.h +++ b/service_delegate_android_interface.h @@ -47,6 +47,13 @@ class ServiceDelegateAndroidInterface { const std::vector& key_value_pair_headers, brillo::ErrorPtr* error) = 0; + virtual bool ApplyPayload( + int fd, + int64_t payload_offset, + int64_t payload_size, + const std::vector& key_value_pair_headers, + brillo::ErrorPtr* error) = 0; + // Suspend an ongoing update. Returns true if there was an update ongoing and // it was suspended. In case of failure, it returns false and sets |error| // accordingly. 
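Tying the fd:// pieces together, the service-side path added below boils down to the following (a condensed sketch of the new ApplyPayload(fd, ...) overload; error handling omitted):

  // The caller hands update_engine an already-open package descriptor over
  // Binder; keep our own duplicate so it outlives the caller's copy.
  payload_fd_.reset(dup(fd));
  const std::string payload_url =
      "fd://" + std::to_string(payload_fd_.get());
  // FileFetcher::SupportedUrl() now accepts fd:// and reads the descriptor via
  // brillo::FileStream::FromFileDescriptor(); for logging, utils::GetFilePath()
  // resolves the descriptor through /proc/self/fd/<n> to a readable path.
  ApplyPayload(payload_url, payload_offset, payload_size,
               key_value_pair_headers, error);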
diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 1cc85058..97c53ec9 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -55,6 +56,7 @@ #include "update_engine/libcurl_http_fetcher.h" #endif +using android::base::unique_fd; using base::Bind; using base::Time; using base::TimeDelta; @@ -288,6 +290,19 @@ bool UpdateAttempterAndroid::ApplyPayload( return true; } +bool UpdateAttempterAndroid::ApplyPayload( + int fd, + int64_t payload_offset, + int64_t payload_size, + const vector& key_value_pair_headers, + brillo::ErrorPtr* error) { + payload_fd_.reset(dup(fd)); + const string payload_url = "fd://" + std::to_string(payload_fd_.get()); + + return ApplyPayload( + payload_url, payload_offset, payload_size, key_value_pair_headers, error); +} + bool UpdateAttempterAndroid::SuspendUpdate(brillo::ErrorPtr* error) { if (!processor_->IsRunning()) return LogAndSetError(error, FROM_HERE, "No ongoing update to suspend."); @@ -583,6 +598,7 @@ void UpdateAttempterAndroid::TerminateUpdateAndNotify(ErrorCode error_code) { (error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT : UpdateStatus::IDLE); SetStatusAndNotify(new_status); + payload_fd_.reset(); // The network id is only applicable to one download attempt and once it's // done the network id should not be re-used anymore. diff --git a/update_attempter_android.h b/update_attempter_android.h index c4710ad5..7e1949d6 100644 --- a/update_attempter_android.h +++ b/update_attempter_android.h @@ -23,6 +23,7 @@ #include #include +#include #include #include "update_engine/client_library/include/update_engine/update_status.h" @@ -65,6 +66,11 @@ class UpdateAttempterAndroid int64_t payload_size, const std::vector& key_value_pair_headers, brillo::ErrorPtr* error) override; + bool ApplyPayload(int fd, + int64_t payload_offset, + int64_t payload_size, + const std::vector& key_value_pair_headers, + brillo::ErrorPtr* error) override; bool SuspendUpdate(brillo::ErrorPtr* error) override; bool ResumeUpdate(brillo::ErrorPtr* error) override; bool CancelUpdate(brillo::ErrorPtr* error) override; @@ -191,6 +197,8 @@ class UpdateAttempterAndroid std::unique_ptr metrics_reporter_; + ::android::base::unique_fd payload_fd_; + DISALLOW_COPY_AND_ASSIGN(UpdateAttempterAndroid); }; From 4c891c94214702d57f81b5892bcccf3e38bf09a1 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 21 Jun 2019 17:45:23 -0700 Subject: [PATCH 060/624] Fix build for API change to DestroyLogicalPartition(). This method no longer uses a timeout parameter. 
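In practice call sites change shape as follows (illustrative only; DestroyLogicalPartition() is the external liblp/fs_mgr API, shown here just to contrast the two forms):

  // Before: callers picked a timeout, with 0 ms meaning "do not wait".
  DestroyLogicalPartition(
      name, std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0));

  // After: no timeout parameter, so update_engine drops its |wait| plumbing.
  DestroyLogicalPartition(name);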
Bug: 135771280 Test: update_engine and update_engine_unittests build Change-Id: If4764bf2d60c6b3aac1e8052c7fbb013c7b3349d --- boot_control_android.cc | 4 ++-- boot_control_android_unittest.cc | 8 ++++---- dynamic_partition_control_android.cc | 10 ++++------ dynamic_partition_control_android.h | 4 ++-- dynamic_partition_control_interface.h | 2 +- mock_dynamic_partition_control.h | 2 +- 6 files changed, 14 insertions(+), 16 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index 1fab85f1..8ab73be1 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -373,8 +373,8 @@ bool UnmapTargetPartitions(DynamicPartitionControlInterface* dynamic_control, const PartitionMetadata& partition_metadata) { for (const auto& group : partition_metadata.groups) { for (const auto& partition : group.partitions) { - if (!dynamic_control->UnmapPartitionOnDeviceMapper( - partition.name + target_suffix, true /* wait */)) { + if (!dynamic_control->UnmapPartitionOnDeviceMapper(partition.name + + target_suffix)) { return false; } } diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index 65c2381f..94e195f8 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -321,12 +321,12 @@ class BootControlAndroidTest : public ::testing::Test { // slot with each partition in |partitions|. void ExpectUnmap(const std::set& partitions) { // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments. - ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _)) + ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_)) .WillByDefault(Return(false)); for (const auto& partition : partitions) { - EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition, _)) - .WillOnce(Invoke([this](auto partition, auto) { + EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition)) + .WillOnce(Invoke([this](auto partition) { mapped_devices_.erase(partition); return true; })); @@ -531,7 +531,7 @@ TEST_P(BootControlAndroidTestP, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); // Should not try to unmap any target partition. - EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _)).Times(0); + EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_)).Times(0); // Should not store metadata to target slot. EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(target()), _, target())) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 40c26637..bfdd3752 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -103,7 +103,7 @@ bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper( // Note that for source partitions, if GetState() == ACTIVE, callers (e.g. // BootControlAndroid) should not call MapPartitionOnDeviceMapper, but // should directly call GetDmDevicePathByName. 
- if (!UnmapPartitionOnDeviceMapper(target_partition_name, true /* wait */)) { + if (!UnmapPartitionOnDeviceMapper(target_partition_name)) { LOG(ERROR) << target_partition_name << " is mapped before the update, and it cannot be unmapped."; return false; @@ -127,12 +127,10 @@ bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper( } bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( - const std::string& target_partition_name, bool wait) { + const std::string& target_partition_name) { if (DeviceMapper::Instance().GetState(target_partition_name) != DmDeviceState::INVALID) { - if (!DestroyLogicalPartition( - target_partition_name, - std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0))) { + if (!DestroyLogicalPartition(target_partition_name)) { LOG(ERROR) << "Cannot unmap " << target_partition_name << " from device mapper."; return false; @@ -150,7 +148,7 @@ void DynamicPartitionControlAndroid::CleanupInternal(bool wait) { std::set mapped = mapped_devices_; LOG(INFO) << "Destroying [" << Join(mapped, ", ") << "] from device mapper"; for (const auto& partition_name : mapped) { - ignore_result(UnmapPartitionOnDeviceMapper(partition_name, wait)); + ignore_result(UnmapPartitionOnDeviceMapper(partition_name)); } } diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 1233b642..334f9bd7 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -36,8 +36,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t slot, bool force_writable, std::string* path) override; - bool UnmapPartitionOnDeviceMapper(const std::string& target_partition_name, - bool wait) override; + bool UnmapPartitionOnDeviceMapper( + const std::string& target_partition_name) override; void Cleanup() override; bool DeviceExists(const std::string& path) override; android::dm::DmDeviceState GetState(const std::string& name) override; diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index 86a07300..d4590f7d 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -57,7 +57,7 @@ class DynamicPartitionControlInterface { // If |wait| is set, wait until the device is unmapped. // Returns true if unmapped successfully. virtual bool UnmapPartitionOnDeviceMapper( - const std::string& target_partition_name, bool wait) = 0; + const std::string& target_partition_name) = 0; // Do necessary cleanups before destroying the object. 
virtual void Cleanup() = 0; diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 24aca06d..cdfeeccc 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -33,7 +33,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { uint32_t, bool, std::string*)); - MOCK_METHOD2(UnmapPartitionOnDeviceMapper, bool(const std::string&, bool)); + MOCK_METHOD1(UnmapPartitionOnDeviceMapper, bool(const std::string&)); MOCK_METHOD0(Cleanup, void()); MOCK_METHOD1(DeviceExists, bool(const std::string&)); MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&)); From 51ea9aec43851cb5a40698f9b8785dd897a1f397 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 3 Jul 2019 16:56:30 -0700 Subject: [PATCH 061/624] update_engine: Thoroughly test DBUS entry-point for updating This change is to specifically target testing combinations of inputs and states possible for/when the call to the member function |CheckForUpdate()| within |UpdateAttempter| is invoked. BUG=chromium:924165 TEST=FEATURES="test" emerge-${BOARD} update_engine Change-Id: Ief9c4fa88ad38338f6853555f88e77cac4407e4d --- update_attempter_unittest.cc | 236 +++++++++++++++++++++++++++++++---- 1 file changed, 214 insertions(+), 22 deletions(-) diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 9421c19f..f20f107f 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -88,6 +88,21 @@ namespace chromeos_update_engine { namespace { +struct CheckForUpdateTestParams { + // Setups + Inputs: + UpdateStatus status = UpdateStatus::IDLE; + string app_version = "fake_app_version"; + string omaha_url = "fake_omaha_url"; + UpdateAttemptFlags flags = UpdateAttemptFlags::kNone; + bool is_official_build = true; + bool are_dev_features_enabled = false; + + // Expects: + string expected_forced_app_version = ""; + string expected_forced_omaha_url = ""; + bool expected_result = true; +}; + class MockDlcService : public DlcServiceInterface { public: MOCK_METHOD1(GetInstalled, bool(vector*)); @@ -122,7 +137,8 @@ class UpdateAttempterUnderTest : public UpdateAttempter { // Indicates whether |ScheduleUpdates()| was called. bool schedule_updates_called() const { return schedule_updates_called_; } - // Need to expose |forced_omaha_url_| so we can test it. + // Need to expose following private members of |UpdateAttempter| for tests. + const string& forced_app_version() const { return forced_app_version_; } const string& forced_omaha_url() const { return forced_omaha_url_; } private: @@ -217,6 +233,9 @@ class UpdateAttempterTest : public ::testing::Test { } bool actual_using_p2p_for_sharing() { return actual_using_p2p_for_sharing_; } + // |CheckForUpdate()| related member functions. + void TestCheckForUpdate(); + base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; @@ -232,10 +251,34 @@ class UpdateAttempterTest : public ::testing::Test { prefs_; // Shortcut to |fake_system_state_->mock_prefs()|. NiceMock mock_connection_manager; + // |CheckForUpdate()| test params. 
+ CheckForUpdateTestParams cfu_params_; + bool actual_using_p2p_for_downloading_; bool actual_using_p2p_for_sharing_; }; +void UpdateAttempterTest::TestCheckForUpdate() { + // Setup + attempter_.status_ = cfu_params_.status; + fake_system_state_.fake_hardware()->SetIsOfficialBuild( + cfu_params_.is_official_build); + fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled( + cfu_params_.are_dev_features_enabled); + + // Invocation + EXPECT_EQ( + cfu_params_.expected_result, + attempter_.CheckForUpdate( + cfu_params_.app_version, cfu_params_.omaha_url, cfu_params_.flags)); + + // Verify + EXPECT_EQ(cfu_params_.expected_forced_app_version, + attempter_.forced_app_version()); + EXPECT_EQ(cfu_params_.expected_forced_omaha_url, + attempter_.forced_omaha_url()); +} + void UpdateAttempterTest::ScheduleQuitMainLoop() { loop_.PostTask( FROM_HERE, @@ -1288,33 +1331,182 @@ TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedOfficialNormal) { EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed()); } -TEST_F(UpdateAttempterTest, CheckForUpdateAUDlcTest) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false); +TEST_F(UpdateAttempterTest, CheckForUpdateInteractiveNotIdleFails) { + // GIVEN an update is in progress. + cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; + // GIVEN a interactive update. - const string dlc_module_id = "a_dlc_module_id"; - vector dlc_module_ids = {dlc_module_id}; - ON_CALL(mock_dlcservice_, GetInstalled(testing::_)) - .WillByDefault(DoAll(testing::SetArgPointee<0>(dlc_module_ids), - testing::Return(true))); + // THEN result should indicate failure. + cfu_params_.expected_result = false; - attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone); - EXPECT_EQ(attempter_.dlc_module_ids_.size(), 1); - EXPECT_EQ(attempter_.dlc_module_ids_[0], dlc_module_id); + TestCheckForUpdate(); } -TEST_F(UpdateAttempterTest, CheckForUpdateAUTest) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false); - attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone); - EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url()); +// TODO(b/137217982): Currently, since the logic is to flow through, the app +// version and omaha url are cleared. +TEST_F(UpdateAttempterTest, + CheckForUpdateNonInteractiveNotIdleOfficialBuildSucceeds) { + // GIVEN an update is in progress. + cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; + // GIVEN a non interactive update. + cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; + + // THEN we except forced app version + forced omaha url to be cleared. + + TestCheckForUpdate(); } -TEST_F(UpdateAttempterTest, CheckForUpdateScheduledAUTest) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false); - attempter_.CheckForUpdate("", "autest-scheduled", UpdateAttemptFlags::kNone); - EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url()); +// TODO(b/137217982): Currently, since the logic is to flow through, the app +// version and omaha url are set based on inputs. +TEST_F(UpdateAttempterTest, + CheckForUpdateNonInteractiveNotIdleUnofficialBuildSucceeds) { + // GIVEN an update is in progress. + cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; + // GIVEN a non interactive update. 
+ cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + + // THEN the forced app version + forced omaha url changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateOfficalBuildClearsSource) { + // GIVEN a official build. + + // THEN we except forced app version + forced omaha url to be cleared. + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildChangesSource) { + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + + // THEN the forced app version + forced omaha url changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildScheduledAUTest) { + // GIVEN a scheduled autest omaha url. + cfu_params_.omaha_url = "autest-scheduled"; + + // THEN forced app version is cleared. + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildScheduledAUTest) { + // GIVEN a scheduled autest omaha url. + cfu_params_.omaha_url = "autest-scheduled"; + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + + // THEN forced app version changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildAUTest) { + // GIVEN a autest omaha url. + cfu_params_.omaha_url = "autest"; + + // THEN forced app version is cleared. + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildAUTest) { + // GIVEN a autest omha url. + cfu_params_.omaha_url = "autest"; + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + + // THEN forced app version changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, + CheckForUpdateNonInteractiveOfficialBuildScheduledAUTest) { + // GIVEN a scheduled autest omaha url. + cfu_params_.omaha_url = "autest-scheduled"; + // GIVEN a non interactive update. + cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; + + // THEN forced app version is cleared. + // THEN forced omaha url changes to default constant. 
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, + CheckForUpdateNonInteractiveUnofficialBuildScheduledAUTest) { + // GIVEN a scheduled autest omaha url. + cfu_params_.omaha_url = "autest-scheduled"; + // GIVEN a non interactive update. + cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + + // THEN forced app version changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveOfficialBuildAUTest) { + // GIVEN a autest omaha url. + cfu_params_.omaha_url = "autest"; + // GIVEN a non interactive update. + cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; + + // THEN forced app version is cleared. + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveUnofficialBuildAUTest) { + // GIVEN a autest omaha url. + cfu_params_.omaha_url = "autest"; + // GIVEN a non interactive update. + cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + + // THEN forced app version changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + // THEN forced omaha url changes to default constant. + cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL; + + TestCheckForUpdate(); } TEST_F(UpdateAttempterTest, CheckForInstallTest) { From e4d414eecf6f6c2cacb2be28d852c6c787496c78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kan=20Kvist?= Date: Fri, 28 Jun 2019 08:05:06 +0200 Subject: [PATCH 062/624] Only scan for deflates in regular files It only makes sense to scan for deflates in regular files. Scanning a symlink to a zip/gzip file would crash the generator. Test: Run ota package generation script with image including a symlink Bug: 137128486 Change-Id: I16f9040f2e483dcbb6a77d6dc56d38d32529521c --- payload_generator/deflate_utils.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc index a7a05032..01402dd6 100644 --- a/payload_generator/deflate_utils.cc +++ b/payload_generator/deflate_utils.cc @@ -74,6 +74,15 @@ bool IsSquashfsImage(const string& part_path, return false; } +bool IsRegularFile(const FilesystemInterface::File& file) { + // If inode is 0, then stat information is invalid for some psuedo files + if (file.file_stat.st_ino != 0 && + (file.file_stat.st_mode & S_IFMT) == S_IFREG) { + return true; + } + return false; +} + // Realigns subfiles |files| of a splitted file |file| into its correct // positions. This can be used for squashfs, zip, apk, etc. 
bool RealignSplittedFiles(const FilesystemInterface::File& file, @@ -265,7 +274,9 @@ bool PreprocessPartitionFiles(const PartitionConfig& part, result_files->reserve(tmp_files.size()); for (auto& file : tmp_files) { - if (IsSquashfsImage(part.path, file)) { + auto is_regular_file = IsRegularFile(file); + + if (is_regular_file && IsSquashfsImage(part.path, file)) { // Read the image into a file. base::FilePath path; TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&path)); @@ -295,7 +306,7 @@ bool PreprocessPartitionFiles(const PartitionConfig& part, } } - if (extract_deflates) { + if (is_regular_file && extract_deflates) { // Search for deflates if the file is in zip or gzip format. // .zvoice files may eventually move out of rootfs. If that happens, // remove ".zvoice" (crbug.com/782918). From 75daa3822680b04882ce13d4803ee78f9c3f365f Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 2 Jul 2019 11:17:24 -0700 Subject: [PATCH 063/624] update_engine: Period background update check flow correctness The |UpdateAttempter| has a tremendous amount of possible situations it can be in and be handling due to the async nature. The following tests try and cover important cases of the member functions in particular |OnUpdateScheduled()| and |ScheduleUpdates()|. BUG=chromium:924165 TEST=FEATURES="test" emerge-${BOARD} update_engine Change-Id: If5350a74b444f4c0e58da93ddba6d7c832945322 --- update_attempter.cc | 2 +- update_attempter_unittest.cc | 157 +++++++++++++++++++++++++++++++++-- 2 files changed, 150 insertions(+), 9 deletions(-) diff --git a/update_attempter.cc b/update_attempter.cc index 8df8b614..62a999c4 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -869,7 +869,7 @@ bool UpdateAttempter::CheckForUpdate(const string& app_version, // of the previously set ones. current_update_attempt_flags_ = flags; // Note: The caching for non-interactive update checks happens in - // OnUpdateScheduled(). + // |OnUpdateScheduled()|. 
} if (forced_update_pending_callback_.get()) { diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index f20f107f..203d7044 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -103,6 +103,16 @@ struct CheckForUpdateTestParams { bool expected_result = true; }; +struct OnUpdateScheduledTestParams { + // Setups + Inputs: + UpdateCheckParams params = {}; + EvalStatus status = EvalStatus::kFailed; + // Expects: + UpdateStatus exit_status = UpdateStatus::IDLE; + bool should_schedule_updates_be_called = false; + bool should_update_be_called = false; +}; + class MockDlcService : public DlcServiceInterface { public: MOCK_METHOD1(GetInstalled, bool(vector*)); @@ -120,28 +130,67 @@ class UpdateAttempterUnderTest : public UpdateAttempter { explicit UpdateAttempterUnderTest(SystemState* system_state) : UpdateAttempter(system_state, nullptr) {} + void Update(const std::string& app_version, + const std::string& omaha_url, + const std::string& target_channel, + const std::string& target_version_prefix, + bool rollback_allowed, + bool rollback_data_save_requested, + int rollback_allowed_milestones, + bool obey_proxies, + bool interactive) override { + update_called_ = true; + if (do_update_) { + UpdateAttempter::Update(app_version, + omaha_url, + target_channel, + target_version_prefix, + rollback_allowed, + rollback_data_save_requested, + rollback_allowed_milestones, + obey_proxies, + interactive); + return; + } + LOG(INFO) << "[TEST] Update() disabled."; + status_ = UpdateStatus::CHECKING_FOR_UPDATE; + } + + void DisableUpdate() { do_update_ = false; } + + bool WasUpdateCalled() const { return update_called_; } + // Wrap the update scheduling method, allowing us to opt out of scheduled // updates for testing purposes. bool ScheduleUpdates() override { schedule_updates_called_ = true; - if (do_schedule_updates_) { - UpdateAttempter::ScheduleUpdates(); - } else { - LOG(INFO) << "[TEST] Update scheduling disabled."; - } + if (do_schedule_updates_) + return UpdateAttempter::ScheduleUpdates(); + LOG(INFO) << "[TEST] Update scheduling disabled."; + waiting_for_scheduled_check_ = true; return true; } void DisableScheduleUpdates() { do_schedule_updates_ = false; } // Indicates whether |ScheduleUpdates()| was called. - bool schedule_updates_called() const { return schedule_updates_called_; } + bool WasScheduleUpdatesCalled() const { return schedule_updates_called_; } // Need to expose following private members of |UpdateAttempter| for tests. const string& forced_app_version() const { return forced_app_version_; } const string& forced_omaha_url() const { return forced_omaha_url_; } + // Need to expose |waiting_for_scheduled_check_| for testing. + void SetWaitingForScheduledCheck(bool waiting) { + waiting_for_scheduled_check_ = waiting; + } + private: + // Used for overrides of |Update()|. + bool update_called_ = false; + bool do_update_ = true; + + // Used for overrides of |ScheduleUpdates()|. bool schedule_updates_called_ = false; bool do_schedule_updates_ = true; }; @@ -236,6 +285,9 @@ class UpdateAttempterTest : public ::testing::Test { // |CheckForUpdate()| related member functions. void TestCheckForUpdate(); + // |OnUpdateScheduled()| related member functions. + void TestOnUpdateScheduled(); + base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; @@ -254,6 +306,9 @@ class UpdateAttempterTest : public ::testing::Test { // |CheckForUpdate()| test params. CheckForUpdateTestParams cfu_params_; + // |OnUpdateScheduled()| test params. 
+ OnUpdateScheduledTestParams ous_params_; + bool actual_using_p2p_for_downloading_; bool actual_using_p2p_for_sharing_; }; @@ -779,7 +834,7 @@ void UpdateAttempterTest::PingOmahaTestStart() { TEST_F(UpdateAttempterTest, PingOmahaTest) { EXPECT_FALSE(attempter_.waiting_for_scheduled_check_); - EXPECT_FALSE(attempter_.schedule_updates_called()); + EXPECT_FALSE(attempter_.WasScheduleUpdatesCalled()); // Disable scheduling of subsequnet checks; we're using the |DefaultPolicy| in // testing, which is more permissive than we want to handle here. attempter_.DisableScheduleUpdates(); @@ -788,7 +843,7 @@ TEST_F(UpdateAttempterTest, PingOmahaTest) { base::Unretained(this))); brillo::MessageLoopRunMaxIterations(&loop_, 100); EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status()); - EXPECT_TRUE(attempter_.schedule_updates_called()); + EXPECT_TRUE(attempter_.WasScheduleUpdatesCalled()); } TEST_F(UpdateAttempterTest, CreatePendingErrorEventTest) { @@ -1629,6 +1684,7 @@ TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { } TEST_F(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable) { + // Default construction for |waiting_for_scheduled_check_| is false. EXPECT_FALSE(attempter_.IsUpdateRunningOrScheduled()); // Verify in-progress update with UPDATE_AVAILABLE is running attempter_.status_ = UpdateStatus::UPDATE_AVAILABLE; @@ -1919,4 +1975,89 @@ TEST_F(UpdateAttempterTest, EmptyQuickFixToken) { loop_.Run(); } +TEST_F(UpdateAttempterTest, ScheduleUpdateSpamHandlerTest) { + EXPECT_CALL(mock_update_manager_, AsyncPolicyRequestUpdateCheckAllowed(_, _)) + .Times(1); + EXPECT_TRUE(attempter_.ScheduleUpdates()); + // Now there is an update scheduled which means that all subsequent + // |ScheduleUpdates()| should fail. + EXPECT_FALSE(attempter_.ScheduleUpdates()); + EXPECT_FALSE(attempter_.ScheduleUpdates()); + EXPECT_FALSE(attempter_.ScheduleUpdates()); +} + +// Critical tests to always make sure that an update is scheduled. The following +// unittest(s) try and cover the correctness in synergy between +// |UpdateAttempter| and |UpdateManager|. Also it is good to remember the +// actions that happen in the flow when |UpdateAttempter| get callbacked on +// |OnUpdateScheduled()| -> (various cases which leads to) -> |ProcessingDone()| +void UpdateAttempterTest::TestOnUpdateScheduled() { + // Setup + attempter_.SetWaitingForScheduledCheck(true); + attempter_.DisableUpdate(); + attempter_.DisableScheduleUpdates(); + + // Invocation + attempter_.OnUpdateScheduled(ous_params_.status, ous_params_.params); + + // Verify + EXPECT_EQ(ous_params_.exit_status, attempter_.status()); + EXPECT_EQ(ous_params_.should_schedule_updates_be_called, + attempter_.WasScheduleUpdatesCalled()); + EXPECT_EQ(ous_params_.should_update_be_called, attempter_.WasUpdateCalled()); +} + +TEST_F(UpdateAttempterTest, OnUpdatesScheduledFailed) { + // GIVEN failed status. + + // THEN update should be scheduled. + ous_params_.should_schedule_updates_be_called = true; + + TestOnUpdateScheduled(); +} + +TEST_F(UpdateAttempterTest, OnUpdatesScheduledAskMeAgainLater) { + // GIVEN ask me again later status. + ous_params_.status = EvalStatus::kAskMeAgainLater; + + // THEN update should be scheduled. + ous_params_.should_schedule_updates_be_called = true; + + TestOnUpdateScheduled(); +} + +TEST_F(UpdateAttempterTest, OnUpdatesScheduledContinue) { + // GIVEN continue status. + ous_params_.status = EvalStatus::kContinue; + + // THEN update should be scheduled. 
+ ous_params_.should_schedule_updates_be_called = true; + + TestOnUpdateScheduled(); +} + +TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceededButUpdateDisabledFails) { + // GIVEN updates disabled. + ous_params_.params = {.updates_enabled = false}; + // GIVEN succeeded status. + ous_params_.status = EvalStatus::kSucceeded; + + // THEN update should not be scheduled. + + TestOnUpdateScheduled(); +} + +TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceeded) { + // GIVEN updates enabled. + ous_params_.params = {.updates_enabled = true}; + // GIVEN succeeded status. + ous_params_.status = EvalStatus::kSucceeded; + + // THEN update should be called indicating status change. + ous_params_.exit_status = UpdateStatus::CHECKING_FOR_UPDATE; + ous_params_.should_update_be_called = true; + + TestOnUpdateScheduled(); +} + } // namespace chromeos_update_engine From 2b73ac21602d96ebb25801f06c33be0f3ef33dc9 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 2 Jul 2019 11:17:24 -0700 Subject: [PATCH 064/624] update_engine: Force Update + Install uniform logic pattern The change here is to make the exit points within |CheckForUpdate()| and |CheckForInstall()| uniform with simpler logic. When a force update or install is called, it is heavily dependent on the |forced_update_pending_callback_| and the existence of a scheduled update in the message loop. The |forced_update_pending_callback_| is the one that notifies a change hence calling upon |OnUpdateScheduled()| that was waiting on the message loop. The call to |ScheduleUpdates()| before forcing the callback is to guarantee that the |forced_update_pending_callback_| will take effect. Even if the |forced_update_pending_callback_| is not set, it is not a failure. Simply, the periodic check will use the set forced variables when it is scheduled. BUG=none TEST=FEATURES="test" emerge-${BOARD} update_engine Change-Id: Ic97671a70880c8b05d513a8dea93fce3b982bbf6 --- update_attempter.cc | 32 +++++++++++++++++--------------- update_attempter_unittest.cc | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/update_attempter.cc b/update_attempter.cc index 62a999c4..cf588e5b 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -872,16 +872,17 @@ bool UpdateAttempter::CheckForUpdate(const string& app_version, // |OnUpdateScheduled()|. } + // |forced_update_pending_callback_| should always be set, but even in the + // case that it is not, we still return true indicating success because the + // scheduled periodic check will pick up these changes. if (forced_update_pending_callback_.get()) { - if (!system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) { - dlc_module_ids_.clear(); - } - // Make sure that a scheduling request is made prior to calling the forced - // update pending callback. + // Always call |ScheduleUpdates()| before forcing an update. This is because + // we need an update to be scheduled for the + // |forced_update_pending_callback_| to have an effect. Here we don't need + // to care about the return value from |ScheduleUpdate()|. ScheduleUpdates(); forced_update_pending_callback_->Run(true, interactive); } - return true; } @@ -903,15 +904,16 @@ bool UpdateAttempter::CheckForInstall(const vector& dlc_module_ids, forced_omaha_url_ = constants::kOmahaDefaultAUTestURL; } - if (!ScheduleUpdates()) { - if (forced_update_pending_callback_.get()) { - // Make sure that a scheduling request is made prior to calling the forced - // update pending callback. 
- ScheduleUpdates(); - forced_update_pending_callback_->Run(true, true); - return true; - } - return false; + // |forced_update_pending_callback_| should always be set, but even in the + // case that it is not, we still return true indicating success because the + // scheduled periodic check will pick up these changes. + if (forced_update_pending_callback_.get()) { + // Always call |ScheduleUpdates()| before forcing an update. This is because + // we need an update to be scheduled for the + // |forced_update_pending_callback_| to have an effect. Here we don't need + // to care about the return value from |ScheduleUpdate()|. + ScheduleUpdates(); + forced_update_pending_callback_->Run(true, true); } return true; } diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 203d7044..b7d09971 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -100,6 +100,7 @@ struct CheckForUpdateTestParams { // Expects: string expected_forced_app_version = ""; string expected_forced_omaha_url = ""; + bool should_schedule_updates_be_called = true; bool expected_result = true; }; @@ -332,6 +333,8 @@ void UpdateAttempterTest::TestCheckForUpdate() { attempter_.forced_app_version()); EXPECT_EQ(cfu_params_.expected_forced_omaha_url, attempter_.forced_omaha_url()); + EXPECT_EQ(cfu_params_.should_schedule_updates_be_called, + attempter_.WasScheduleUpdatesCalled()); } void UpdateAttempterTest::ScheduleQuitMainLoop() { @@ -1391,6 +1394,8 @@ TEST_F(UpdateAttempterTest, CheckForUpdateInteractiveNotIdleFails) { cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; // GIVEN a interactive update. + // THEN |ScheduleUpdates()| should not be called. + cfu_params_.should_schedule_updates_be_called = false; // THEN result should indicate failure. cfu_params_.expected_result = false; @@ -1564,6 +1569,34 @@ TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveUnofficialBuildAUTest) { TestCheckForUpdate(); } +TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback1) { + // GIVEN a official build. + // GIVEN forced callback is not set. + attempter_.set_forced_update_pending_callback(nullptr); + + // THEN we except forced app version + forced omaha url to be cleared. + // THEN |ScheduleUpdates()| should not be called. + cfu_params_.should_schedule_updates_be_called = false; + + TestCheckForUpdate(); +} + +TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback2) { + // GIVEN a non offical build with dev features enabled. + cfu_params_.is_official_build = false; + cfu_params_.are_dev_features_enabled = true; + // GIVEN forced callback is not set. + attempter_.set_forced_update_pending_callback(nullptr); + + // THEN the forced app version + forced omaha url changes based on input. + cfu_params_.expected_forced_app_version = cfu_params_.app_version; + cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url; + // THEN |ScheduleUpdates()| should not be called. + cfu_params_.should_schedule_updates_be_called = false; + + TestCheckForUpdate(); +} + TEST_F(UpdateAttempterTest, CheckForInstallTest) { fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false); From c437ea562cab391ca2974b61c80534ec8f0bd28c Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 11 Jul 2019 11:20:38 -0700 Subject: [PATCH 065/624] update_engine: Block force update + install when not idle Currently |CheckForUpdate()| and |CheckForInstall()| sets the values of |is_install_| to false and true respectively. 
The change of the value |is_install_| at the initial entry to these functions will cause the checks and actions taken within |ProcessingDone()| to behave as it's not intended to. This unwanted behavior occurs because |ProcessingDone()| depends on |UpdateAttempter|'s member variable |is_install_|. Until |ProcessingDone()| is called, the |is_install_| value needs to remain the same. BUG=chromium:982929 TEST=FEATURES="test" emerge-$BOARD update_engine Change-Id: Iddb55f1b46e66b9bc94b63e10e9f393f3cb89e1c --- .../include/update_engine/update_status.h | 10 ++- update_attempter.cc | 25 +++--- update_attempter.h | 1 + update_attempter_unittest.cc | 90 +++++++++---------- 4 files changed, 63 insertions(+), 63 deletions(-) diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index 4b86df37..edf90b4f 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -23,10 +23,12 @@ namespace update_engine { -// ATTENTION: When adding a new enum value here, always append at the end and -// make sure to make proper adjustments in UpdateAttempter:ActionCompleted(). If -// any enum memeber is deprecated, the assigned value of other members should -// not change. See b/62842358. +// ATTENTION: +// When adding a new enum value: +// - always append at the end with proper adjustments in |ActionCompleted()|. +// - always update |kNonIdleUpdateStatues| in update_attempter_unittest.cc. +// When deprecating an old enum value: +// - other enum values should not change their old values. See b/62842358. enum class UpdateStatus { IDLE = 0, CHECKING_FOR_UPDATE = 1, diff --git a/update_attempter.cc b/update_attempter.cc index cf588e5b..9573c438 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -830,19 +830,17 @@ BootControlInterface::Slot UpdateAttempter::GetRollbackSlot() const { bool UpdateAttempter::CheckForUpdate(const string& app_version, const string& omaha_url, UpdateAttemptFlags flags) { - dlc_module_ids_.clear(); - is_install_ = false; - bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive); - - if (interactive && status_ != UpdateStatus::IDLE) { - // An update check is either in-progress, or an update has completed and the - // system is in UPDATED_NEED_REBOOT. Either way, don't do an interactive - // update at this time. - LOG(INFO) << "Refusing to do an interactive update with an update already " - "in progress"; + if (status_ != UpdateStatus::IDLE) { + LOG(INFO) << "Refusing to do an update as there is an " + << (is_install_ ? "install" : "update") + << " already in progress."; return false; } + bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive); + dlc_module_ids_.clear(); + is_install_ = false; + LOG(INFO) << "Forced update check requested."; forced_app_version_.clear(); forced_omaha_url_.clear(); @@ -888,6 +886,13 @@ bool UpdateAttempter::CheckForUpdate(const string& app_version, bool UpdateAttempter::CheckForInstall(const vector& dlc_module_ids, const string& omaha_url) { + if (status_ != UpdateStatus::IDLE) { + LOG(INFO) << "Refusing to do an install as there is an " + << (is_install_ ? 
"install" : "update") + << " already in progress."; + return false; + } + dlc_module_ids_ = dlc_module_ids; is_install_ = true; forced_omaha_url_.clear(); diff --git a/update_attempter.h b/update_attempter.h index b0654d84..3b580ad7 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -250,6 +250,7 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile); FRIEND_TEST(UpdateAttempterTest, BroadcastCompleteDownloadTest); FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest); + FRIEND_TEST(UpdateAttempterTest, CheckForInstallNotIdleFails); FRIEND_TEST(UpdateAttempterTest, CheckForUpdateAUDlcTest); FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventTest); FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index b7d09971..b69527de 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -88,6 +88,19 @@ namespace chromeos_update_engine { namespace { +const UpdateStatus kNonIdleUpdateStatuses[] = { + UpdateStatus::CHECKING_FOR_UPDATE, + UpdateStatus::UPDATE_AVAILABLE, + UpdateStatus::DOWNLOADING, + UpdateStatus::VERIFYING, + UpdateStatus::FINALIZING, + UpdateStatus::UPDATED_NEED_REBOOT, + UpdateStatus::REPORTING_ERROR_EVENT, + UpdateStatus::ATTEMPTING_ROLLBACK, + UpdateStatus::DISABLED, + UpdateStatus::NEED_PERMISSION_TO_UPDATE, +}; + struct CheckForUpdateTestParams { // Setups + Inputs: UpdateStatus status = UpdateStatus::IDLE; @@ -1389,50 +1402,29 @@ TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedOfficialNormal) { EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed()); } -TEST_F(UpdateAttempterTest, CheckForUpdateInteractiveNotIdleFails) { - // GIVEN an update is in progress. - cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; - // GIVEN a interactive update. - - // THEN |ScheduleUpdates()| should not be called. - cfu_params_.should_schedule_updates_be_called = false; - // THEN result should indicate failure. - cfu_params_.expected_result = false; - - TestCheckForUpdate(); -} - -// TODO(b/137217982): Currently, since the logic is to flow through, the app -// version and omaha url are cleared. -TEST_F(UpdateAttempterTest, - CheckForUpdateNonInteractiveNotIdleOfficialBuildSucceeds) { - // GIVEN an update is in progress. - cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; - // GIVEN a non interactive update. - cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; - - // THEN we except forced app version + forced omaha url to be cleared. +// TODO(kimjae): Follow testing pattern with params for |CheckForInstall()|. +// When adding, remove older tests related to |CheckForInstall()|. +TEST_F(UpdateAttempterTest, CheckForInstallNotIdleFails) { + for (const auto status : kNonIdleUpdateStatuses) { + // GIVEN a non-idle status. + attempter_.status_ = status; - TestCheckForUpdate(); + EXPECT_FALSE(attempter_.CheckForInstall({}, "")); + } } -// TODO(b/137217982): Currently, since the logic is to flow through, the app -// version and omaha url are set based on inputs. -TEST_F(UpdateAttempterTest, - CheckForUpdateNonInteractiveNotIdleUnofficialBuildSucceeds) { - // GIVEN an update is in progress. - cfu_params_.status = UpdateStatus::CHECKING_FOR_UPDATE; - // GIVEN a non interactive update. - cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; - // GIVEN a non offical build with dev features enabled. 
- cfu_params_.is_official_build = false; - cfu_params_.are_dev_features_enabled = true; +TEST_F(UpdateAttempterTest, CheckForUpdateNotIdleFails) { + for (const auto status : kNonIdleUpdateStatuses) { + // GIVEN a non-idle status. + cfu_params_.status = status; - // THEN the forced app version + forced omaha url changes based on input. - cfu_params_.expected_forced_app_version = cfu_params_.app_version; - cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url; + // THEN |ScheduleUpdates()| should not be called. + cfu_params_.should_schedule_updates_be_called = false; + // THEN result should indicate failure. + cfu_params_.expected_result = false; - TestCheckForUpdate(); + TestCheckForUpdate(); + } } TEST_F(UpdateAttempterTest, CheckForUpdateOfficalBuildClearsSource) { @@ -1444,7 +1436,7 @@ TEST_F(UpdateAttempterTest, CheckForUpdateOfficalBuildClearsSource) { } TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildChangesSource) { - // GIVEN a non offical build with dev features enabled. + // GIVEN a nonofficial build with dev features enabled. cfu_params_.is_official_build = false; cfu_params_.are_dev_features_enabled = true; @@ -1469,7 +1461,7 @@ TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildScheduledAUTest) { TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildScheduledAUTest) { // GIVEN a scheduled autest omaha url. cfu_params_.omaha_url = "autest-scheduled"; - // GIVEN a non offical build with dev features enabled. + // GIVEN a nonofficial build with dev features enabled. cfu_params_.is_official_build = false; cfu_params_.are_dev_features_enabled = true; @@ -1495,7 +1487,7 @@ TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildAUTest) { TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildAUTest) { // GIVEN a autest omha url. cfu_params_.omaha_url = "autest"; - // GIVEN a non offical build with dev features enabled. + // GIVEN a nonofficial build with dev features enabled. cfu_params_.is_official_build = false; cfu_params_.are_dev_features_enabled = true; @@ -1511,7 +1503,7 @@ TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveOfficialBuildScheduledAUTest) { // GIVEN a scheduled autest omaha url. cfu_params_.omaha_url = "autest-scheduled"; - // GIVEN a non interactive update. + // GIVEN a noninteractive update. cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; // THEN forced app version is cleared. @@ -1525,9 +1517,9 @@ TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveUnofficialBuildScheduledAUTest) { // GIVEN a scheduled autest omaha url. cfu_params_.omaha_url = "autest-scheduled"; - // GIVEN a non interactive update. + // GIVEN a noninteractive update. cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; - // GIVEN a non offical build with dev features enabled. + // GIVEN a nonofficial build with dev features enabled. cfu_params_.is_official_build = false; cfu_params_.are_dev_features_enabled = true; @@ -1542,7 +1534,7 @@ TEST_F(UpdateAttempterTest, TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveOfficialBuildAUTest) { // GIVEN a autest omaha url. cfu_params_.omaha_url = "autest"; - // GIVEN a non interactive update. + // GIVEN a noninteractive update. cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; // THEN forced app version is cleared. @@ -1555,9 +1547,9 @@ TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveOfficialBuildAUTest) { TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveUnofficialBuildAUTest) { // GIVEN a autest omaha url. 
cfu_params_.omaha_url = "autest"; - // GIVEN a non interactive update. + // GIVEN a noninteractive update. cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive; - // GIVEN a non offical build with dev features enabled. + // GIVEN a nonofficial build with dev features enabled. cfu_params_.is_official_build = false; cfu_params_.are_dev_features_enabled = true; @@ -1582,7 +1574,7 @@ TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback1) { } TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback2) { - // GIVEN a non offical build with dev features enabled. + // GIVEN a nonofficial build with dev features enabled. cfu_params_.is_official_build = false; cfu_params_.are_dev_features_enabled = true; // GIVEN forced callback is not set. From a441743c13237847fc96d5721faa8721806d8363 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 8 Jul 2019 12:52:40 -0700 Subject: [PATCH 066/624] update_engine: Use operation instead of current_operation from update_engine.proto The current mechanism for interchaning the current operation of update_engine is quite old and very fragile to changes. Currently, each client defines its own set of operations, writes their own string to enum conversion logic, etc. We need to unify all these clients to use just one set of well defined operations. This CL uses the new enum Operation from the protobuf instead of transferring a string to identify the current operation of the update_engine. BUG=chromium:977320 TEST=precq Cq-Depend: chromium:1690424 Change-Id: I4d3a2a142c169cf6c972fe58d1d8d936d2349eed Reviewed-on: https://chromium-review.googlesource.com/1690683 Tested-by: Amin Hassani Commit-Ready: Amin Hassani Legacy-Commit-Queue: Commit Bot Reviewed-by: Xiaochu Liu Reviewed-by: Sen Jiang Reviewed-by: Nicolas Norvez Reviewed-by: Ben Chan --- client_library/client_dbus.cc | 11 +++--- .../include/update_engine/update_status.h | 12 ++++++ dbus_service.cc | 7 +++- update_status_utils.cc | 38 ------------------- update_status_utils.h | 3 -- 5 files changed, 23 insertions(+), 48 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 18ae23b7..d0465029 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -26,7 +26,6 @@ #include "update_engine/update_status_utils.h" -using chromeos_update_engine::StringToUpdateStatus; using dbus::Bus; using org::chromium::UpdateEngineInterfaceProxy; using std::string; @@ -48,13 +47,13 @@ namespace internal { namespace { // This converts the status from Protobuf |StatusResult| to The internal // |UpdateEngineStatus| struct. 
-bool ConvertToUpdateEngineStatus(const StatusResult& status, +void ConvertToUpdateEngineStatus(const StatusResult& status, UpdateEngineStatus* out_status) { out_status->last_checked_time = status.last_checked_time(); out_status->progress = status.progress(); out_status->new_version = status.new_version(); out_status->new_size_bytes = status.new_size(); - return StringToUpdateStatus(status.current_operation(), &out_status->status); + out_status->status = static_cast(status.current_operation()); } } // namespace @@ -110,7 +109,8 @@ bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time, *out_progress = status.progress(); *out_new_version = status.new_version(); *out_new_size = status.new_size(); - return StringToUpdateStatus(status.current_operation(), out_update_status); + *out_update_status = static_cast(status.current_operation()); + return true; } bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const { @@ -119,7 +119,8 @@ bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const { return false; } - return ConvertToUpdateEngineStatus(status, out_status); + ConvertToUpdateEngineStatus(status, out_status); + return true; } bool DBusUpdateEngineClient::SetCohortHint(const string& cohort_hint) { diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index edf90b4f..059181cc 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -21,6 +21,11 @@ #include +// NOTE: Keep this file in sync with +// platform2/system_api/dbus/update_engine/update_engine.proto especially: +// - |UpdateStatus| <-> |Operation| +// - |UpdateEngineStatus| <-> |StatusResult| + namespace update_engine { // ATTENTION: @@ -43,6 +48,13 @@ enum class UpdateStatus { // Broadcast this state when an update aborts because user preferences do not // allow updates, e.g. over cellular network. NEED_PERMISSION_TO_UPDATE = 10, + + // This value is exclusively used in Chrome. DO NOT define nor use it. + // TODO(crbug.com/977320): Remove this value from chrome by refactoring the + // Chrome code and evantually from here. This is not really an operation or + // state that the update engine stays on. This is the result of an internal + // failure and should be reflected differently. + // ERROR = -1, }; // Enum of bit-wise flags for controlling how updates are attempted. diff --git a/dbus_service.cc b/dbus_service.cc index 4e372212..0cfe26b4 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -33,8 +33,10 @@ using chromeos_update_engine::UpdateEngineService; using dlcservice::DlcModuleList; using std::string; using std::vector; +using update_engine::Operation; using update_engine::StatusResult; using update_engine::UpdateEngineStatus; +using update_engine::UpdateStatus; namespace { // Converts the internal |UpdateEngineStatus| to the protobuf |StatusResult|. @@ -42,7 +44,7 @@ void ConvertToStatusResult(const UpdateEngineStatus& ue_status, StatusResult* out_status) { out_status->set_last_checked_time(ue_status.last_checked_time); out_status->set_progress(ue_status.progress); - out_status->set_current_operation(UpdateStatusToString(ue_status.status)); + out_status->set_current_operation(static_cast(ue_status.status)); out_status->set_new_version(ue_status.new_version); out_status->set_new_size(ue_status.new_size_bytes); } @@ -240,7 +242,8 @@ void UpdateEngineAdaptor::SendStatusUpdate( // TODO(crbug.com/977320): Deprecate |StatusUpdate| signal. 
SendStatusUpdateSignal(status.last_checked_time(), status.progress(), - status.current_operation(), + UpdateStatusToString(static_cast( + status.current_operation())), status.new_version(), status.new_size()); diff --git a/update_status_utils.cc b/update_status_utils.cc index cbc4f14b..f3917d1d 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -52,42 +52,4 @@ const char* UpdateStatusToString(const UpdateStatus& status) { return nullptr; } -bool StringToUpdateStatus(const std::string& s, UpdateStatus* status) { - if (s == update_engine::kUpdateStatusIdle) { - *status = UpdateStatus::IDLE; - return true; - } else if (s == update_engine::kUpdateStatusCheckingForUpdate) { - *status = UpdateStatus::CHECKING_FOR_UPDATE; - return true; - } else if (s == update_engine::kUpdateStatusUpdateAvailable) { - *status = UpdateStatus::UPDATE_AVAILABLE; - return true; - } else if (s == update_engine::kUpdateStatusNeedPermissionToUpdate) { - *status = UpdateStatus::NEED_PERMISSION_TO_UPDATE; - return true; - } else if (s == update_engine::kUpdateStatusDownloading) { - *status = UpdateStatus::DOWNLOADING; - return true; - } else if (s == update_engine::kUpdateStatusVerifying) { - *status = UpdateStatus::VERIFYING; - return true; - } else if (s == update_engine::kUpdateStatusFinalizing) { - *status = UpdateStatus::FINALIZING; - return true; - } else if (s == update_engine::kUpdateStatusUpdatedNeedReboot) { - *status = UpdateStatus::UPDATED_NEED_REBOOT; - return true; - } else if (s == update_engine::kUpdateStatusReportingErrorEvent) { - *status = UpdateStatus::REPORTING_ERROR_EVENT; - return true; - } else if (s == update_engine::kUpdateStatusAttemptingRollback) { - *status = UpdateStatus::ATTEMPTING_ROLLBACK; - return true; - } else if (s == update_engine::kUpdateStatusDisabled) { - *status = UpdateStatus::DISABLED; - return true; - } - return false; -} - } // namespace chromeos_update_engine diff --git a/update_status_utils.h b/update_status_utils.h index 30ae53b7..e3b8b43a 100644 --- a/update_status_utils.h +++ b/update_status_utils.h @@ -25,9 +25,6 @@ namespace chromeos_update_engine { const char* UpdateStatusToString(const update_engine::UpdateStatus& status); -bool StringToUpdateStatus(const std::string& update_status_as_string, - update_engine::UpdateStatus* status); - } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_UPDATE_STATUS_UTILS_H_ From c43f6bbd8ab12ee2bf0c82913c63e77b191e00e8 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 3 Jul 2019 12:56:52 -0700 Subject: [PATCH 067/624] update_engine: Query dlcservice to avoid stale DLC module IDs Within |UpdateAttempter|, the possibility of using stale DLC module IDs existed. This should never happen. The fix is to go through |DlcService| to recieve the list of fresh and current DLC module IDs. This change now will handle both interactive and non-interactive update cases. This is only for updates. 
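Reduced to a sketch, the intended pattern looks like this (illustrative only; the interface name and header path are assumed from the diffs here, and GetInstalled() clears its output argument on entry as described below):

  #include <string>
  #include <vector>
  #include "update_engine/dlcservice_interface.h"  // path assumed
  // Ask dlcservice for the currently installed DLC modules right before the
  // Omaha request is built, instead of reusing a list cached at
  // CheckForUpdate() time.
  std::vector<std::string> QueryInstalledDlcs(
      chromeos_update_engine::DlcServiceInterface* dlcservice) {
    std::vector<std::string> dlc_module_ids;
    if (!dlcservice->GetInstalled(&dlc_module_ids)) {
      // GetInstalled() already cleared |dlc_module_ids|, so a failure simply
      // means the update proceeds with no DLC modules attached.
      dlc_module_ids.clear();
    }
    return dlc_module_ids;
  }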
BUG=chromium:978525 TEST=FEATURES="test" emerge-${BOARD} update_engine Change-Id: Iffa75f60f367b2a110c559654fa3df9c48c714b1 --- dlcservice_chromeos.cc | 2 ++ dlcservice_chromeos.h | 4 ++++ update_attempter.cc | 16 +++++++++++----- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc index b7dee360..ad5806ac 100644 --- a/dlcservice_chromeos.cc +++ b/dlcservice_chromeos.cc @@ -35,6 +35,8 @@ std::unique_ptr CreateDlcService() { bool DlcServiceChromeOS::GetInstalled(vector* dlc_module_ids) { if (!dlc_module_ids) return false; + dlc_module_ids->clear(); + org::chromium::DlcServiceInterfaceProxy dlcservice_proxy( DBusConnection::Get()->GetDBus()); diff --git a/dlcservice_chromeos.h b/dlcservice_chromeos.h index 8d103c13..73442e62 100644 --- a/dlcservice_chromeos.h +++ b/dlcservice_chromeos.h @@ -33,6 +33,10 @@ class DlcServiceChromeOS : public DlcServiceInterface { ~DlcServiceChromeOS() = default; // BootControlInterface overrides. + // Will clear the |dlc_module_ids|, passed to be modified. Clearing by + // default has the added benefit of avoiding indeterminate behavior in the + // case that |dlc_module_ids| wasn't empty to begin which would lead to + // possible duplicates and cases when error was not checked it's still safe. bool GetInstalled(std::vector* dlc_module_ids) override; private: diff --git a/update_attempter.cc b/update_attempter.cc index 9573c438..b06807d3 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -425,7 +425,14 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, // target channel. omaha_request_params_->UpdateDownloadChannel(); } - // Set the DLC module ID list. + + // Set the |dlc_module_ids_| only for an update. This is required to get the + // currently installed DLC(s). + if (!is_install_ && + !system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) { + LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the " + "state of dlcservice, will not update DLC modules."; + } omaha_request_params_->set_dlc_module_ids(dlc_module_ids_); omaha_request_params_->set_is_install(is_install_); @@ -838,7 +845,6 @@ bool UpdateAttempter::CheckForUpdate(const string& app_version, } bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive); - dlc_module_ids_.clear(); is_install_ = false; LOG(INFO) << "Forced update check requested."; @@ -903,9 +909,9 @@ bool UpdateAttempter::CheckForInstall(const vector& dlc_module_ids, if (IsAnyUpdateSourceAllowed()) { forced_omaha_url_ = omaha_url; } - if (omaha_url == kScheduledAUTestURLRequest) { - forced_omaha_url_ = constants::kOmahaDefaultAUTestURL; - } else if (omaha_url == kAUTestURLRequest) { + + if (omaha_url == kScheduledAUTestURLRequest || + omaha_url == kAUTestURLRequest) { forced_omaha_url_ = constants::kOmahaDefaultAUTestURL; } From ba2fdce6739b74c4150ef365e8e7af9c78d5b8a6 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 11 Jul 2019 13:18:58 -0700 Subject: [PATCH 068/624] update_engine: More accurate and general method name This is a small review that merely changes |UpdateAttempter|'s member function |IsUpdateRunningOrScheduled()| to a more accurate name and general name of |IsBusyOrUpdateScheduled()|. 
BUG=none TEST=FEATURES="test" emerge-$BOARD update_engine Change-Id: I4dcce374c86f61e6cee9d060d915110765a70a79 --- update_attempter.cc | 6 +++--- update_attempter.h | 4 ++-- update_attempter_unittest.cc | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/update_attempter.cc b/update_attempter.cc index b06807d3..31d728b5 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -153,7 +153,7 @@ void UpdateAttempter::Init() { } bool UpdateAttempter::ScheduleUpdates() { - if (IsUpdateRunningOrScheduled()) + if (IsBusyOrUpdateScheduled()) return false; chromeos_update_manager::UpdateManager* const update_manager = @@ -1018,7 +1018,7 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, // a bug that will most likely prevent further automatic update checks. It // seems better to crash in such cases and restart the update_engine daemon // into, hopefully, a known good state. - CHECK(IsUpdateRunningOrScheduled()); + CHECK(IsBusyOrUpdateScheduled()); } void UpdateAttempter::UpdateLastCheckedTime() { @@ -1686,7 +1686,7 @@ bool UpdateAttempter::GetBootTimeAtUpdate(Time* out_boot_time) { return true; } -bool UpdateAttempter::IsUpdateRunningOrScheduled() { +bool UpdateAttempter::IsBusyOrUpdateScheduled() { return ((status_ != UpdateStatus::IDLE && status_ != UpdateStatus::UPDATED_NEED_REBOOT) || waiting_for_scheduled_check_); diff --git a/update_attempter.h b/update_attempter.h index 3b580ad7..880e9754 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -407,8 +407,8 @@ class UpdateAttempter : public ActionProcessorDelegate, // policy is available again. void UpdateRollbackHappened(); - // Returns whether an update is currently running or scheduled. - bool IsUpdateRunningOrScheduled(); + // Returns if an update is: running, applied and needs reboot, or scheduled. + bool IsBusyOrUpdateScheduled(); void CalculateStagingParams(bool interactive); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index b69527de..254579c0 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -1710,10 +1710,10 @@ TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { TEST_F(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable) { // Default construction for |waiting_for_scheduled_check_| is false. - EXPECT_FALSE(attempter_.IsUpdateRunningOrScheduled()); + EXPECT_FALSE(attempter_.IsBusyOrUpdateScheduled()); // Verify in-progress update with UPDATE_AVAILABLE is running attempter_.status_ = UpdateStatus::UPDATE_AVAILABLE; - EXPECT_TRUE(attempter_.IsUpdateRunningOrScheduled()); + EXPECT_TRUE(attempter_.IsBusyOrUpdateScheduled()); } TEST_F(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart) { From b5ba797ba9ee97130d56f1457ba239721d5b8177 Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Thu, 11 Jul 2019 09:51:06 -0700 Subject: [PATCH 069/624] update_engine: call res_init and retry one extra time on unresolved host libcurl error Based on https://curl.haxx.se/docs/todo.html#updated_DNS_server_while_running: "If /etc/resolv.conf gets updated while a program using libcurl is running, it may cause name resolves to fail unless res_init() is called. We should consider calling res_init() + retry once unconditionally on all name resolve failures to mitigate against this." This CL added following behavior: On libcurl returns CURLE_COULDNT_RESOLVE_HOST error code: 1. we increase the max retry count by 1 for the first time it happens in the lifetime of an LibcurlHttpFetcher object. 2. we call res_init unconditionally. 
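For illustration only, the recovery path reduces to roughly the snippet below. In the actual CL the decision is driven by UnresolvedHostStateMachine; |extra_retry_granted| is an invented flag standing in for the once-per-fetcher-lifetime rule, and |no_network_max_retries| mirrors the existing retry budget.

  #include <curl/curl.h>
  #include <netinet/in.h>
  #include <resolv.h>

  // Sketch of the behavior described above, not the CL's code. res_init()
  // reloads /etc/resolv.conf so the next attempt uses the current name
  // servers; the retry budget is widened by exactly one the first time
  // resolution fails.
  void OnTransferFailedSketch(CURLcode code,
                              bool* extra_retry_granted,
                              int* no_network_max_retries) {
    if (code != CURLE_COULDNT_RESOLVE_HOST)
      return;
    res_init();
    if (!*extra_retry_granted) {
      ++*no_network_max_retries;
      *extra_retry_granted = true;
    }
  }
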
We also add UMA metrics to measure whether calling res_init helps mitigate the unresolved host problem. WIP CL: https://chromium-review.googlesource.com/c/chromium/src/+/1698722 BUG=chromium:982813 TEST=FEATURES="test" emerge-kefka update_engine, tested on a device Change-Id: Ia894eae93b3a0adbac1a831e657b75cba835dfa0 --- libcurl_http_fetcher.cc | 66 ++++++++++++++++++++++++++++++-- libcurl_http_fetcher.h | 53 +++++++++++++++++++++++-- libcurl_http_fetcher_unittest.cc | 57 +++++++++++++++++++++++++++ metrics_constants.h | 5 +++ 4 files changed, 174 insertions(+), 7 deletions(-) diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 06722fd6..247327ab 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -16,6 +16,8 @@ #include "update_engine/libcurl_http_fetcher.h" +#include +#include #include #include @@ -480,14 +482,45 @@ void LibcurlHttpFetcher::CurlPerformOnce() { if (http_response_code_) { LOG(INFO) << "HTTP response code: " << http_response_code_; no_network_retry_count_ = 0; + unresolved_host_state_machine_.UpdateState(false); } else { LOG(ERROR) << "Unable to get http response code."; - LogCurlHandleInfo(); + CURLcode curl_code = GetCurlCode(); + LOG(ERROR) << "Return code for the transfer: " << curl_code; + if (curl_code == CURLE_COULDNT_RESOLVE_HOST) { + LOG(ERROR) << "libcurl can not resolve host."; + unresolved_host_state_machine_.UpdateState(true); + if (delegate_) { + delegate_->ReportUpdateCheckMetrics( + metrics::CheckResult::kUnset, + metrics::CheckReaction::kUnset, + metrics::DownloadErrorCode::kUnresolvedHost); + } + } } // we're done! CleanUp(); + if (unresolved_host_state_machine_.getState() == + UnresolvedHostStateMachine::State::kRetry) { + // Based on + // https://curl.haxx.se/docs/todo.html#updated_DNS_server_while_running, + // update_engine process should call res_init() and unconditionally retry. + res_init(); + no_network_max_retries_++; + LOG(INFO) << "Will retry after reloading resolv.conf because last attempt " + "failed to resolve host."; + } else if (unresolved_host_state_machine_.getState() == + UnresolvedHostStateMachine::State::kRetriedSuccess) { + if (delegate_) { + delegate_->ReportUpdateCheckMetrics( + metrics::CheckResult::kUnset, + metrics::CheckReaction::kUnset, + metrics::DownloadErrorCode::kUnresolvedHostRecovered); + } + } + // TODO(petkov): This temporary code tries to deal with the case where the // update engine performs an update check while the network is not ready // (e.g., right after resume). 
Longer term, we should check if the network @@ -813,7 +846,8 @@ void LibcurlHttpFetcher::GetHttpResponseCode() { } } -void LibcurlHttpFetcher::LogCurlHandleInfo() { +CURLcode LibcurlHttpFetcher::GetCurlCode() { + CURLcode curl_code = CURLE_OK; while (true) { // Repeated calls to |curl_multi_info_read| will return a new struct each // time, until a NULL is returned as a signal that there is no more to get @@ -831,7 +865,7 @@ void LibcurlHttpFetcher::LogCurlHandleInfo() { CHECK_EQ(curl_handle_, curl_msg->easy_handle); // Transfer return code reference: // https://curl.haxx.se/libcurl/c/libcurl-errors.html - LOG(ERROR) << "Return code for the transfer: " << curl_msg->data.result; + curl_code = curl_msg->data.result; } } @@ -842,6 +876,32 @@ void LibcurlHttpFetcher::LogCurlHandleInfo() { if (res == CURLE_OK && connect_error) { LOG(ERROR) << "Connect error code from the OS: " << connect_error; } + + return curl_code; +} + +void UnresolvedHostStateMachine::UpdateState(bool failed_to_resolve_host) { + switch (state_) { + case State::kInit: + if (failed_to_resolve_host) { + state_ = State::kRetry; + } + break; + case State::kRetry: + if (failed_to_resolve_host) { + state_ = State::kNotRetry; + } else { + state_ = State::kRetriedSuccess; + } + break; + case State::kNotRetry: + break; + case State::kRetriedSuccess: + break; + default: + NOTREACHED(); + break; + } } } // namespace chromeos_update_engine diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h index 3978b70d..cdd489d6 100644 --- a/libcurl_http_fetcher.h +++ b/libcurl_http_fetcher.h @@ -37,6 +37,48 @@ namespace chromeos_update_engine { +// |UnresolvedHostStateMachine| is a representation of internal state machine of +// |LibcurlHttpFetcher|. +class UnresolvedHostStateMachine { + public: + UnresolvedHostStateMachine() = default; + enum class State { + kInit = 0, + kRetry = 1, + kRetriedSuccess = 2, + kNotRetry = 3, + }; + + State getState() { return state_; } + + // Updates the following internal state machine: + // + // |kInit| + // | + // | + // \/ + // (Try, host Unresolved) + // | + // | + // \/ + // |kRetry| --> (Retry, host resolved) + // | | + // | | + // \/ \/ + // (Retry, host Unresolved) |kRetriedSuccess| + // | + // | + // \/ + // |kNotRetry| + // + void UpdateState(bool failed_to_resolve_host); + + private: + State state_ = {State::kInit}; + + DISALLOW_COPY_AND_ASSIGN(UnresolvedHostStateMachine); +}; + class LibcurlHttpFetcher : public HttpFetcher { public: LibcurlHttpFetcher(ProxyResolver* proxy_resolver, @@ -88,6 +130,8 @@ class LibcurlHttpFetcher : public HttpFetcher { no_network_max_retries_ = retries; } + int get_no_network_max_retries() { return no_network_max_retries_; } + void set_server_to_check(ServerToCheck server_to_check) { server_to_check_ = server_to_check; } @@ -125,10 +169,8 @@ class LibcurlHttpFetcher : public HttpFetcher { // Asks libcurl for the http response code and stores it in the object. void GetHttpResponseCode(); - // Logs curl handle info. - // This can be called only when an http request failed to avoid spamming the - // logs. This must be called after |ResumeTransfer| and before |CleanUp|. - void LogCurlHandleInfo(); + // Returns the last |CURLcode|. + CURLcode GetCurlCode(); // Checks whether stored HTTP response is within the success range. inline bool IsHttpResponseSuccess() { @@ -280,6 +322,9 @@ class LibcurlHttpFetcher : public HttpFetcher { // True if this object is for update check. bool is_update_check_{false}; + // Internal state machine. 
+ UnresolvedHostStateMachine unresolved_host_state_machine_; + int low_speed_limit_bps_{kDownloadLowSpeedLimitBps}; int low_speed_time_seconds_{kDownloadLowSpeedTimeSeconds}; int connect_timeout_seconds_{kDownloadConnectTimeoutSeconds}; diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc index 88e48fa4..7f00daee 100644 --- a/libcurl_http_fetcher_unittest.cc +++ b/libcurl_http_fetcher_unittest.cc @@ -43,6 +43,7 @@ class LibcurlHttpFetcherTest : public ::testing::Test { brillo::FakeMessageLoop loop_{nullptr}; FakeHardware fake_hardware_; LibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_}; + UnresolvedHostStateMachine state_machine_; }; TEST_F(LibcurlHttpFetcherTest, GetEmptyHeaderValueTest) { @@ -78,4 +79,60 @@ TEST_F(LibcurlHttpFetcherTest, GetHeaderEdgeCaseTest) { EXPECT_EQ(header_value, actual_header_value); } +TEST_F(LibcurlHttpFetcherTest, InvalidURLTest) { + int no_network_max_retries = 1; + libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); + + libcurl_fetcher_.BeginTransfer("not-an-URL"); + while (loop_.PendingTasks()) { + loop_.RunOnce(true); + } + + EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(), + no_network_max_retries); +} + +TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) { + int no_network_max_retries = 1; + libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); + + // This test actually sends request to internet but according to + // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are + // reserved and sure to be invalid. Ideally we should mock libcurl or + // reorganize LibcurlHttpFetcher so the part that sends request can be mocked + // easily. + // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its relates) so it's + // easier to mock the part that depends on internet connectivity. + libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); + while (loop_.PendingTasks()) { + loop_.RunOnce(true); + } + + // If libcurl fails to resolve the name, we call res_init() to reload + // resolv.conf and retry exactly once more. See crbug.com/982813 for details. + EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(), + no_network_max_retries + 1); +} + +TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) { + state_machine_.UpdateState(true); + state_machine_.UpdateState(true); + EXPECT_EQ(state_machine_.getState(), + UnresolvedHostStateMachine::State::kNotRetry); +} + +TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetrySucceedTest) { + state_machine_.UpdateState(true); + state_machine_.UpdateState(false); + EXPECT_EQ(state_machine_.getState(), + UnresolvedHostStateMachine::State::kRetriedSuccess); +} + +TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineNoRetryTest) { + state_machine_.UpdateState(false); + state_machine_.UpdateState(false); + EXPECT_EQ(state_machine_.getState(), + UnresolvedHostStateMachine::State::kInit); +} + } // namespace chromeos_update_engine diff --git a/metrics_constants.h b/metrics_constants.h index b3833a3d..161d5856 100644 --- a/metrics_constants.h +++ b/metrics_constants.h @@ -60,6 +60,11 @@ enum class DownloadErrorCode { // above block and before the kInputMalformed field. This // is to ensure that error codes are not reordered. + // This error is reported when libcurl returns CURLE_COULDNT_RESOLVE_HOST and + // calling res_init() can recover. + kUnresolvedHostRecovered = 97, + // This error is reported when libcurl returns CURLE_COULDNT_RESOLVE_HOST. 
+ kUnresolvedHost = 98, // This error is reported when libcurl has an internal error that // update_engine can't recover from. kInternalError = 99, From 7b428f507892dc8f80895ae43e8fe12b75be2ad8 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 26 Jun 2019 10:03:35 -0700 Subject: [PATCH 070/624] update_engine: drop MTD logic It has no users. Note that I'm also dropping unit tests of the form: EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10_0", &disk, &part_num)); EXPECT_EQ("/dev/loop", disk); EXPECT_EQ(10, part_num); EXPECT_TRUE(utils::SplitPartitionName("/dev/loop28p11_0", &disk, &part_num)); EXPECT_EQ("/dev/loop28", disk); EXPECT_EQ(11, part_num); AFAICT, the part of the change that introduced these (https://crrev.com/c/191785) was not based on any real issue; it was (correctly) handling partition suffixes for loop devices (e.g., the 'p1' in '/dev/loop0p1'), but the underscore syntax is specific to ubi/ubiblock and should not apply to loop devices. See the ubiblock naming code [1], analogous loop device naming code [2], and the partition-name generation code [3]. [1] https://elixir.bootlin.com/linux/v5.1.15/source/drivers/mtd/ubi/block.c#L402 [2] https://elixir.bootlin.com/linux/v5.1.15/source/drivers/block/loop.c#L2012 [3] https://elixir.bootlin.com/linux/v5.1.15/source/block/partition-generic.c#L35 BUG=chromium:978563 TEST=unit tests Change-Id: I38754a5060ed3c9e6b11fb53d82ff6fb79149c72 --- Android.bp | 1 - BUILD.gn | 6 - common/utils.cc | 107 +------ common/utils.h | 13 - common/utils_unittest.cc | 35 --- payload_consumer/delta_performer.cc | 43 +-- payload_consumer/mtd_file_descriptor.cc | 263 ------------------ payload_consumer/mtd_file_descriptor.h | 103 ------- payload_consumer/postinstall_runner_action.cc | 3 +- 9 files changed, 3 insertions(+), 571 deletions(-) delete mode 100644 payload_consumer/mtd_file_descriptor.cc delete mode 100644 payload_consumer/mtd_file_descriptor.h diff --git a/Android.bp b/Android.bp index b91e8839..e9b7b138 100644 --- a/Android.bp +++ b/Android.bp @@ -32,7 +32,6 @@ cc_defaults { "-DUSE_CHROME_NETWORK_PROXY=0", "-DUSE_CHROME_KIOSK_APP=0", "-DUSE_HWID_OVERRIDE=0", - "-DUSE_MTD=0", "-D_FILE_OFFSET_BITS=64", "-D_POSIX_C_SOURCE=199309L", "-Wa,--noexecstack", diff --git a/BUILD.gn b/BUILD.gn index 51a4ae00..5e76bfbf 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -77,7 +77,6 @@ pkg_config("target_defaults") { "USE_HWID_OVERRIDE=${use.hwid_override}", "USE_CHROME_KIOSK_APP=${use.chrome_kiosk_app}", "USE_CHROME_NETWORK_PROXY=${use.chrome_network_proxy}", - "USE_MTD=${use.mtd}", "USE_SHILL=1", ] include_dirs = [ @@ -179,11 +178,6 @@ static_library("libpayload_consumer") { deps = [ ":update_metadata-protos", ] - - if (use.mtd) { - sources += [ "payload_consumer/mtd_file_descriptor.cc" ] - libs += [ "mtdutils" ] - } } # The main daemon static_library with all the code used to check for updates diff --git a/common/utils.cc b/common/utils.cc index 34d97a2a..3a234cb8 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -83,49 +83,6 @@ const int kGetFileFormatMaxHeaderSize = 32; // The path to the kernel's boot_id. const char kBootIdPath[] = "/proc/sys/kernel/random/boot_id"; -// Return true if |disk_name| is an MTD or a UBI device. Note that this test is -// simply based on the name of the device. 
-bool IsMtdDeviceName(const string& disk_name) { - return base::StartsWith( - disk_name, "/dev/ubi", base::CompareCase::SENSITIVE) || - base::StartsWith(disk_name, "/dev/mtd", base::CompareCase::SENSITIVE); -} - -// Return the device name for the corresponding partition on a NAND device. -// WARNING: This function returns device names that are not mountable. -string MakeNandPartitionName(int partition_num) { - switch (partition_num) { - case 2: - case 4: - case 6: { - return base::StringPrintf("/dev/mtd%d", partition_num); - } - default: { - return base::StringPrintf("/dev/ubi%d_0", partition_num); - } - } -} - -// Return the device name for the corresponding partition on a NAND device that -// may be mountable (but may not be writable). -string MakeNandPartitionNameForMount(int partition_num) { - switch (partition_num) { - case 2: - case 4: - case 6: { - return base::StringPrintf("/dev/mtd%d", partition_num); - } - case 3: - case 5: - case 7: { - return base::StringPrintf("/dev/ubiblock%d_0", partition_num); - } - default: { - return base::StringPrintf("/dev/ubi%d_0", partition_num); - } - } -} - // If |path| is absolute, or explicit relative to the current working directory, // leaves it as is. Otherwise, uses the system's temp directory, as defined by // base::GetTempDir() and prepends it to |path|. On success stores the full @@ -473,22 +430,6 @@ bool SplitPartitionName(const string& partition_name, return false; } - size_t partition_name_len = string::npos; - if (partition_name[last_nondigit_pos] == '_') { - // NAND block devices have weird naming which could be something - // like "/dev/ubiblock2_0". We discard "_0" in such a case. - size_t prev_nondigit_pos = - partition_name.find_last_not_of("0123456789", last_nondigit_pos - 1); - if (prev_nondigit_pos == string::npos || - (prev_nondigit_pos + 1) == last_nondigit_pos) { - LOG(ERROR) << "Unable to parse partition device name: " << partition_name; - return false; - } - - partition_name_len = last_nondigit_pos - prev_nondigit_pos; - last_nondigit_pos = prev_nondigit_pos; - } - if (out_disk_name) { // Special case for MMC devices which have the following naming scheme: // mmcblk0p2 @@ -501,8 +442,7 @@ bool SplitPartitionName(const string& partition_name, } if (out_partition_num) { - string partition_str = - partition_name.substr(last_nondigit_pos + 1, partition_name_len); + string partition_str = partition_name.substr(last_nondigit_pos + 1); *out_partition_num = atoi(partition_str.c_str()); } return true; @@ -519,13 +459,6 @@ string MakePartitionName(const string& disk_name, int partition_num) { return string(); } - if (IsMtdDeviceName(disk_name)) { - // Special case for UBI block devices. - // 1. ubiblock is not writable, we need to use plain "ubi". - // 2. There is a "_0" suffix. - return MakeNandPartitionName(partition_num); - } - string partition_name = disk_name; if (isdigit(partition_name.back())) { // Special case for devices with names ending with a digit. 
@@ -539,17 +472,6 @@ string MakePartitionName(const string& disk_name, int partition_num) { return partition_name; } -string MakePartitionNameForMount(const string& part_name) { - if (IsMtdDeviceName(part_name)) { - int partition_num; - if (!SplitPartitionName(part_name, nullptr, &partition_num)) { - return ""; - } - return MakeNandPartitionNameForMount(partition_num); - } - return part_name; -} - string ErrnoNumberAsString(int err) { char buf[100]; buf[0] = '\0'; @@ -566,33 +488,6 @@ bool IsSymlink(const char* path) { return lstat(path, &stbuf) == 0 && S_ISLNK(stbuf.st_mode) != 0; } -bool TryAttachingUbiVolume(int volume_num, int timeout) { - const string volume_path = base::StringPrintf("/dev/ubi%d_0", volume_num); - if (FileExists(volume_path.c_str())) { - return true; - } - - int exit_code; - vector cmd = {"ubiattach", - "-m", - base::StringPrintf("%d", volume_num), - "-d", - base::StringPrintf("%d", volume_num)}; - TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr)); - TEST_AND_RETURN_FALSE(exit_code == 0); - - cmd = {"ubiblock", "--create", volume_path}; - TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr)); - TEST_AND_RETURN_FALSE(exit_code == 0); - - while (timeout > 0 && !FileExists(volume_path.c_str())) { - sleep(1); - timeout--; - } - - return FileExists(volume_path.c_str()); -} - bool MakeTempFile(const string& base_filename_template, string* filename, int* fd) { diff --git a/common/utils.h b/common/utils.h index 9160d9f8..d949a3e9 100644 --- a/common/utils.h +++ b/common/utils.h @@ -127,11 +127,6 @@ bool FileExists(const char* path); // Returns true if |path| exists and is a symbolic link. bool IsSymlink(const char* path); -// Try attaching UBI |volume_num|. If there is any error executing required -// commands to attach the volume, this function returns false. This function -// only returns true if "/dev/ubi%d_0" becomes available in |timeout| seconds. -bool TryAttachingUbiVolume(int volume_num, int timeout); - // If |base_filename_template| is neither absolute (starts with "/") nor // explicitly relative to the current working directory (starts with "./" or // "../"), then it is prepended the system's temporary directory. On success, @@ -162,14 +157,6 @@ bool SplitPartitionName(const std::string& partition_name, // Returns empty string when invalid parameters are passed in std::string MakePartitionName(const std::string& disk_name, int partition_num); -// Similar to "MakePartitionName" but returns a name that is suitable for -// mounting. On NAND system we can write to "/dev/ubiX_0", which is what -// MakePartitionName returns, but we cannot mount that device. To mount, we -// have to use "/dev/ubiblockX_0" for rootfs. Stateful and OEM partitions are -// mountable with "/dev/ubiX_0". The input is a partition device such as -// /dev/sda3. Return empty string on error. -std::string MakePartitionNameForMount(const std::string& part_name); - // Set the read-only attribute on the block device |device| to the value passed // in |read_only|. Return whether the operation succeeded. 
bool SetBlockDeviceReadOnly(const std::string& device, bool read_only); diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc index 7d1c59ed..f9eb5966 100644 --- a/common/utils_unittest.cc +++ b/common/utils_unittest.cc @@ -123,10 +123,6 @@ TEST(UtilsTest, SplitPartitionNameTest) { EXPECT_EQ("/dev/mmcblk0", disk); EXPECT_EQ(3, part_num); - EXPECT_TRUE(utils::SplitPartitionName("/dev/ubiblock3_2", &disk, &part_num)); - EXPECT_EQ("/dev/ubiblock", disk); - EXPECT_EQ(3, part_num); - EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10", &disk, &part_num)); EXPECT_EQ("/dev/loop", disk); EXPECT_EQ(10, part_num); @@ -135,14 +131,6 @@ TEST(UtilsTest, SplitPartitionNameTest) { EXPECT_EQ("/dev/loop28", disk); EXPECT_EQ(11, part_num); - EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10_0", &disk, &part_num)); - EXPECT_EQ("/dev/loop", disk); - EXPECT_EQ(10, part_num); - - EXPECT_TRUE(utils::SplitPartitionName("/dev/loop28p11_0", &disk, &part_num)); - EXPECT_EQ("/dev/loop28", disk); - EXPECT_EQ(11, part_num); - EXPECT_FALSE(utils::SplitPartitionName("/dev/mmcblk0p", &disk, &part_num)); EXPECT_FALSE(utils::SplitPartitionName("/dev/sda", &disk, &part_num)); EXPECT_FALSE(utils::SplitPartitionName("/dev/foo/bar", &disk, &part_num)); @@ -157,29 +145,6 @@ TEST(UtilsTest, MakePartitionNameTest) { EXPECT_EQ("/dev/mmcblk0p2", utils::MakePartitionName("/dev/mmcblk0", 2)); EXPECT_EQ("/dev/loop8", utils::MakePartitionName("/dev/loop", 8)); EXPECT_EQ("/dev/loop12p2", utils::MakePartitionName("/dev/loop12", 2)); - EXPECT_EQ("/dev/ubi5_0", utils::MakePartitionName("/dev/ubiblock", 5)); - EXPECT_EQ("/dev/mtd4", utils::MakePartitionName("/dev/ubiblock", 4)); - EXPECT_EQ("/dev/ubi3_0", utils::MakePartitionName("/dev/ubiblock", 3)); - EXPECT_EQ("/dev/mtd2", utils::MakePartitionName("/dev/ubiblock", 2)); - EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionName("/dev/ubiblock", 1)); -} - -TEST(UtilsTest, MakePartitionNameForMountTest) { - EXPECT_EQ("/dev/sda4", utils::MakePartitionNameForMount("/dev/sda4")); - EXPECT_EQ("/dev/sda123", utils::MakePartitionNameForMount("/dev/sda123")); - EXPECT_EQ("/dev/mmcblk2", utils::MakePartitionNameForMount("/dev/mmcblk2")); - EXPECT_EQ("/dev/mmcblk0p2", - utils::MakePartitionNameForMount("/dev/mmcblk0p2")); - EXPECT_EQ("/dev/loop0", utils::MakePartitionNameForMount("/dev/loop0")); - EXPECT_EQ("/dev/loop8", utils::MakePartitionNameForMount("/dev/loop8")); - EXPECT_EQ("/dev/loop12p2", utils::MakePartitionNameForMount("/dev/loop12p2")); - EXPECT_EQ("/dev/ubiblock5_0", - utils::MakePartitionNameForMount("/dev/ubiblock5_0")); - EXPECT_EQ("/dev/mtd4", utils::MakePartitionNameForMount("/dev/ubi4_0")); - EXPECT_EQ("/dev/ubiblock3_0", - utils::MakePartitionNameForMount("/dev/ubiblock3")); - EXPECT_EQ("/dev/mtd2", utils::MakePartitionNameForMount("/dev/ubi2")); - EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionNameForMount("/dev/ubiblock1")); } TEST(UtilsTest, FuzzIntTest) { diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index f405bd93..53acc117 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -54,9 +54,6 @@ #endif // USE_FEC #include "update_engine/payload_consumer/file_descriptor_utils.h" #include "update_engine/payload_consumer/mount_history.h" -#if USE_MTD -#include "update_engine/payload_consumer/mtd_file_descriptor.h" -#endif // USE_MTD #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/payload_verifier.h" #include 
"update_engine/payload_consumer/xz_extent_writer.h" @@ -76,40 +73,9 @@ const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1; namespace { const int kUpdateStateOperationInvalid = -1; const int kMaxResumedUpdateFailures = 10; -#if USE_MTD -const int kUbiVolumeAttachTimeout = 5 * 60; -#endif const uint64_t kCacheSize = 1024 * 1024; // 1MB -FileDescriptorPtr CreateFileDescriptor(const char* path) { - FileDescriptorPtr ret; -#if USE_MTD - if (strstr(path, "/dev/ubi") == path) { - if (!UbiFileDescriptor::IsUbi(path)) { - // The volume might not have been attached at boot time. - int volume_no; - if (utils::SplitPartitionName(path, nullptr, &volume_no)) { - utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout); - } - } - if (UbiFileDescriptor::IsUbi(path)) { - LOG(INFO) << path << " is a UBI device."; - ret.reset(new UbiFileDescriptor); - } - } else if (MtdFileDescriptor::IsMtd(path)) { - LOG(INFO) << path << " is an MTD device."; - ret.reset(new MtdFileDescriptor); - } else { - LOG(INFO) << path << " is not an MTD nor a UBI device."; -#endif - ret.reset(new EintrSafeFileDescriptor); -#if USE_MTD - } -#endif - return ret; -} - // Opens path for read/write. On success returns an open FileDescriptor // and sets *err to 0. On failure, sets *err to errno and returns nullptr. FileDescriptorPtr OpenFile(const char* path, @@ -121,18 +87,11 @@ FileDescriptorPtr OpenFile(const char* path, bool read_only = (mode & O_ACCMODE) == O_RDONLY; utils::SetBlockDeviceReadOnly(path, read_only); - FileDescriptorPtr fd = CreateFileDescriptor(path); + FileDescriptorPtr fd(new EintrSafeFileDescriptor()); if (cache_writes && !read_only) { fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize)); LOG(INFO) << "Caching writes."; } -#if USE_MTD - // On NAND devices, we can either read, or write, but not both. So here we - // use O_WRONLY. - if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) { - mode = O_WRONLY; - } -#endif if (!fd->Open(path, mode, 000)) { *err = errno; PLOG(ERROR) << "Unable to open file " << path; diff --git a/payload_consumer/mtd_file_descriptor.cc b/payload_consumer/mtd_file_descriptor.cc deleted file mode 100644 index 5d940cbb..00000000 --- a/payload_consumer/mtd_file_descriptor.cc +++ /dev/null @@ -1,263 +0,0 @@ -// -// Copyright (C) 2014 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_consumer/mtd_file_descriptor.h" - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -#include "update_engine/common/subprocess.h" -#include "update_engine/common/utils.h" - -using std::string; - -namespace { - -static const char kSysfsClassUbi[] = "/sys/class/ubi/"; -static const char kUsableEbSize[] = "/usable_eb_size"; -static const char kReservedEbs[] = "/reserved_ebs"; - -using chromeos_update_engine::UbiVolumeInfo; -using chromeos_update_engine::utils::ReadFile; - -// Return a UbiVolumeInfo pointer if |path| is a UBI volume. 
Otherwise, return -// a null unique pointer. -std::unique_ptr GetUbiVolumeInfo(const string& path) { - base::FilePath device_node(path); - base::FilePath ubi_name(device_node.BaseName()); - - string sysfs_node(kSysfsClassUbi); - sysfs_node.append(ubi_name.MaybeAsASCII()); - - std::unique_ptr ret; - - // Obtain volume info from sysfs. - string s_reserved_ebs; - if (!ReadFile(sysfs_node + kReservedEbs, &s_reserved_ebs)) { - LOG(ERROR) << "Cannot read " << sysfs_node + kReservedEbs; - return ret; - } - string s_eb_size; - if (!ReadFile(sysfs_node + kUsableEbSize, &s_eb_size)) { - LOG(ERROR) << "Cannot read " << sysfs_node + kUsableEbSize; - return ret; - } - - base::TrimWhitespaceASCII( - s_reserved_ebs, base::TRIM_TRAILING, &s_reserved_ebs); - base::TrimWhitespaceASCII(s_eb_size, base::TRIM_TRAILING, &s_eb_size); - - uint64_t reserved_ebs, eb_size; - if (!base::StringToUint64(s_reserved_ebs, &reserved_ebs)) { - LOG(ERROR) << "Cannot parse reserved_ebs: " << s_reserved_ebs; - return ret; - } - if (!base::StringToUint64(s_eb_size, &eb_size)) { - LOG(ERROR) << "Cannot parse usable_eb_size: " << s_eb_size; - return ret; - } - - ret.reset(new UbiVolumeInfo); - ret->reserved_ebs = reserved_ebs; - ret->eraseblock_size = eb_size; - return ret; -} - -} // namespace - -namespace chromeos_update_engine { - -MtdFileDescriptor::MtdFileDescriptor() - : read_ctx_(nullptr, &mtd_read_close), - write_ctx_(nullptr, &mtd_write_close) {} - -bool MtdFileDescriptor::IsMtd(const char* path) { - uint64_t size; - return mtd_node_info(path, &size, nullptr, nullptr) == 0; -} - -bool MtdFileDescriptor::Open(const char* path, int flags, mode_t mode) { - // This File Descriptor does not support read and write. - TEST_AND_RETURN_FALSE((flags & O_ACCMODE) != O_RDWR); - // But we need to open the underlying file descriptor in O_RDWR mode because - // during write, we need to read back to verify the write actually sticks or - // we have to skip the block. That job is done by mtdutils library. - if ((flags & O_ACCMODE) == O_WRONLY) { - flags &= ~O_ACCMODE; - flags |= O_RDWR; - } - TEST_AND_RETURN_FALSE( - EintrSafeFileDescriptor::Open(path, flags | O_CLOEXEC, mode)); - - if ((flags & O_ACCMODE) == O_RDWR) { - write_ctx_.reset(mtd_write_descriptor(fd_, path)); - nr_written_ = 0; - } else { - read_ctx_.reset(mtd_read_descriptor(fd_, path)); - } - - if (!read_ctx_ && !write_ctx_) { - Close(); - return false; - } - - return true; -} - -bool MtdFileDescriptor::Open(const char* path, int flags) { - mode_t cur = umask(022); - umask(cur); - return Open(path, flags, 0777 & ~cur); -} - -ssize_t MtdFileDescriptor::Read(void* buf, size_t count) { - CHECK(read_ctx_); - return mtd_read_data(read_ctx_.get(), static_cast(buf), count); -} - -ssize_t MtdFileDescriptor::Write(const void* buf, size_t count) { - CHECK(write_ctx_); - ssize_t result = - mtd_write_data(write_ctx_.get(), static_cast(buf), count); - if (result > 0) { - nr_written_ += result; - } - return result; -} - -off64_t MtdFileDescriptor::Seek(off64_t offset, int whence) { - if (write_ctx_) { - // Ignore seek in write mode. 
- return nr_written_; - } - return EintrSafeFileDescriptor::Seek(offset, whence); -} - -bool MtdFileDescriptor::Close() { - read_ctx_.reset(); - write_ctx_.reset(); - return EintrSafeFileDescriptor::Close(); -} - -bool UbiFileDescriptor::IsUbi(const char* path) { - base::FilePath device_node(path); - base::FilePath ubi_name(device_node.BaseName()); - TEST_AND_RETURN_FALSE(base::StartsWith( - ubi_name.MaybeAsASCII(), "ubi", base::CompareCase::SENSITIVE)); - - return static_cast(GetUbiVolumeInfo(path)); -} - -bool UbiFileDescriptor::Open(const char* path, int flags, mode_t mode) { - std::unique_ptr info = GetUbiVolumeInfo(path); - if (!info) { - return false; - } - - // This File Descriptor does not support read and write. - TEST_AND_RETURN_FALSE((flags & O_ACCMODE) != O_RDWR); - TEST_AND_RETURN_FALSE( - EintrSafeFileDescriptor::Open(path, flags | O_CLOEXEC, mode)); - - usable_eb_blocks_ = info->reserved_ebs; - eraseblock_size_ = info->eraseblock_size; - volume_size_ = usable_eb_blocks_ * eraseblock_size_; - - if ((flags & O_ACCMODE) == O_WRONLY) { - // It's best to use volume update ioctl so that UBI layer will mark the - // volume as being updated, and only clear that mark if the update is - // successful. We will need to pad to the whole volume size at close. - uint64_t vsize = volume_size_; - if (ioctl(fd_, UBI_IOCVOLUP, &vsize) != 0) { - PLOG(ERROR) << "Cannot issue volume update ioctl"; - EintrSafeFileDescriptor::Close(); - return false; - } - mode_ = kWriteOnly; - nr_written_ = 0; - } else { - mode_ = kReadOnly; - } - - return true; -} - -bool UbiFileDescriptor::Open(const char* path, int flags) { - mode_t cur = umask(022); - umask(cur); - return Open(path, flags, 0777 & ~cur); -} - -ssize_t UbiFileDescriptor::Read(void* buf, size_t count) { - CHECK(mode_ == kReadOnly); - return EintrSafeFileDescriptor::Read(buf, count); -} - -ssize_t UbiFileDescriptor::Write(const void* buf, size_t count) { - CHECK(mode_ == kWriteOnly); - ssize_t nr_chunk = EintrSafeFileDescriptor::Write(buf, count); - if (nr_chunk >= 0) { - nr_written_ += nr_chunk; - } - return nr_chunk; -} - -off64_t UbiFileDescriptor::Seek(off64_t offset, int whence) { - if (mode_ == kWriteOnly) { - // Ignore seek in write mode. - return nr_written_; - } - return EintrSafeFileDescriptor::Seek(offset, whence); -} - -bool UbiFileDescriptor::Close() { - bool pad_ok = true; - if (IsOpen() && mode_ == kWriteOnly) { - char buf[1024]; - memset(buf, 0xFF, sizeof(buf)); - while (nr_written_ < volume_size_) { - // We have written less than the whole volume. In order for us to clear - // the update marker, we need to fill the rest. It is recommended to fill - // UBI writes with 0xFF. - uint64_t to_write = volume_size_ - nr_written_; - if (to_write > sizeof(buf)) { - to_write = sizeof(buf); - } - ssize_t nr_chunk = EintrSafeFileDescriptor::Write(buf, to_write); - if (nr_chunk < 0) { - LOG(ERROR) << "Cannot 0xFF-pad before closing."; - // There is an error, but we can't really do any meaningful thing here. 
- pad_ok = false; - break; - } - nr_written_ += nr_chunk; - } - } - return EintrSafeFileDescriptor::Close() && pad_ok; -} - -} // namespace chromeos_update_engine diff --git a/payload_consumer/mtd_file_descriptor.h b/payload_consumer/mtd_file_descriptor.h deleted file mode 100644 index c0170b78..00000000 --- a/payload_consumer/mtd_file_descriptor.h +++ /dev/null @@ -1,103 +0,0 @@ -// -// Copyright (C) 2014 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_ -#define UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_ - -// This module defines file descriptors that deal with NAND media. We are -// concerned with raw NAND access (as MTD device), and through UBI layer. - -#include - -#include - -#include "update_engine/payload_consumer/file_descriptor.h" - -namespace chromeos_update_engine { - -// A class defining the file descriptor API for raw MTD device. This file -// descriptor supports either random read, or sequential write but not both at -// once. -class MtdFileDescriptor : public EintrSafeFileDescriptor { - public: - MtdFileDescriptor(); - - static bool IsMtd(const char* path); - - bool Open(const char* path, int flags, mode_t mode) override; - bool Open(const char* path, int flags) override; - ssize_t Read(void* buf, size_t count) override; - ssize_t Write(const void* buf, size_t count) override; - off64_t Seek(off64_t offset, int whence) override; - uint64_t BlockDevSize() override { return 0; } - bool BlkIoctl(int request, - uint64_t start, - uint64_t length, - int* result) override { - return false; - } - bool Close() override; - - private: - std::unique_ptr read_ctx_; - std::unique_ptr write_ctx_; - uint64_t nr_written_; -}; - -struct UbiVolumeInfo { - // Number of eraseblocks. - uint64_t reserved_ebs; - // Size of each eraseblock. - uint64_t eraseblock_size; -}; - -// A file descriptor to update a UBI volume, similar to MtdFileDescriptor. -// Once the file descriptor is opened for write, the volume is marked as being -// updated. The volume will not be usable until an update is completed. See -// UBI_IOCVOLUP ioctl operation. -class UbiFileDescriptor : public EintrSafeFileDescriptor { - public: - // Perform some queries about |path| to see if it is a UBI volume. 
- static bool IsUbi(const char* path); - - bool Open(const char* path, int flags, mode_t mode) override; - bool Open(const char* path, int flags) override; - ssize_t Read(void* buf, size_t count) override; - ssize_t Write(const void* buf, size_t count) override; - off64_t Seek(off64_t offset, int whence) override; - uint64_t BlockDevSize() override { return 0; } - bool BlkIoctl(int request, - uint64_t start, - uint64_t length, - int* result) override { - return false; - } - bool Close() override; - - private: - enum Mode { kReadOnly, kWriteOnly }; - - uint64_t usable_eb_blocks_; - uint64_t eraseblock_size_; - uint64_t volume_size_; - uint64_t nr_written_; - - Mode mode_; -}; - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_ diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index cc3843d6..894ac7da 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -111,8 +111,7 @@ void PostinstallRunnerAction::PerformPartitionPostinstall() { const InstallPlan::Partition& partition = install_plan_.partitions[current_partition_]; - const string mountable_device = - utils::MakePartitionNameForMount(partition.target_path); + const string mountable_device = partition.target_path; if (mountable_device.empty()) { LOG(ERROR) << "Cannot make mountable device from " << partition.target_path; return CompletePostinstall(ErrorCode::kPostinstallRunnerError); From ed3fcc05a9c47c26bec6f524d33855aa5813e00d Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 11 Jul 2019 14:35:38 -0700 Subject: [PATCH 071/624] update_engine: Clear lingering install indication When an install for DLC is initiated, the member variable |is_install_| is set to true, however the issue arises from that fact that it is never reverted to be false. This can cause periodic updates to still have the lingering |is_install_| as true from a previous install. The state must be correctly cleaned up/cleared. |ProcessingDone()| will be called at the end of an install/update. This also depends on the fact that |is_install_| will not be changed by the time |OnUpdateScheduled()| is scheduled periodically or forced up until |ProcessingDone()| for an install or update is finished. The new approach is to have |ProcessingDone()| dispatch to two different methods (|ProcessingDoneInstall()| and |ProcessingDoneUpdate()|) based on what the action finished on (an install or update indicated by |is_install_|). Then those dispatched actions will be performed, then the |is_install_| can be reset. 
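To make the intended state handling concrete, here is a small self-contained toy model of the dispatch-then-reset flow described above (names shortened, real bookkeeping elided; this is not the CL's code).

  #include <iostream>

  // Toy model only. The point is that the install/update flag is cleared in
  // one shared place after either path has run, so a finished DLC install can
  // no longer leak is_install_ == true into the next periodic update check.
  class AttempterModel {
   public:
    void StartInstall() { is_install_ = true; }

    void ProcessingDone(bool success) {
      ProcessingDoneInternal(success);
      is_install_ = false;  // single reset point, on success and error alike
    }

   private:
    void ProcessingDoneInternal(bool success) {
      if (!success) {
        std::cout << "error path: schedule error event or go IDLE\n";
        return;
      }
      if (is_install_)
        std::cout << "install done: go IDLE, no reboot needed\n";
      else
        std::cout << "update done: UPDATED_NEED_REBOOT\n";
    }

    bool is_install_ = false;
  };

  int main() {
    AttempterModel attempter;
    attempter.StartInstall();
    attempter.ProcessingDone(true);  // install path runs, flag is cleared
    attempter.ProcessingDone(true);  // next cycle is treated as an update
    return 0;
  }
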
BUG=chromium:982929 TEST=FEATURES="test" P2_TEST_FILTER="*UpdateAttempterTest.*-*RunAsRoot*" emerge-$BOARD update_engine TEST=FEATURES="test" emerge-$BOARD update_engine Change-Id: I1fb9387dd416ed0815964cfcb22a6559ab81fa80 --- update_attempter.cc | 162 +++++++++++++++++++---------------- update_attempter.h | 7 ++ update_attempter_unittest.cc | 135 +++++++++++++++++++++++++++++ 3 files changed, 230 insertions(+), 74 deletions(-) diff --git a/update_attempter.cc b/update_attempter.cc index 31d728b5..dc7a4b5b 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1038,11 +1038,8 @@ void UpdateAttempter::UpdateRollbackHappened() { } } -// Delegate methods: -void UpdateAttempter::ProcessingDone(const ActionProcessor* processor, - ErrorCode code) { - LOG(INFO) << "Processing Done."; - +void UpdateAttempter::ProcessingDoneInternal(const ActionProcessor* processor, + ErrorCode code) { // Reset cpu shares back to normal. cpu_limiter_.StopLimiter(); @@ -1064,84 +1061,101 @@ void UpdateAttempter::ProcessingDone(const ActionProcessor* processor, attempt_error_code_ = utils::GetBaseErrorCode(code); - if (code == ErrorCode::kSuccess) { - // For install operation, we do not mark update complete since we do not - // need reboot. - if (!is_install_) - WriteUpdateCompletedMarker(); - ReportTimeToUpdateAppliedMetric(); - - prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0); - prefs_->SetString(kPrefsPreviousVersion, - omaha_request_params_->app_version()); - DeltaPerformer::ResetUpdateProgress(prefs_, false); - - system_state_->payload_state()->UpdateSucceeded(); - - // Since we're done with scattering fully at this point, this is the - // safest point delete the state files, as we're sure that the status is - // set to reboot (which means no more updates will be applied until reboot) - // This deletion is required for correctness as we want the next update - // check to re-create a new random number for the update check count. - // Similarly, we also delete the wall-clock-wait period that was persisted - // so that we start with a new random value for the next update check - // after reboot so that the same device is not favored or punished in any - // way. - prefs_->Delete(kPrefsUpdateCheckCount); - system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta()); - system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta()); - prefs_->Delete(kPrefsUpdateFirstSeenAt); - - if (is_install_) { - LOG(INFO) << "DLC successfully installed, no reboot needed."; - SetStatusAndNotify(UpdateStatus::IDLE); - ScheduleUpdates(); + if (code != ErrorCode::kSuccess) { + if (ScheduleErrorEventAction()) { return; } - - SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT); + LOG(INFO) << "No update."; + SetStatusAndNotify(UpdateStatus::IDLE); ScheduleUpdates(); - LOG(INFO) << "Update successfully applied, waiting to reboot."; - - // |install_plan_| is null during rollback operations, and the stats don't - // make much sense then anyway. - if (install_plan_) { - // Generate an unique payload identifier. - string target_version_uid; - for (const auto& payload : install_plan_->payloads) { - target_version_uid += - brillo::data_encoding::Base64Encode(payload.hash) + ":" + - payload.metadata_signature + ":"; - } - - // If we just downloaded a rollback image, we should preserve this fact - // over the following powerwash. 
- if (install_plan_->is_rollback) { - system_state_->payload_state()->SetRollbackHappened(true); - system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics( - /*success=*/true, install_plan_->version); - } - - // Expect to reboot into the new version to send the proper metric during - // next boot. - system_state_->payload_state()->ExpectRebootInNewVersion( - target_version_uid); - } else { - // If we just finished a rollback, then we expect to have no Omaha - // response. Otherwise, it's an error. - if (system_state_->payload_state()->GetRollbackVersion().empty()) { - LOG(ERROR) << "Can't send metrics because there was no Omaha response"; - } - } return; } - if (ScheduleErrorEventAction()) { - return; + ReportTimeToUpdateAppliedMetric(); + prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0); + prefs_->SetString(kPrefsPreviousVersion, + omaha_request_params_->app_version()); + DeltaPerformer::ResetUpdateProgress(prefs_, false); + + system_state_->payload_state()->UpdateSucceeded(); + + // Since we're done with scattering fully at this point, this is the + // safest point delete the state files, as we're sure that the status is + // set to reboot (which means no more updates will be applied until reboot) + // This deletion is required for correctness as we want the next update + // check to re-create a new random number for the update check count. + // Similarly, we also delete the wall-clock-wait period that was persisted + // so that we start with a new random value for the next update check + // after reboot so that the same device is not favored or punished in any + // way. + prefs_->Delete(kPrefsUpdateCheckCount); + system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta()); + system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta()); + prefs_->Delete(kPrefsUpdateFirstSeenAt); + + // Note: below this comment should only be on |ErrorCode::kSuccess|. + if (is_install_) { + ProcessingDoneInstall(processor, code); + } else { + ProcessingDoneUpdate(processor, code); } - LOG(INFO) << "No update."; +} + +void UpdateAttempter::ProcessingDoneInstall(const ActionProcessor* processor, + ErrorCode code) { SetStatusAndNotify(UpdateStatus::IDLE); ScheduleUpdates(); + LOG(INFO) << "DLC successfully installed, no reboot needed."; +} + +void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor, + ErrorCode code) { + WriteUpdateCompletedMarker(); + + SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT); + ScheduleUpdates(); + LOG(INFO) << "Update successfully applied, waiting to reboot."; + + // |install_plan_| is null during rollback operations, and the stats don't + // make much sense then anyway. + if (install_plan_) { + // Generate an unique payload identifier. + string target_version_uid; + for (const auto& payload : install_plan_->payloads) { + target_version_uid += brillo::data_encoding::Base64Encode(payload.hash) + + ":" + payload.metadata_signature + ":"; + } + + // If we just downloaded a rollback image, we should preserve this fact + // over the following powerwash. + if (install_plan_->is_rollback) { + system_state_->payload_state()->SetRollbackHappened(true); + system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics( + /*success=*/true, install_plan_->version); + } + + // Expect to reboot into the new version to send the proper metric during + // next boot. + system_state_->payload_state()->ExpectRebootInNewVersion( + target_version_uid); + } else { + // If we just finished a rollback, then we expect to have no Omaha + // response. 
Otherwise, it's an error. + if (system_state_->payload_state()->GetRollbackVersion().empty()) { + LOG(ERROR) << "Can't send metrics because there was no Omaha response"; + } + } +} + +// Delegate methods: +void UpdateAttempter::ProcessingDone(const ActionProcessor* processor, + ErrorCode code) { + LOG(INFO) << "Processing Done."; + ProcessingDoneInternal(processor, code); + + // Note: do cleanups here for any variables that need to be reset after a + // failure, error, update, or install. + is_install_ = false; } void UpdateAttempter::ProcessingStopped(const ActionProcessor* processor) { diff --git a/update_attempter.h b/update_attempter.h index 880e9754..3db40978 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -259,6 +259,8 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, InstallSetsStatusIdle); FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest); FRIEND_TEST(UpdateAttempterTest, PingOmahaTest); + FRIEND_TEST(UpdateAttempterTest, ProcessingDoneInstallError); + FRIEND_TEST(UpdateAttempterTest, ProcessingDoneUpdateError); FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics); FRIEND_TEST(UpdateAttempterTest, RollbackNotAllowed); FRIEND_TEST(UpdateAttempterTest, RollbackAfterInstall); @@ -284,6 +286,11 @@ class UpdateAttempter : public ActionProcessorDelegate, // parameters used in the current update attempt. uint32_t GetErrorCodeFlags(); + // ActionProcessorDelegate methods |ProcessingDone()| internal helpers. + void ProcessingDoneInternal(const ActionProcessor* processor, ErrorCode code); + void ProcessingDoneUpdate(const ActionProcessor* processor, ErrorCode code); + void ProcessingDoneInstall(const ActionProcessor* processor, ErrorCode code); + // CertificateChecker::Observer method. // Report metrics about the certificate being checked. void CertificateChecked(ServerToCheck server_to_check, diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 254579c0..4b9bc750 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -127,6 +127,19 @@ struct OnUpdateScheduledTestParams { bool should_update_be_called = false; }; +struct ProcessingDoneTestParams { + // Setups + Inputs: + bool is_install = false; + UpdateStatus status = UpdateStatus::CHECKING_FOR_UPDATE; + ActionProcessor* processor = nullptr; + ErrorCode code = ErrorCode::kSuccess; + + // Expects: + const bool kExpectedIsInstall = false; + bool should_schedule_updates_be_called = true; + UpdateStatus expected_exit_status = UpdateStatus::IDLE; +}; + class MockDlcService : public DlcServiceInterface { public: MOCK_METHOD1(GetInstalled, bool(vector*)); @@ -302,6 +315,9 @@ class UpdateAttempterTest : public ::testing::Test { // |OnUpdateScheduled()| related member functions. void TestOnUpdateScheduled(); + // |ProcessingDone()| related member functions. + void TestProcessingDone(); + base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; @@ -323,6 +339,9 @@ class UpdateAttempterTest : public ::testing::Test { // |OnUpdateScheduled()| test params. OnUpdateScheduledTestParams ous_params_; + // |ProcessingDone()| test params. 
+ ProcessingDoneTestParams pd_params_; + bool actual_using_p2p_for_downloading_; bool actual_using_p2p_for_sharing_; }; @@ -350,6 +369,22 @@ void UpdateAttempterTest::TestCheckForUpdate() { attempter_.WasScheduleUpdatesCalled()); } +void UpdateAttempterTest::TestProcessingDone() { + // Setup + attempter_.DisableScheduleUpdates(); + attempter_.is_install_ = pd_params_.is_install; + attempter_.status_ = pd_params_.status; + + // Invocation + attempter_.ProcessingDone(pd_params_.processor, pd_params_.code); + + // Verify + EXPECT_EQ(pd_params_.kExpectedIsInstall, attempter_.is_install_); + EXPECT_EQ(pd_params_.should_schedule_updates_be_called, + attempter_.WasScheduleUpdatesCalled()); + EXPECT_EQ(pd_params_.expected_exit_status, attempter_.status_); +} + void UpdateAttempterTest::ScheduleQuitMainLoop() { loop_.PostTask( FROM_HERE, @@ -1961,6 +1996,106 @@ TEST_F(UpdateAttempterTest, attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); } +TEST_F(UpdateAttempterTest, ProcessingDoneUpdated) { + // GIVEN an update finished. + + // THEN need reboot since update applied. + pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT; + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneInstalled) { + // GIVEN an install finished. + pd_params_.is_install = true; + + // THEN go idle. + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneInstallReportingError) { + // GIVEN an install finished. + pd_params_.is_install = true; + // GIVEN a reporting error occurred. + pd_params_.status = UpdateStatus::REPORTING_ERROR_EVENT; + + // THEN go idle. + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneNoUpdate) { + // GIVEN an update finished. + // GIVEN an action error occured. + pd_params_.code = ErrorCode::kNoUpdate; + + // THEN go idle. + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneNoInstall) { + // GIVEN an install finished. + pd_params_.is_install = true; + // GIVEN an action error occured. + pd_params_.code = ErrorCode::kNoUpdate; + + // THEN go idle. + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneUpdateError) { + // GIVEN an update finished. + // GIVEN an action error occured. + pd_params_.code = ErrorCode::kError; + // GIVEN an event error is set. + attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete, + OmahaEvent::kResultError, + ErrorCode::kError)); + + // THEN indicate a error event. + pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT; + // THEN install indication should be false. + + // THEN expect critical actions of |ScheduleErrorEventAction()|. + EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1); + EXPECT_CALL(*processor_, StartProcessing()).Times(1); + // THEN |ScheduleUpdates()| will be called next |ProcessingDone()| so skip. + pd_params_.should_schedule_updates_be_called = false; + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneInstallError) { + // GIVEN an install finished. + pd_params_.is_install = true; + // GIVEN an action error occured. + pd_params_.code = ErrorCode::kError; + // GIVEN an event error is set. 
+ attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete, + OmahaEvent::kResultError, + ErrorCode::kError)); + + // THEN indicate a error event. + pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT; + // THEN install indication should be false. + + // THEN expect critical actions of |ScheduleErrorEventAction()|. + EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1); + EXPECT_CALL(*processor_, StartProcessing()).Times(1); + // THEN |ScheduleUpdates()| will be called next |ProcessingDone()| so skip. + pd_params_.should_schedule_updates_be_called = false; + + TestProcessingDone(); +} + void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) { // Tests that checks if |device_quick_fix_build_token| arrives when // policy is set and the device is enterprise enrolled based on |set_token|. From 5d2c453db110c642a30caab42cd2cc082ff86acb Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 24 Jul 2019 19:14:23 -0700 Subject: [PATCH 072/624] IncludeBlocks: Preserve Test: clang-format no longer reorder blocks Change-Id: Ib5060a5c5cddad7f003f64bc3d59671db77fa224 --- .clang-format | 1 + 1 file changed, 1 insertion(+) diff --git a/.clang-format b/.clang-format index c1244fea..3b6a6276 100644 --- a/.clang-format +++ b/.clang-format @@ -34,5 +34,6 @@ BinPackArguments: false BinPackParameters: false CommentPragmas: NOLINT:.* DerivePointerAlignment: false +IncludeBlocks: Preserve PointerAlignment: Left TabWidth: 2 From 493fecb3f48c8478fd3ef244d631d857730dd14d Mon Sep 17 00:00:00 2001 From: Hidehiko Abe Date: Wed, 10 Jul 2019 23:30:50 +0900 Subject: [PATCH 073/624] update_engine: Replace FileWatcher. base::MessageLoop::WatchFileDescriptor() is removed in the next uprev. This CL replaces the uses (including indirect uses), by FileDescriptorWatcher. BUG=chromium:909719 TEST=Built locally. Ran cros_run_unit_tests. Change-Id: Ide9f94daf2be28696ec6bc1f82ab46a2bd2b6c6f --- common/subprocess.cc | 13 ++-- common/subprocess.h | 4 +- libcurl_http_fetcher.cc | 71 ++++++++----------- libcurl_http_fetcher.h | 6 +- payload_consumer/postinstall_runner_action.cc | 17 ++--- payload_consumer/postinstall_runner_action.h | 4 +- 6 files changed, 46 insertions(+), 69 deletions(-) diff --git a/common/subprocess.cc b/common/subprocess.cc index 0131f10f..36655c7e 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -127,8 +127,7 @@ void Subprocess::OnStdoutReady(SubprocessRecord* record) { if (!ok || eof) { // There was either an error or an EOF condition, so we are done watching // the file descriptor. - MessageLoop::current()->CancelTask(record->stdout_task_id); - record->stdout_task_id = MessageLoop::kTaskIdNull; + record->stdout_controller.reset(); return; } } while (bytes_read); @@ -143,8 +142,7 @@ void Subprocess::ChildExitedCallback(const siginfo_t& info) { // Make sure we read any remaining process output and then close the pipe. OnStdoutReady(record); - MessageLoop::current()->CancelTask(record->stdout_task_id); - record->stdout_task_id = MessageLoop::kTaskIdNull; + record->stdout_controller.reset(); // Don't print any log if the subprocess exited with exit code 0. 
if (info.si_code != CLD_EXITED) { @@ -199,12 +197,9 @@ pid_t Subprocess::ExecFlags(const vector& cmd, << record->stdout_fd << "."; } - record->stdout_task_id = MessageLoop::current()->WatchFileDescriptor( - FROM_HERE, + record->stdout_controller = base::FileDescriptorWatcher::WatchReadable( record->stdout_fd, - MessageLoop::WatchMode::kWatchRead, - true, - base::Bind(&Subprocess::OnStdoutReady, record.get())); + base::BindRepeating(&Subprocess::OnStdoutReady, record.get())); subprocess_records_[pid] = std::move(record); return pid; diff --git a/common/subprocess.h b/common/subprocess.h index bc19d16f..bac9e489 100644 --- a/common/subprocess.h +++ b/common/subprocess.h @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -120,8 +121,7 @@ class Subprocess { // These are used to monitor the stdout of the running process, including // the stderr if it was redirected. - brillo::MessageLoop::TaskId stdout_task_id{ - brillo::MessageLoop::kTaskIdNull}; + std::unique_ptr stdout_controller; int stdout_fd{-1}; std::string stdout; }; diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 247327ab..6b30eeba 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -80,16 +80,8 @@ int LibcurlHttpFetcher::LibcurlCloseSocketCallback(void* clientp, #endif // __ANDROID__ LibcurlHttpFetcher* fetcher = static_cast(clientp); // Stop watching the socket before closing it. - for (size_t t = 0; t < arraysize(fetcher->fd_task_maps_); ++t) { - const auto fd_task_pair = fetcher->fd_task_maps_[t].find(item); - if (fd_task_pair != fetcher->fd_task_maps_[t].end()) { - if (!MessageLoop::current()->CancelTask(fd_task_pair->second)) { - LOG(WARNING) << "Error canceling the watch task " - << fd_task_pair->second << " for " - << (t ? "writing" : "reading") << " the fd " << item; - } - fetcher->fd_task_maps_[t].erase(item); - } + for (size_t t = 0; t < arraysize(fetcher->fd_controller_maps_); ++t) { + fetcher->fd_controller_maps_[t].erase(item); } // Documentation for this callback says to return 0 on success or 1 on error. @@ -701,15 +693,15 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { // We should iterate through all file descriptors up to libcurl's fd_max or // the highest one we're tracking, whichever is larger. - for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) { - if (!fd_task_maps_[t].empty()) - fd_max = max(fd_max, fd_task_maps_[t].rbegin()->first); + for (size_t t = 0; t < arraysize(fd_controller_maps_); ++t) { + if (!fd_controller_maps_[t].empty()) + fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first); } // For each fd, if we're not tracking it, track it. If we are tracking it, but // libcurl doesn't care about it anymore, stop tracking it. After this loop, - // there should be exactly as many tasks scheduled in fd_task_maps_[0|1] as - // there are read/write fds that we're tracking. + // there should be exactly as many tasks scheduled in + // fd_controller_maps_[0|1] as there are read/write fds that we're tracking. for (int fd = 0; fd <= fd_max; ++fd) { // Note that fd_exc is unused in the current version of libcurl so is_exc // should always be false. 
@@ -718,21 +710,14 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { is_exc || (FD_ISSET(fd, &fd_read) != 0), // track 0 -- read is_exc || (FD_ISSET(fd, &fd_write) != 0) // track 1 -- write }; - MessageLoop::WatchMode watch_modes[2] = { - MessageLoop::WatchMode::kWatchRead, - MessageLoop::WatchMode::kWatchWrite, - }; - for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) { - auto fd_task_it = fd_task_maps_[t].find(fd); - bool tracked = fd_task_it != fd_task_maps_[t].end(); + for (size_t t = 0; t < arraysize(fd_controller_maps_); ++t) { + bool tracked = + fd_controller_maps_[t].find(fd) != fd_controller_maps_[t].end(); if (!must_track[t]) { // If we have an outstanding io_channel, remove it. - if (tracked) { - MessageLoop::current()->CancelTask(fd_task_it->second); - fd_task_maps_[t].erase(fd_task_it); - } + fd_controller_maps_[t].erase(fd); continue; } @@ -741,14 +726,21 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { continue; // Track a new fd. - fd_task_maps_[t][fd] = MessageLoop::current()->WatchFileDescriptor( - FROM_HERE, - fd, - watch_modes[t], - true, // persistent - base::Bind(&LibcurlHttpFetcher::CurlPerformOnce, - base::Unretained(this))); - + switch (t) { + case 0: // Read + fd_controller_maps_[t][fd] = + base::FileDescriptorWatcher::WatchReadable( + fd, + base::BindRepeating(&LibcurlHttpFetcher::CurlPerformOnce, + base::Unretained(this))); + break; + case 1: // Write + fd_controller_maps_[t][fd] = + base::FileDescriptorWatcher::WatchWritable( + fd, + base::BindRepeating(&LibcurlHttpFetcher::CurlPerformOnce, + base::Unretained(this))); + } static int io_counter = 0; io_counter++; if (io_counter % 50 == 0) { @@ -800,15 +792,8 @@ void LibcurlHttpFetcher::CleanUp() { MessageLoop::current()->CancelTask(timeout_id_); timeout_id_ = MessageLoop::kTaskIdNull; - for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) { - for (const auto& fd_taks_pair : fd_task_maps_[t]) { - if (!MessageLoop::current()->CancelTask(fd_taks_pair.second)) { - LOG(WARNING) << "Error canceling the watch task " << fd_taks_pair.second - << " for " << (t ? "writing" : "reading") << " the fd " - << fd_taks_pair.first; - } - } - fd_task_maps_[t].clear(); + for (size_t t = 0; t < arraysize(fd_controller_maps_); ++t) { + fd_controller_maps_[t].clear(); } if (curl_http_headers_) { diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h index cdd489d6..8f4258db 100644 --- a/libcurl_http_fetcher.h +++ b/libcurl_http_fetcher.h @@ -24,6 +24,7 @@ #include +#include #include #include #include @@ -215,7 +216,7 @@ class LibcurlHttpFetcher : public HttpFetcher { } // Cleans up the following if they are non-null: - // curl(m) handles, fd_task_maps_, timeout_id_. + // curl(m) handles, fd_controller_maps_, timeout_id_. void CleanUp(); // Force terminate the transfer. This will invoke the delegate's (if any) @@ -252,7 +253,8 @@ class LibcurlHttpFetcher : public HttpFetcher { // the message loop. libcurl may open/close descriptors and switch their // directions so maintain two separate lists so that watch conditions can be // set appropriately. - std::map fd_task_maps_[2]; + std::map> + fd_controller_maps_[2]; // The TaskId of the timer we're waiting on. kTaskIdNull if we are not waiting // on it. 
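The same substitution is applied in every file touched by this patch: a MessageLoop TaskId plus an explicit CancelTask() call is replaced by holding a base::FileDescriptorWatcher controller whose destruction cancels the watch. The following is a minimal sketch of that pattern, not part of the patch itself; FdReader, StartWatching and OnReady are illustrative names.

// Illustrative sketch only: shows the controller-based watch that this
// patch switches to. Assumes a FileDescriptorWatcher is set up on the
// current thread's message loop, as update_engine already does.
#include <memory>

#include <base/bind.h>
#include <base/files/file_descriptor_watcher_posix.h>

class FdReader {
 public:
  void StartWatching(int fd) {
    // WatchReadable() returns a controller; the watch stays active for as
    // long as the controller object is alive.
    controller_ = base::FileDescriptorWatcher::WatchReadable(
        fd, base::BindRepeating(&FdReader::OnReady, base::Unretained(this)));
  }

  void StopWatching() {
    // No CancelTask() equivalent is needed: destroying the controller
    // cancels the watch.
    controller_.reset();
  }

 private:
  void OnReady() {
    // Drain the descriptor here; invoked whenever the fd becomes readable.
  }

  std::unique_ptr<base::FileDescriptorWatcher::Controller> controller_;
};
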
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 894ac7da..264161ca 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -217,13 +217,10 @@ void PostinstallRunnerAction::PerformPartitionPostinstall() { PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_; } - progress_task_ = MessageLoop::current()->WatchFileDescriptor( - FROM_HERE, + progress_controller_ = base::FileDescriptorWatcher::WatchReadable( progress_fd_, - MessageLoop::WatchMode::kWatchRead, - true, - base::Bind(&PostinstallRunnerAction::OnProgressFdReady, - base::Unretained(this))); + base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady, + base::Unretained(this))); } void PostinstallRunnerAction::OnProgressFdReady() { @@ -248,8 +245,7 @@ void PostinstallRunnerAction::OnProgressFdReady() { if (!ok || eof) { // There was either an error or an EOF condition, so we are done watching // the file descriptor. - MessageLoop::current()->CancelTask(progress_task_); - progress_task_ = MessageLoop::kTaskIdNull; + progress_controller_.reset(); return; } } while (bytes_read); @@ -293,10 +289,7 @@ void PostinstallRunnerAction::Cleanup() { fs_mount_dir_.clear(); progress_fd_ = -1; - if (progress_task_ != MessageLoop::kTaskIdNull) { - MessageLoop::current()->CancelTask(progress_task_); - progress_task_ = MessageLoop::kTaskIdNull; - } + progress_controller_.reset(); progress_buffer_.clear(); } diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h index b9b7069c..838b2355 100644 --- a/payload_consumer/postinstall_runner_action.h +++ b/payload_consumer/postinstall_runner_action.h @@ -17,9 +17,11 @@ #ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_ #define UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_ +#include #include #include +#include #include #include @@ -139,7 +141,7 @@ class PostinstallRunnerAction : public InstallPlanAction { // The parent progress file descriptor used to watch for progress reports from // the postinstall program and the task watching for them. int progress_fd_{-1}; - brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull}; + std::unique_ptr progress_controller_; // A buffer of a partial read line from the progress file descriptor. std::string progress_buffer_; From 2f78c1c636cd5476192060f604fd32839aa07c3b Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 25 Jul 2019 13:20:43 -0700 Subject: [PATCH 074/624] update_engine: Leverage install indication in StatusResult protobuf Update engine will provide this install indication for signal listeners (specifically dlcservice) and status requesters to indicate whether update engine is in the process of installing or updating. With this, dlcservice will can be altered to not probe update engine for status during a DLC uninstall. The update engine client is also updated when getting the status from update engine by using KeyValueStore printouts now. Old output: [0725/202915.815630:INFO:update_engine_client.cc(501)] Querying Update Engine status... LAST_CHECKED_TIME=1564102396 PROGRESS=1.000000 CURRENT_OP=UPDATE_STATUS_IDLE NEW_VERSION=12354.0.2019_07_19_1136 NEW_SIZE=792 New output: [0726/173804.558077:INFO:update_engine_client.cc(490)] Querying Update Engine status... 
CURRENT_OPERATION=UPDATE_STATUS_IDLE IS_INSTALL=false LAST_CHECKED_TIME=1564187860 NEW_SIZE=792 NEW_VERSION=12369.0.2019_07_26_0904 PROGRESS=1.0 BUG=chromium:871340 TEST=FEATURES="test" emerge-$BOARD update_engine update_engine-client system_api TEST=/usr/bin/update_engine_client --status Cq-Depend: chromium:1717661 Change-Id: Iaacea27e0fc0711200ec81fdebb7fef45f94af43 --- client_library/client_dbus.cc | 1 + .../include/update_engine/update_status.h | 17 ++++++----- dbus_service.cc | 1 + update_attempter.cc | 1 + update_engine_client.cc | 17 ++--------- update_status_utils.cc | 29 +++++++++++++++++++ update_status_utils.h | 3 ++ 7 files changed, 47 insertions(+), 22 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index d0465029..e2defe7a 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -54,6 +54,7 @@ void ConvertToUpdateEngineStatus(const StatusResult& status, out_status->new_version = status.new_version(); out_status->new_size_bytes = status.new_size(); out_status->status = static_cast(status.current_operation()); + out_status->is_install = status.is_install(); } } // namespace diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index 059181cc..bc14e675 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -52,7 +52,7 @@ enum class UpdateStatus { // This value is exclusively used in Chrome. DO NOT define nor use it. // TODO(crbug.com/977320): Remove this value from chrome by refactoring the // Chrome code and evantually from here. This is not really an operation or - // state that the update engine stays on. This is the result of an internal + // state that the update_engine stays on. This is the result of an internal // failure and should be reflected differently. // ERROR = -1, }; @@ -71,19 +71,20 @@ enum UpdateAttemptFlags : int32_t { DECLARE_FLAGS_ENUM(UpdateAttemptFlags); struct UpdateEngineStatus { - // When the update_engine last checked for updates (time_t: seconds from unix - // epoch) + // Update engine last checked update (time_t: seconds from unix epoch). int64_t last_checked_time; - // the current status/operation of the update_engine + // Current status/operation of the update_engine. UpdateStatus status; - // the current product version (oem bundle id) + // Current product version (oem bundle id). std::string current_version; - // The current progress (0.0f-1.0f). + // Current progress (0.0f-1.0f). double progress; - // the size of the update (bytes) + // Size of the update in bytes. uint64_t new_size_bytes; - // the new product version + // New product version. std::string new_version; + // Indication of install for DLC(s). 
+ bool is_install; }; } // namespace update_engine diff --git a/dbus_service.cc b/dbus_service.cc index 0cfe26b4..b3796030 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -47,6 +47,7 @@ void ConvertToStatusResult(const UpdateEngineStatus& ue_status, out_status->set_current_operation(static_cast(ue_status.status)); out_status->set_new_version(ue_status.new_version); out_status->set_new_size(ue_status.new_size_bytes); + out_status->set_is_install(ue_status.is_install); } } // namespace diff --git a/update_attempter.cc b/update_attempter.cc index dc7a4b5b..71463b5c 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1382,6 +1382,7 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { out_status->progress = download_progress_; out_status->new_size_bytes = new_payload_size_; out_status->new_version = new_version_; + out_status->is_install = is_install_; return true; } diff --git a/update_engine_client.cc b/update_engine_client.cc index 1b680d12..954e856d 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -41,6 +41,7 @@ using chromeos_update_engine::EolStatus; using chromeos_update_engine::ErrorCode; +using chromeos_update_engine::UpdateEngineStatusToString; using chromeos_update_engine::UpdateStatusToString; using chromeos_update_engine::utils::ErrorCodeToString; using std::string; @@ -138,12 +139,7 @@ class WatchingStatusUpdateHandler : public ExitingStatusUpdateHandler { void WatchingStatusUpdateHandler::HandleStatusUpdate( const UpdateEngineStatus& status) { - LOG(INFO) << "Got status update:"; - LOG(INFO) << " last_checked_time: " << status.last_checked_time; - LOG(INFO) << " progress: " << status.progress; - LOG(INFO) << " current_operation: " << UpdateStatusToString(status.status); - LOG(INFO) << " new_version: " << status.new_version; - LOG(INFO) << " new_size: " << status.new_size_bytes; + LOG(INFO) << "Got status update: " << UpdateEngineStatusToString(status); } bool UpdateEngineClient::ShowStatus() { @@ -161,14 +157,7 @@ bool UpdateEngineClient::ShowStatus() { base::TimeDelta::FromSeconds(kShowStatusRetryIntervalInSeconds)); } - printf("LAST_CHECKED_TIME=%" PRIi64 - "\nPROGRESS=%f\nCURRENT_OP=%s\n" - "NEW_VERSION=%s\nNEW_SIZE=%" PRIi64 "\n", - status.last_checked_time, - status.progress, - UpdateStatusToString(status.status), - status.new_version.c_str(), - status.new_size_bytes); + printf("%s", UpdateEngineStatusToString(status).c_str()); return true; } diff --git a/update_status_utils.cc b/update_status_utils.cc index f3917d1d..b56d94a1 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -16,8 +16,13 @@ #include "update_engine/update_status_utils.h" #include +#include +#include #include +using brillo::KeyValueStore; +using std::string; +using update_engine::UpdateEngineStatus; using update_engine::UpdateStatus; namespace chromeos_update_engine { @@ -52,4 +57,28 @@ const char* UpdateStatusToString(const UpdateStatus& status) { return nullptr; } +string UpdateEngineStatusToString(const UpdateEngineStatus& status) { + KeyValueStore key_value_store; + +#if BASE_VER < 576279 + key_value_store.SetString("LAST_CHECKED_TIME", + base::Int64ToString(status.last_checked_time)); + key_value_store.SetString("PROGRESS", base::DoubleToString(status.progress)); + key_value_store.SetString("NEW_SIZE", + base::Uint64ToString(status.new_size_bytes)); +#else + key_value_store.SetString("LAST_CHECKED_TIME", + base::NumberToString(status.last_checked_time)); + key_value_store.SetString("PROGRESS", base::NumberToString(status.progress)); 
+ key_value_store.SetString("NEW_SIZE", + base::NumberToString(status.new_size_bytes)); +#endif + key_value_store.SetString("CURRENT_OPERATION", + UpdateStatusToString(status.status)); + key_value_store.SetString("NEW_VERSION", status.new_version); + key_value_store.SetBoolean("IS_INSTALL", status.is_install); + + return key_value_store.SaveToString(); +} + } // namespace chromeos_update_engine diff --git a/update_status_utils.h b/update_status_utils.h index e3b8b43a..1e3fdde5 100644 --- a/update_status_utils.h +++ b/update_status_utils.h @@ -25,6 +25,9 @@ namespace chromeos_update_engine { const char* UpdateStatusToString(const update_engine::UpdateStatus& status); +std::string UpdateEngineStatusToString( + const update_engine::UpdateEngineStatus& status); + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_UPDATE_STATUS_UTILS_H_ From f9464b4faa1ae76e2c1f8cf2e217e375e93465ad Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 22 Jul 2019 18:18:24 -0700 Subject: [PATCH 075/624] DynamicPartitionControlAndroid::LoadMetadataBuilder: always NewForUpdate MetadataBuilder::NewForUpdate handles both retrofit DAP and launch DAP devices, so there is no need to differetiate using sysprops here. In tests, LoadMetadataBuilder is just mocked, because there's no interesting logic to be tested. Test: unit tests Test: manually apply OTA on launch device Change-Id: Id24b105e523435f4f273cb57f995e6b778703ef1 --- dynamic_partition_control_android.cc | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index bfdd3752..b5b22df1 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -175,16 +175,8 @@ DynamicPartitionControlAndroid::LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot, uint32_t target_slot) { - std::unique_ptr builder; - - if (target_slot != BootControlInterface::kInvalidSlot && - IsDynamicPartitionsRetrofit()) { - builder = MetadataBuilder::NewForUpdate( - PartitionOpener(), super_device, source_slot, target_slot); - } else { - builder = - MetadataBuilder::New(PartitionOpener(), super_device, source_slot); - } + auto builder = MetadataBuilder::NewForUpdate( + PartitionOpener(), super_device, source_slot, target_slot); if (builder == nullptr) { LOG(WARNING) << "No metadata slot " From 012508efa3bb00b5b458caa918f2960653f41df2 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 22 Jul 2019 18:30:40 -0700 Subject: [PATCH 076/624] [REFACTOR] DynamicPartitionControl: minimize API - Move UpdatePartitionMetadata in boot_control_android.cc to DynamicPartitionControlAndroid::PreparePartitionsForUpdate to reflect code ownership better. - Minimize the API by removing unused functions now that we have PreparePartitionsForUpdate - Fix tests: replace MockDynamicPartitionControl to MockDynamicPartitionControlAndroid because it tests DynamicPartitionControlAndroid. 
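From the caller's point of view, the refactor collapses the old unmap / load-metadata / rewrite / store sequence into a single entry point. A rough sketch of the resulting call site follows (simplified from the InitPartitionMetadata change below; InitTargetPartitions is an illustrative name and error reporting is omitted).

// Sketch only, assuming the usual source/target slot numbers from boot
// control; the real caller is BootControlAndroid::InitPartitionMetadata.
#include "update_engine/common/boot_control_interface.h"
#include "update_engine/dynamic_partition_control_interface.h"

namespace chromeos_update_engine {

bool InitTargetPartitions(
    DynamicPartitionControlInterface* dynamic_control,
    uint32_t source_slot,
    uint32_t target_slot,
    const BootControlInterface::PartitionMetadata& partition_metadata) {
  if (!dynamic_control->IsDynamicPartitionsEnabled())
    return true;  // Nothing to prepare without dynamic partitions.
  // Unmapping stale target partitions, loading the source metadata,
  // rewriting the target groups and storing the result all happen inside
  // this one call now.
  return dynamic_control->PreparePartitionsForUpdate(
      source_slot, target_slot, partition_metadata);
}

}  // namespace chromeos_update_engine
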
Test: unit test Test: manually apply full OTA Change-Id: I2959270bd89c4f8ddaa45cf45ba93acdec850f67 --- Android.bp | 1 + boot_control_android.cc | 132 +------------------------- boot_control_android_unittest.cc | 6 +- dynamic_partition_control_android.cc | 115 ++++++++++++++++++++++ dynamic_partition_control_android.h | 48 ++++++++-- dynamic_partition_control_interface.h | 26 ++--- dynamic_partition_utils.cc | 39 ++++++++ dynamic_partition_utils.h | 33 +++++++ mock_dynamic_partition_control.h | 5 +- 9 files changed, 251 insertions(+), 154 deletions(-) create mode 100644 dynamic_partition_utils.cc create mode 100644 dynamic_partition_utils.h diff --git a/Android.bp b/Android.bp index de4d3b3c..fab5d275 100644 --- a/Android.bp +++ b/Android.bp @@ -214,6 +214,7 @@ cc_library_static { srcs: [ "boot_control_android.cc", "dynamic_partition_control_android.cc", + "dynamic_partition_utils.cc", ], } diff --git a/boot_control_android.cc b/boot_control_android.cc index 8ab73be1..b820deda 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -22,7 +22,6 @@ #include #include -#include #include #include #include @@ -34,7 +33,6 @@ using std::string; using android::dm::DmDeviceState; -using android::fs_mgr::Partition; using android::hardware::hidl_string; using android::hardware::Return; using android::hardware::boot::V1_0::BoolResult; @@ -112,8 +110,8 @@ bool BootControlAndroid::IsSuperBlockDevice( const string& partition_name_suffix) const { string source_device = device_dir.Append(fs_mgr_get_super_partition_name(slot)).value(); - auto source_metadata = dynamic_control_->LoadMetadataBuilder( - source_device, slot, BootControlInterface::kInvalidSlot); + auto source_metadata = + dynamic_control_->LoadMetadataBuilder(source_device, slot); return source_metadata->HasBlockDevice(partition_name_suffix); } @@ -126,8 +124,7 @@ BootControlAndroid::GetDynamicPartitionDevice( string super_device = device_dir.Append(fs_mgr_get_super_partition_name(slot)).value(); - auto builder = dynamic_control_->LoadMetadataBuilder( - super_device, slot, BootControlInterface::kInvalidSlot); + auto builder = dynamic_control_->LoadMetadataBuilder(super_device, slot); if (builder == nullptr) { LOG(ERROR) << "No metadata in slot " @@ -280,110 +277,6 @@ bool BootControlAndroid::MarkBootSuccessfulAsync( brillo::MessageLoop::kTaskIdNull; } -namespace { - -bool UpdatePartitionMetadata(DynamicPartitionControlInterface* dynamic_control, - Slot source_slot, - Slot target_slot, - const string& target_suffix, - const PartitionMetadata& partition_metadata) { - string device_dir_str; - if (!dynamic_control->GetDeviceDir(&device_dir_str)) { - return false; - } - base::FilePath device_dir(device_dir_str); - auto source_device = - device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value(); - - auto builder = dynamic_control->LoadMetadataBuilder( - source_device, source_slot, target_slot); - if (builder == nullptr) { - // TODO(elsk): allow reconstructing metadata from partition_metadata - // in recovery sideload. 
- LOG(ERROR) << "No metadata at " - << BootControlInterface::SlotName(source_slot); - return false; - } - - std::vector groups = builder->ListGroups(); - for (const auto& group_name : groups) { - if (base::EndsWith( - group_name, target_suffix, base::CompareCase::SENSITIVE)) { - LOG(INFO) << "Removing group " << group_name; - builder->RemoveGroupAndPartitions(group_name); - } - } - - uint64_t total_size = 0; - for (const auto& group : partition_metadata.groups) { - total_size += group.size; - } - - string expr; - uint64_t allocatable_space = builder->AllocatableSpace(); - if (!dynamic_control->IsDynamicPartitionsRetrofit()) { - allocatable_space /= 2; - expr = "half of "; - } - if (total_size > allocatable_space) { - LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix - << " (" << total_size << ") has exceeded " << expr - << " allocatable space for dynamic partitions " - << allocatable_space << "."; - return false; - } - - for (const auto& group : partition_metadata.groups) { - auto group_name_suffix = group.name + target_suffix; - if (!builder->AddGroup(group_name_suffix, group.size)) { - LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size " - << group.size; - return false; - } - LOG(INFO) << "Added group " << group_name_suffix << " with size " - << group.size; - - for (const auto& partition : group.partitions) { - auto partition_name_suffix = partition.name + target_suffix; - Partition* p = builder->AddPartition( - partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY); - if (!p) { - LOG(ERROR) << "Cannot add partition " << partition_name_suffix - << " to group " << group_name_suffix; - return false; - } - if (!builder->ResizePartition(p, partition.size)) { - LOG(ERROR) << "Cannot resize partition " << partition_name_suffix - << " to size " << partition.size << ". Not enough space?"; - return false; - } - LOG(INFO) << "Added partition " << partition_name_suffix << " to group " - << group_name_suffix << " with size " << partition.size; - } - } - - auto target_device = - device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value(); - return dynamic_control->StoreMetadata( - target_device, builder.get(), target_slot); -} - -bool UnmapTargetPartitions(DynamicPartitionControlInterface* dynamic_control, - const string& target_suffix, - const PartitionMetadata& partition_metadata) { - for (const auto& group : partition_metadata.groups) { - for (const auto& partition : group.partitions) { - if (!dynamic_control->UnmapPartitionOnDeviceMapper(partition.name + - target_suffix)) { - return false; - } - } - } - return true; -} - -} // namespace - bool BootControlAndroid::InitPartitionMetadata( Slot target_slot, const PartitionMetadata& partition_metadata, @@ -417,23 +310,8 @@ bool BootControlAndroid::InitPartitionMetadata( return true; } - string target_suffix; - if (!GetSuffix(target_slot, &target_suffix)) { - return false; - } - - // Unmap all the target dynamic partitions because they would become - // inconsistent with the new metadata. 
- if (!UnmapTargetPartitions( - dynamic_control_.get(), target_suffix, partition_metadata)) { - return false; - } - - return UpdatePartitionMetadata(dynamic_control_.get(), - source_slot, - target_slot, - target_suffix, - partition_metadata); + return dynamic_control_->PreparePartitionsForUpdate( + source_slot, target_slot, partition_metadata); } } // namespace chromeos_update_engine diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index 94e195f8..dfcb6fb6 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -262,7 +262,7 @@ class BootControlAndroidTest : public ::testing::Test { // Fake init bootctl_ bootctl_.module_ = new NiceMock(); bootctl_.dynamic_control_ = - std::make_unique>(); + std::make_unique>(); ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] { return kMaxNumSlots; @@ -297,8 +297,8 @@ class BootControlAndroidTest : public ::testing::Test { } // Return the mocked DynamicPartitionControlInterface. - NiceMock& dynamicControl() { - return static_cast&>( + NiceMock& dynamicControl() { + return static_cast&>( *bootctl_.dynamic_control_); } diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index b5b22df1..5a172b0b 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -19,16 +19,20 @@ #include #include #include +#include #include #include #include #include +#include #include +#include #include #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/utils.h" +#include "update_engine/dynamic_partition_utils.h" using android::base::GetBoolProperty; using android::base::Join; @@ -37,10 +41,14 @@ using android::dm::DmDeviceState; using android::fs_mgr::CreateLogicalPartition; using android::fs_mgr::DestroyLogicalPartition; using android::fs_mgr::MetadataBuilder; +using android::fs_mgr::Partition; using android::fs_mgr::PartitionOpener; +using android::fs_mgr::SlotSuffixForSlotNumber; namespace chromeos_update_engine { +using PartitionMetadata = BootControlInterface::PartitionMetadata; + constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions"; constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; @@ -170,6 +178,13 @@ bool DynamicPartitionControlAndroid::GetDmDevicePathByName( return DeviceMapper::Instance().GetDmDevicePathByName(name, path); } +std::unique_ptr +DynamicPartitionControlAndroid::LoadMetadataBuilder( + const std::string& super_device, uint32_t source_slot) { + return LoadMetadataBuilder( + super_device, source_slot, BootControlInterface::kInvalidSlot); +} + std::unique_ptr DynamicPartitionControlAndroid::LoadMetadataBuilder( const std::string& super_device, @@ -257,4 +272,104 @@ bool DynamicPartitionControlAndroid::GetDeviceDir(std::string* out) { *out = base::FilePath(misc_device).DirName().value(); return true; } + +bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const PartitionMetadata& partition_metadata) { + const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); + + // Unmap all the target dynamic partitions because they would become + // inconsistent with the new metadata. 
+ for (const auto& group : partition_metadata.groups) { + for (const auto& partition : group.partitions) { + if (!UnmapPartitionOnDeviceMapper(partition.name + target_suffix)) { + return false; + } + } + } + + std::string device_dir_str; + if (!GetDeviceDir(&device_dir_str)) { + return false; + } + base::FilePath device_dir(device_dir_str); + auto source_device = + device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value(); + + auto builder = LoadMetadataBuilder(source_device, source_slot, target_slot); + if (builder == nullptr) { + LOG(ERROR) << "No metadata at " + << BootControlInterface::SlotName(source_slot); + return false; + } + + if (!UpdatePartitionMetadata( + builder.get(), target_slot, partition_metadata)) { + return false; + } + + auto target_device = + device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value(); + return StoreMetadata(target_device, builder.get(), target_slot); +} + +bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( + MetadataBuilder* builder, + uint32_t target_slot, + const PartitionMetadata& partition_metadata) { + const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); + DeleteGroupsWithSuffix(builder, target_suffix); + + uint64_t total_size = 0; + for (const auto& group : partition_metadata.groups) { + total_size += group.size; + } + + std::string expr; + uint64_t allocatable_space = builder->AllocatableSpace(); + if (!IsDynamicPartitionsRetrofit()) { + allocatable_space /= 2; + expr = "half of "; + } + if (total_size > allocatable_space) { + LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix + << " (" << total_size << ") has exceeded " << expr + << "allocatable space for dynamic partitions " + << allocatable_space << "."; + return false; + } + + for (const auto& group : partition_metadata.groups) { + auto group_name_suffix = group.name + target_suffix; + if (!builder->AddGroup(group_name_suffix, group.size)) { + LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size " + << group.size; + return false; + } + LOG(INFO) << "Added group " << group_name_suffix << " with size " + << group.size; + + for (const auto& partition : group.partitions) { + auto partition_name_suffix = partition.name + target_suffix; + Partition* p = builder->AddPartition( + partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY); + if (!p) { + LOG(ERROR) << "Cannot add partition " << partition_name_suffix + << " to group " << group_name_suffix; + return false; + } + if (!builder->ResizePartition(p, partition.size)) { + LOG(ERROR) << "Cannot resize partition " << partition_name_suffix + << " to size " << partition.size << ". 
Not enough space?"; + return false; + } + LOG(INFO) << "Added partition " << partition_name_suffix << " to group " + << group_name_suffix << " with size " << partition.size; + } + } + + return true; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 334f9bd7..e0859ed9 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -36,21 +36,48 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t slot, bool force_writable, std::string* path) override; - bool UnmapPartitionOnDeviceMapper( - const std::string& target_partition_name) override; void Cleanup() override; bool DeviceExists(const std::string& path) override; android::dm::DmDeviceState GetState(const std::string& name) override; bool GetDmDevicePathByName(const std::string& name, std::string* path) override; std::unique_ptr LoadMetadataBuilder( + const std::string& super_device, uint32_t source_slot) override; + + bool PreparePartitionsForUpdate(uint32_t source_slot, + uint32_t target_slot, + const BootControlInterface::PartitionMetadata& + partition_metadata) override; + bool GetDeviceDir(std::string* path) override; + + protected: + // These functions are exposed for testing. + + // Unmap logical partition on device mapper. This is the reverse operation + // of MapPartitionOnDeviceMapper. + // Returns true if unmapped successfully. + virtual bool UnmapPartitionOnDeviceMapper( + const std::string& target_partition_name); + + // Retrieve metadata from |super_device| at slot |source_slot|. + // + // If |target_slot| != kInvalidSlot, before returning the metadata, this + // function modifies the metadata so that during updates, the metadata can be + // written to |target_slot|. In particular, on retrofit devices, the returned + // metadata automatically includes block devices at |target_slot|. + // + // If |target_slot| == kInvalidSlot, this function returns metadata at + // |source_slot| without modifying it. This is the same as + // LoadMetadataBuilder(const std::string&, uint32_t). + virtual std::unique_ptr LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot, - uint32_t target_slot) override; - bool StoreMetadata(const std::string& super_device, - android::fs_mgr::MetadataBuilder* builder, - uint32_t target_slot) override; - bool GetDeviceDir(std::string* path) override; + uint32_t target_slot); + + // Write metadata |builder| to |super_device| at slot |target_slot|. + virtual bool StoreMetadata(const std::string& super_device, + android::fs_mgr::MetadataBuilder* builder, + uint32_t target_slot); private: std::set mapped_devices_; @@ -62,6 +89,13 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { bool force_writable, std::string* path); + // Update |builder| according to |partition_metadata|, assuming the device + // does not have Virtual A/B. 
+ bool UpdatePartitionMetadata( + android::fs_mgr::MetadataBuilder* builder, + uint32_t target_slot, + const BootControlInterface::PartitionMetadata& partition_metadata); + DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); }; diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index d4590f7d..12e62e08 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -26,6 +26,8 @@ #include #include +#include "update_engine/common/boot_control_interface.h" + namespace chromeos_update_engine { class DynamicPartitionControlInterface { @@ -52,13 +54,6 @@ class DynamicPartitionControlInterface { bool force_writable, std::string* path) = 0; - // Unmap logical partition on device mapper. This is the reverse operation - // of MapPartitionOnDeviceMapper. - // If |wait| is set, wait until the device is unmapped. - // Returns true if unmapped successfully. - virtual bool UnmapPartitionOnDeviceMapper( - const std::string& target_partition_name) = 0; - // Do necessary cleanups before destroying the object. virtual void Cleanup() = 0; @@ -77,17 +72,16 @@ class DynamicPartitionControlInterface { std::string* path) = 0; // Retrieve metadata from |super_device| at slot |source_slot|. - // On retrofit devices, if |target_slot| != kInvalidSlot, the returned - // metadata automatically includes block devices at |target_slot|. virtual std::unique_ptr LoadMetadataBuilder( - const std::string& super_device, - uint32_t source_slot, - uint32_t target_slot) = 0; + const std::string& super_device, uint32_t source_slot) = 0; - // Write metadata |builder| to |super_device| at slot |target_slot|. - virtual bool StoreMetadata(const std::string& super_device, - android::fs_mgr::MetadataBuilder* builder, - uint32_t target_slot) = 0; + // Prepare all partitions for an update specified in |partition_metadata|. + // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the + // device would be mapped in an inconsistent way. + virtual bool PreparePartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const BootControlInterface::PartitionMetadata& partition_metadata) = 0; // Return a possible location for devices listed by name. virtual bool GetDeviceDir(std::string* path) = 0; diff --git a/dynamic_partition_utils.cc b/dynamic_partition_utils.cc new file mode 100644 index 00000000..f9bd886b --- /dev/null +++ b/dynamic_partition_utils.cc @@ -0,0 +1,39 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/dynamic_partition_utils.h" + +#include + +#include +#include + +using android::fs_mgr::MetadataBuilder; + +namespace chromeos_update_engine { + +void DeleteGroupsWithSuffix(MetadataBuilder* builder, + const std::string& suffix) { + std::vector groups = builder->ListGroups(); + for (const auto& group_name : groups) { + if (base::EndsWith(group_name, suffix, base::CompareCase::SENSITIVE)) { + LOG(INFO) << "Removing group " << group_name; + builder->RemoveGroupAndPartitions(group_name); + } + } +} + +} // namespace chromeos_update_engine diff --git a/dynamic_partition_utils.h b/dynamic_partition_utils.h new file mode 100644 index 00000000..09fce00c --- /dev/null +++ b/dynamic_partition_utils.h @@ -0,0 +1,33 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_ +#define UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_ + +#include + +#include + +namespace chromeos_update_engine { + +// Delete all groups (and their partitions) in |builder| that have names +// ending with |suffix|. +void DeleteGroupsWithSuffix(android::fs_mgr::MetadataBuilder* builder, + const std::string& suffix); + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_ diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index cdfeeccc..310e528d 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -21,11 +21,14 @@ #include +#include "update_engine/common/boot_control_interface.h" +#include "update_engine/dynamic_partition_control_android.h" #include "update_engine/dynamic_partition_control_interface.h" namespace chromeos_update_engine { -class MockDynamicPartitionControl : public DynamicPartitionControlInterface { +class MockDynamicPartitionControlAndroid + : public DynamicPartitionControlAndroid { public: MOCK_METHOD5(MapPartitionOnDeviceMapper, bool(const std::string&, From 186bb68d4eb9a5767245b764522b26b466aa0cb6 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 23 Jul 2019 14:04:39 -0700 Subject: [PATCH 077/624] [REFACTOR] DynamicPartitionControl: combine DAP feature flag APIs Combine IsDynamicPartitionsEnabled/Retrofit into one API that returns a FeatureFlag struct instead. This allows us to add new feature flags more easily. 
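On the caller side the combined API reads roughly as follows; this is an illustrative sketch (DescribeDapSupport is a made-up function), and the FeatureFlag struct itself is added to dynamic_partition_control_interface.h further down.

// Sketch only: one query replaces the IsDynamicPartitionsEnabled() /
// IsDynamicPartitionsRetrofit() pair.
#include <base/logging.h>

#include "update_engine/dynamic_partition_control_interface.h"

namespace chromeos_update_engine {

void DescribeDapSupport(DynamicPartitionControlInterface* dynamic_control) {
  const FeatureFlag flag = dynamic_control->GetDynamicPartitionsFeatureFlag();
  if (!flag.IsEnabled()) {
    LOG(INFO) << "Dynamic partitions are not used on this device.";
  } else if (flag.IsRetrofit()) {
    // Retrofit devices need extra work, e.g. StoreMetadata() also flashes
    // the partition table to each super block device.
    LOG(INFO) << "Dynamic partitions were retrofitted onto this device.";
  } else {
    LOG(INFO) << "Device launched with dynamic partitions.";
  }
}

}  // namespace chromeos_update_engine
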
Test: unittest Change-Id: I32f0f0c4d5c636d2eac3bf99d6f92fcc6b71c7a9 --- boot_control_android.cc | 4 ++-- boot_control_android_unittest.cc | 6 ++---- dynamic_partition_control_android.cc | 26 ++++++++++++++++++++------ dynamic_partition_control_android.h | 3 +-- dynamic_partition_control_interface.h | 20 +++++++++++++++----- mock_dynamic_partition_control.h | 3 +-- 6 files changed, 41 insertions(+), 21 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index b820deda..44fc0faa 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -193,7 +193,7 @@ bool BootControlAndroid::GetPartitionDevice(const string& partition_name, // current payload doesn't encode them as dynamic partitions. This may happen // when applying a retrofit update on top of a dynamic-partitions-enabled // build. - if (dynamic_control_->IsDynamicPartitionsEnabled() && + if (dynamic_control_->GetDynamicPartitionsFeatureFlag().IsEnabled() && (slot == GetCurrentSlot() || is_target_dynamic_)) { switch (GetDynamicPartitionDevice( device_dir, partition_name_suffix, slot, device)) { @@ -288,7 +288,7 @@ bool BootControlAndroid::InitPartitionMetadata( "resources.\n" << "run adb enable-verity to deactivate if required and try again."; } - if (!dynamic_control_->IsDynamicPartitionsEnabled()) { + if (!dynamic_control_->GetDynamicPartitionsFeatureFlag().IsEnabled()) { return true; } diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index dfcb6fb6..3e0320d2 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -274,10 +274,8 @@ class BootControlAndroidTest : public ::testing::Test { return Void(); })); - ON_CALL(dynamicControl(), IsDynamicPartitionsEnabled()) - .WillByDefault(Return(true)); - ON_CALL(dynamicControl(), IsDynamicPartitionsRetrofit()) - .WillByDefault(Return(false)); + ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true)); ON_CALL(dynamicControl(), GetDeviceDir(_)) .WillByDefault(Invoke([](auto path) { diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 5a172b0b..329ddd34 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -58,12 +58,26 @@ DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { CleanupInternal(false /* wait */); } -bool DynamicPartitionControlAndroid::IsDynamicPartitionsEnabled() { - return GetBoolProperty(kUseDynamicPartitions, false); +static FeatureFlag GetFeatureFlag(const char* enable_prop, + const char* retrofit_prop) { + bool retrofit = GetBoolProperty(retrofit_prop, false); + bool enabled = GetBoolProperty(enable_prop, false); + if (retrofit && !enabled) { + LOG(ERROR) << retrofit_prop << " is true but " << enable_prop + << " is not. These sysprops are inconsistent. 
Assume that " + << enable_prop << " is true from now on."; + } + if (retrofit) { + return FeatureFlag(FeatureFlag::Value::RETROFIT); + } + if (enabled) { + return FeatureFlag(FeatureFlag::Value::LAUNCH); + } + return FeatureFlag(FeatureFlag::Value::NONE); } -bool DynamicPartitionControlAndroid::IsDynamicPartitionsRetrofit() { - return GetBoolProperty(kRetrfoitDynamicPartitions, false); +FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() { + return GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions); } bool DynamicPartitionControlAndroid::MapPartitionInternal( @@ -217,7 +231,7 @@ bool DynamicPartitionControlAndroid::StoreMetadata( return false; } - if (IsDynamicPartitionsRetrofit()) { + if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) { if (!FlashPartitionTable(super_device, *metadata)) { LOG(ERROR) << "Cannot write metadata to " << super_device; return false; @@ -328,7 +342,7 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( std::string expr; uint64_t allocatable_space = builder->AllocatableSpace(); - if (!IsDynamicPartitionsRetrofit()) { + if (!GetDynamicPartitionsFeatureFlag().IsRetrofit()) { allocatable_space /= 2; expr = "half of "; } diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index e0859ed9..73d7539a 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -29,8 +29,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { public: DynamicPartitionControlAndroid() = default; ~DynamicPartitionControlAndroid(); - bool IsDynamicPartitionsEnabled() override; - bool IsDynamicPartitionsRetrofit() override; + FeatureFlag GetDynamicPartitionsFeatureFlag() override; bool MapPartitionOnDeviceMapper(const std::string& super_device, const std::string& target_partition_name, uint32_t slot, diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index 12e62e08..32fbbe4a 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -30,15 +30,25 @@ namespace chromeos_update_engine { +struct FeatureFlag { + enum class Value { NONE = 0, RETROFIT, LAUNCH }; + constexpr explicit FeatureFlag(Value value) : value_(value) {} + constexpr bool IsEnabled() const { return value_ != Value::NONE; } + constexpr bool IsRetrofit() const { return value_ == Value::RETROFIT; } + + private: + Value value_; +}; + class DynamicPartitionControlInterface { public: virtual ~DynamicPartitionControlInterface() = default; - // Return true iff dynamic partitions is enabled on this device. - virtual bool IsDynamicPartitionsEnabled() = 0; - - // Return true iff dynamic partitions is retrofitted on this device. - virtual bool IsDynamicPartitionsRetrofit() = 0; + // Return the feature flags of dynamic partitions on this device. + // Return RETROFIT iff dynamic partitions is retrofitted on this device, + // LAUNCH iff this device is launched with dynamic partitions, + // NONE iff dynamic partitions is disabled on this device. + virtual FeatureFlag GetDynamicPartitionsFeatureFlag() = 0; // Map logical partition on device-mapper. // |super_device| is the device path of the physical partition ("super"). 
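The halving of allocatable space above (allocatable_space /= 2 on non-retrofit devices) is easy to gloss over, so here is a small worked example; the sizes are assumptions taken from the unit-test constants that appear later in this series, not values mandated by the code.

#include <cstdint>

// Worked example with assumed sizes matching the test constants below
// (kDefaultGroupSize = 5 GiB, kDefaultSuperSize = 2 * 5 GiB + 1 MiB).
constexpr uint64_t kGiB = uint64_t{1} << 30;
constexpr uint64_t kMiB = uint64_t{1} << 20;
constexpr uint64_t kSuperSize = 2 * 5 * kGiB + 1 * kMiB;
// On a launch device the builder's allocatable space (roughly the super
// size minus metadata overhead) is halved, leaving about 5 GiB per slot.
constexpr uint64_t kPerSlotBudget = kSuperSize / 2;
// Requesting {system: 3 GiB, vendor: 3 GiB} for the target slot therefore
// fails the check, which is what the NotEnoughSpaceForSlot test expects.
static_assert(6 * kGiB > kPerSlotBudget, "6 GiB exceeds the per-slot budget");
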
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 310e528d..a0701e7a 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -49,8 +49,7 @@ class MockDynamicPartitionControlAndroid android::fs_mgr::MetadataBuilder*, uint32_t)); MOCK_METHOD1(GetDeviceDir, bool(std::string*)); - MOCK_METHOD0(IsDynamicPartitionsEnabled, bool()); - MOCK_METHOD0(IsDynamicPartitionsRetrofit, bool()); + MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); }; } // namespace chromeos_update_engine From c049f93549c6dedef00ac73c5b19f2b875e796e1 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 23 Jul 2019 15:06:05 -0700 Subject: [PATCH 078/624] [REFACTOR] Move and refactor tests. Some tests from boot_control_android_unittest only test logic of UpdatePartitionMetadata. Move them to dynamic_partition_control_android_unittest. After moving the test, boot_control_android_unittest can depend on the MockDynamicPartitionControl (which is a mocked DynamicPartitionControlInterface) directly again. It no longer depends on internal implementation of DynamicPartitionControlAndroid. Test: unittest Fixes: 138333673 Change-Id: Idc5f11be98754b8f6d38fcb8604af497e5d86376 --- Android.bp | 1 + boot_control_android_unittest.cc | 638 +----------------- dynamic_partition_control_android.h | 2 + dynamic_partition_control_android_unittest.cc | 477 +++++++++++++ dynamic_partition_test_utils.h | 258 +++++++ mock_dynamic_partition_control.h | 24 + 6 files changed, 771 insertions(+), 629 deletions(-) create mode 100644 dynamic_partition_control_android_unittest.cc create mode 100644 dynamic_partition_test_utils.h diff --git a/Android.bp b/Android.bp index fab5d275..57b2febd 100644 --- a/Android.bp +++ b/Android.bp @@ -678,6 +678,7 @@ cc_test { "common/terminator_unittest.cc", "common/test_utils.cc", "common/utils_unittest.cc", + "dynamic_partition_control_android_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index 3e0320d2..1a875474 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -26,243 +26,29 @@ #include #include +#include "update_engine/dynamic_partition_test_utils.h" #include "update_engine/mock_boot_control_hal.h" #include "update_engine/mock_dynamic_partition_control.h" using android::dm::DmDeviceState; -using android::fs_mgr::MetadataBuilder; using android::hardware::Void; using std::string; using testing::_; using testing::AnyNumber; -using testing::Contains; -using testing::Eq; using testing::Invoke; -using testing::Key; -using testing::MakeMatcher; -using testing::Matcher; -using testing::MatcherInterface; -using testing::MatchResultListener; using testing::NiceMock; using testing::Not; using testing::Return; namespace chromeos_update_engine { -constexpr const uint32_t kMaxNumSlots = 2; -constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"}; -constexpr const char* kFakeDevicePath = "/fake/dev/path/"; -constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/"; -constexpr const uint32_t kFakeMetadataSize = 65536; -constexpr const char* kDefaultGroup = "foo"; - -// A map describing the size of each partition. 
-// "{name, size}" -using PartitionSizes = std::map; - -// "{name_a, size}" -using PartitionSuffixSizes = std::map; - -using PartitionMetadata = BootControlInterface::PartitionMetadata; - -// C++ standards do not allow uint64_t (aka unsigned long) to be the parameter -// of user-defined literal operators. -constexpr unsigned long long operator"" _MiB(unsigned long long x) { // NOLINT - return x << 20; -} -constexpr unsigned long long operator"" _GiB(unsigned long long x) { // NOLINT - return x << 30; -} - -constexpr uint64_t kDefaultGroupSize = 5_GiB; -// Super device size. 1 MiB for metadata. -constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB; - -template -std::ostream& operator<<(std::ostream& os, const std::map& param) { - os << "{"; - bool first = true; - for (const auto& pair : param) { - if (!first) - os << ", "; - os << pair.first << ":" << pair.second; - first = false; - } - return os << "}"; -} - -template -std::ostream& operator<<(std::ostream& os, const std::vector& param) { - os << "["; - bool first = true; - for (const auto& e : param) { - if (!first) - os << ", "; - os << e; - first = false; - } - return os << "]"; -} - -std::ostream& operator<<(std::ostream& os, - const PartitionMetadata::Partition& p) { - return os << "{" << p.name << ", " << p.size << "}"; -} - -std::ostream& operator<<(std::ostream& os, const PartitionMetadata::Group& g) { - return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}"; -} - -std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) { - return os << m.groups; -} - -inline string GetDevice(const string& name) { - return kFakeDevicePath + name; -} - -inline string GetDmDevice(const string& name) { - return kFakeDmDevicePath + name; -} - -// TODO(elsk): fs_mgr_get_super_partition_name should be mocked. -inline string GetSuperDevice(uint32_t slot) { - return GetDevice(fs_mgr_get_super_partition_name(slot)); -} - -struct TestParam { - uint32_t source; - uint32_t target; -}; -std::ostream& operator<<(std::ostream& os, const TestParam& param) { - return os << "{source: " << param.source << ", target:" << param.target - << "}"; -} - -// To support legacy tests, auto-convert {name_a: size} map to -// PartitionMetadata. -PartitionMetadata partitionSuffixSizesToMetadata( - const PartitionSuffixSizes& partition_sizes) { - PartitionMetadata metadata; - for (const char* suffix : kSlotSuffixes) { - metadata.groups.push_back( - {string(kDefaultGroup) + suffix, kDefaultGroupSize, {}}); - } - for (const auto& pair : partition_sizes) { - for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) { - if (base::EndsWith(pair.first, - kSlotSuffixes[suffix_idx], - base::CompareCase::SENSITIVE)) { - metadata.groups[suffix_idx].partitions.push_back( - {pair.first, pair.second}); - } - } - } - return metadata; -} - -// To support legacy tests, auto-convert {name: size} map to PartitionMetadata. 
-PartitionMetadata partitionSizesToMetadata( - const PartitionSizes& partition_sizes) { - PartitionMetadata metadata; - metadata.groups.push_back({string{kDefaultGroup}, kDefaultGroupSize, {}}); - for (const auto& pair : partition_sizes) { - metadata.groups[0].partitions.push_back({pair.first, pair.second}); - } - return metadata; -} - -std::unique_ptr NewFakeMetadata( - const PartitionMetadata& metadata) { - auto builder = - MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots); - EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2); - EXPECT_NE(nullptr, builder); - if (builder == nullptr) - return nullptr; - for (const auto& group : metadata.groups) { - EXPECT_TRUE(builder->AddGroup(group.name, group.size)); - for (const auto& partition : group.partitions) { - auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */); - EXPECT_TRUE(p && builder->ResizePartition(p, partition.size)); - } - } - return builder; -} - -class MetadataMatcher : public MatcherInterface { - public: - explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes) - : partition_metadata_(partitionSuffixSizesToMetadata(partition_sizes)) {} - explicit MetadataMatcher(const PartitionMetadata& partition_metadata) - : partition_metadata_(partition_metadata) {} - - bool MatchAndExplain(MetadataBuilder* metadata, - MatchResultListener* listener) const override { - bool success = true; - for (const auto& group : partition_metadata_.groups) { - for (const auto& partition : group.partitions) { - auto p = metadata->FindPartition(partition.name); - if (p == nullptr) { - if (!success) - *listener << "; "; - *listener << "No partition " << partition.name; - success = false; - continue; - } - if (p->size() != partition.size) { - if (!success) - *listener << "; "; - *listener << "Partition " << partition.name << " has size " - << p->size() << ", expected " << partition.size; - success = false; - } - if (p->group_name() != group.name) { - if (!success) - *listener << "; "; - *listener << "Partition " << partition.name << " has group " - << p->group_name() << ", expected " << group.name; - success = false; - } - } - } - return success; - } - - void DescribeTo(std::ostream* os) const override { - *os << "expect: " << partition_metadata_; - } - - void DescribeNegationTo(std::ostream* os) const override { - *os << "expect not: " << partition_metadata_; - } - - private: - PartitionMetadata partition_metadata_; -}; - -inline Matcher MetadataMatches( - const PartitionSuffixSizes& partition_sizes) { - return MakeMatcher(new MetadataMatcher(partition_sizes)); -} - -inline Matcher MetadataMatches( - const PartitionMetadata& partition_metadata) { - return MakeMatcher(new MetadataMatcher(partition_metadata)); -} - -MATCHER_P(HasGroup, group, " has group " + group) { - auto groups = arg->ListGroups(); - return std::find(groups.begin(), groups.end(), group) != groups.end(); -} - class BootControlAndroidTest : public ::testing::Test { protected: void SetUp() override { // Fake init bootctl_ bootctl_.module_ = new NiceMock(); bootctl_.dynamic_control_ = - std::make_unique>(); + std::make_unique>(); ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] { return kMaxNumSlots; @@ -295,61 +81,22 @@ class BootControlAndroidTest : public ::testing::Test { } // Return the mocked DynamicPartitionControlInterface. 
- NiceMock& dynamicControl() { - return static_cast&>( + NiceMock& dynamicControl() { + return static_cast&>( *bootctl_.dynamic_control_); } // Set the fake metadata to return when LoadMetadataBuilder is called on // |slot|. void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) { - SetMetadata(slot, partitionSuffixSizesToMetadata(sizes)); - } - - void SetMetadata(uint32_t slot, const PartitionMetadata& metadata) { EXPECT_CALL(dynamicControl(), - LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) + LoadMetadataBuilder(GetSuperDevice(slot), slot)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([metadata](auto, auto, auto) { - return NewFakeMetadata(metadata); + .WillRepeatedly(Invoke([sizes](auto, auto) { + return NewFakeMetadata(PartitionSuffixSizesToMetadata(sizes)); })); } - // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata - // slot with each partition in |partitions|. - void ExpectUnmap(const std::set& partitions) { - // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments. - ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_)) - .WillByDefault(Return(false)); - - for (const auto& partition : partitions) { - EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition)) - .WillOnce(Invoke([this](auto partition) { - mapped_devices_.erase(partition); - return true; - })); - } - } - - void ExpectDevicesAreMapped(const std::set& partitions) { - ASSERT_EQ(partitions.size(), mapped_devices_.size()); - for (const auto& partition : partitions) { - EXPECT_THAT(mapped_devices_, Contains(Key(Eq(partition)))) - << "Expect that " << partition << " is mapped, but it is not."; - } - } - - void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) { - ExpectStoreMetadataMatch(MetadataMatches(partition_sizes)); - } - - virtual void ExpectStoreMetadataMatch( - const Matcher& matcher) { - EXPECT_CALL(dynamicControl(), - StoreMetadata(GetSuperDevice(target()), matcher, target())) - .WillOnce(Return(true)); - } - uint32_t source() { return slots_.source; } uint32_t target() { return slots_.target; } @@ -367,28 +114,17 @@ class BootControlAndroidTest : public ::testing::Test { ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] { return source(); })); - // Should not store metadata to source slot. - EXPECT_CALL(dynamicControl(), - StoreMetadata(GetSuperDevice(source()), _, source())) - .Times(0); - // Should not load metadata from target slot. - EXPECT_CALL(dynamicControl(), - LoadMetadataBuilder(GetSuperDevice(target()), target(), _)) - .Times(0); } bool InitPartitionMetadata(uint32_t slot, PartitionSizes partition_sizes, bool update_metadata = true) { - auto m = partitionSizesToMetadata(partition_sizes); - LOG(INFO) << m; + auto m = PartitionSizesToMetadata(partition_sizes); return bootctl_.InitPartitionMetadata(slot, m, update_metadata); } BootControlAndroid bootctl_; // BootControlAndroid under test. TestParam slots_; - // mapped devices through MapPartitionOnDeviceMapper. - std::map mapped_devices_; }; class BootControlAndroidTestP @@ -401,125 +137,6 @@ class BootControlAndroidTestP } }; -// Test resize case. Grow if target metadata contains a partition with a size -// less than expected. 
-TEST_P(BootControlAndroidTestP, NeedGrowIfSizeNotMatchWhenResizing) { - SetMetadata(source(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - ExpectStoreMetadata({{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 3_GiB}, - {T("vendor"), 1_GiB}}); - ExpectUnmap({T("system"), T("vendor")}); - - EXPECT_TRUE( - InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 1_GiB}})); -} - -// Test resize case. Shrink if target metadata contains a partition with a size -// greater than expected. -TEST_P(BootControlAndroidTestP, NeedShrinkIfSizeNotMatchWhenResizing) { - SetMetadata(source(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - ExpectStoreMetadata({{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 150_MiB}}); - ExpectUnmap({T("system"), T("vendor")}); - - EXPECT_TRUE(InitPartitionMetadata(target(), - {{"system", 2_GiB}, {"vendor", 150_MiB}})); -} - -// Test adding partitions on the first run. -TEST_P(BootControlAndroidTestP, AddPartitionToEmptyMetadata) { - SetMetadata(source(), PartitionSuffixSizes{}); - ExpectStoreMetadata({{T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); - ExpectUnmap({T("system"), T("vendor")}); - - EXPECT_TRUE( - InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}})); -} - -// Test subsequent add case. -TEST_P(BootControlAndroidTestP, AddAdditionalPartition) { - SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}}); - ExpectStoreMetadata( - {{S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); - ExpectUnmap({T("system"), T("vendor")}); - - EXPECT_TRUE( - InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}})); -} - -// Test delete one partition. -TEST_P(BootControlAndroidTestP, DeletePartition) { - SetMetadata(source(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - // No T("vendor") - ExpectStoreMetadata( - {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}}); - ExpectUnmap({T("system")}); - - EXPECT_TRUE(InitPartitionMetadata(target(), {{"system", 2_GiB}})); -} - -// Test delete all partitions. -TEST_P(BootControlAndroidTestP, DeleteAll) { - SetMetadata(source(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - ExpectStoreMetadata({{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}); - - EXPECT_TRUE(InitPartitionMetadata(target(), {})); -} - -// Test corrupt source metadata case. -TEST_P(BootControlAndroidTestP, CorruptedSourceMetadata) { - EXPECT_CALL(dynamicControl(), - LoadMetadataBuilder(GetSuperDevice(source()), source(), _)) - .WillOnce(Invoke([](auto, auto, auto) { return nullptr; })); - ExpectUnmap({T("system")}); - - EXPECT_FALSE(InitPartitionMetadata(target(), {{"system", 1_GiB}})) - << "Should not be able to continue with corrupt source metadata"; -} - -// Test that InitPartitionMetadata fail if there is not enough space on the -// device. 
-TEST_P(BootControlAndroidTestP, NotEnoughSpace) { - SetMetadata(source(), - {{S("system"), 3_GiB}, - {S("vendor"), 2_GiB}, - {T("system"), 0}, - {T("vendor"), 0}}); - EXPECT_FALSE( - InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}})) - << "Should not be able to fit 11GiB data into 10GiB space"; -} - -TEST_P(BootControlAndroidTestP, NotEnoughSpaceForSlot) { - SetMetadata(source(), - {{S("system"), 1_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 0}, - {T("vendor"), 0}}); - EXPECT_FALSE( - InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}})) - << "Should not be able to grow over size of super / 2"; -} - // Test applying retrofit update on a build with dynamic partitions enabled. TEST_P(BootControlAndroidTestP, ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) { @@ -528,12 +145,6 @@ TEST_P(BootControlAndroidTestP, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); - // Should not try to unmap any target partition. - EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_)).Times(0); - // Should not store metadata to target slot. - EXPECT_CALL(dynamicControl(), - StoreMetadata(GetSuperDevice(target()), _, target())) - .Times(0); // Not calling through BootControlAndroidTest::InitPartitionMetadata(), since // we don't want any default group in the PartitionMetadata. @@ -576,9 +187,7 @@ TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) { {S("vendor"), 1_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); - EXPECT_CALL(dynamicControl(), - StoreMetadata(GetSuperDevice(target()), _, target())) - .Times(0); + EXPECT_TRUE(InitPartitionMetadata( target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false)); @@ -620,239 +229,10 @@ INSTANTIATE_TEST_CASE_P(BootControlAndroidTest, BootControlAndroidTestP, testing::Values(TestParam{0, 1}, TestParam{1, 0})); -const PartitionSuffixSizes update_sizes_0() { - // Initial state is 0 for "other" slot. - return { - {"grown_a", 2_GiB}, - {"shrunk_a", 1_GiB}, - {"same_a", 100_MiB}, - {"deleted_a", 150_MiB}, - // no added_a - {"grown_b", 200_MiB}, - // simulate system_other - {"shrunk_b", 0}, - {"same_b", 0}, - {"deleted_b", 0}, - // no added_b - }; -} - -const PartitionSuffixSizes update_sizes_1() { - return { - {"grown_a", 2_GiB}, - {"shrunk_a", 1_GiB}, - {"same_a", 100_MiB}, - {"deleted_a", 150_MiB}, - // no added_a - {"grown_b", 3_GiB}, - {"shrunk_b", 150_MiB}, - {"same_b", 100_MiB}, - {"added_b", 150_MiB}, - // no deleted_b - }; -} - -const PartitionSuffixSizes update_sizes_2() { - return { - {"grown_a", 4_GiB}, - {"shrunk_a", 100_MiB}, - {"same_a", 100_MiB}, - {"deleted_a", 64_MiB}, - // no added_a - {"grown_b", 3_GiB}, - {"shrunk_b", 150_MiB}, - {"same_b", 100_MiB}, - {"added_b", 150_MiB}, - // no deleted_b - }; -} - -// Test case for first update after the device is manufactured, in which -// case the "other" slot is likely of size "0" (except system, which is -// non-zero because of system_other partition) -TEST_F(BootControlAndroidTest, SimulatedFirstUpdate) { - SetSlots({0, 1}); - - SetMetadata(source(), update_sizes_0()); - SetMetadata(target(), update_sizes_0()); - ExpectStoreMetadata(update_sizes_1()); - ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"}); - - EXPECT_TRUE(InitPartitionMetadata(target(), - {{"grown", 3_GiB}, - {"shrunk", 150_MiB}, - {"same", 100_MiB}, - {"added", 150_MiB}})); -} - -// After first update, test for the second update. In the second update, the -// "added" partition is deleted and "deleted" partition is re-added. 
-TEST_F(BootControlAndroidTest, SimulatedSecondUpdate) { - SetSlots({1, 0}); - - SetMetadata(source(), update_sizes_1()); - SetMetadata(target(), update_sizes_0()); - - ExpectStoreMetadata(update_sizes_2()); - ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"}); - - EXPECT_TRUE(InitPartitionMetadata(target(), - {{"grown", 4_GiB}, - {"shrunk", 100_MiB}, - {"same", 100_MiB}, - {"deleted", 64_MiB}})); -} - TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) { SetSlots({1, 1}); EXPECT_FALSE(InitPartitionMetadata(target(), {})) << "Should not be able to apply to current slot."; } -class BootControlAndroidGroupTestP : public BootControlAndroidTestP { - public: - void SetUp() override { - BootControlAndroidTestP::SetUp(); - SetMetadata( - source(), - {.groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB), - SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB), - SimpleGroup(T("android"), 3_GiB, T("system"), 0), - SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}}); - } - - // Return a simple group with only one partition. - PartitionMetadata::Group SimpleGroup(const string& group, - uint64_t group_size, - const string& partition, - uint64_t partition_size) { - return {.name = group, - .size = group_size, - .partitions = {{.name = partition, .size = partition_size}}}; - } - - void ExpectStoreMetadata(const PartitionMetadata& partition_metadata) { - ExpectStoreMetadataMatch(MetadataMatches(partition_metadata)); - } - - // Expect that target slot is stored with target groups. - void ExpectStoreMetadataMatch( - const Matcher& matcher) override { - BootControlAndroidTestP::ExpectStoreMetadataMatch(AllOf( - MetadataMatches(PartitionMetadata{ - .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB), - SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB)}}), - matcher)); - } -}; - -// Allow to resize within group. 
-TEST_P(BootControlAndroidGroupTestP, ResizeWithinGroup) { - ExpectStoreMetadata(PartitionMetadata{ - .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB), - SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}}); - ExpectUnmap({T("system"), T("vendor")}); - - EXPECT_TRUE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB), - SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}, - true)); -} - -TEST_P(BootControlAndroidGroupTestP, NotEnoughSpaceForGroup) { - EXPECT_FALSE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB), - SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}}, - true)) - << "Should not be able to grow over maximum size of group"; -} - -TEST_P(BootControlAndroidGroupTestP, GroupTooBig) { - EXPECT_FALSE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{.groups = {{.name = "android", .size = 3_GiB}, - {.name = "oem", .size = 3_GiB}}}, - true)) - << "Should not be able to grow over size of super / 2"; -} - -TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) { - ExpectStoreMetadata(PartitionMetadata{ - .groups = {{.name = T("android"), - .size = 3_GiB, - .partitions = {{.name = T("system"), .size = 2_GiB}, - {.name = T("system_ext"), .size = 1_GiB}}}}}); - ExpectUnmap({T("system"), T("vendor"), T("system_ext")}); - - EXPECT_TRUE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {{.name = "android", - .size = 3_GiB, - .partitions = {{.name = "system", .size = 2_GiB}, - {.name = "system_ext", .size = 1_GiB}}}, - SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}, - true)); -} - -TEST_P(BootControlAndroidGroupTestP, RemovePartitionFromGroup) { - ExpectStoreMetadata(PartitionMetadata{ - .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}}); - ExpectUnmap({T("vendor")}); - - EXPECT_TRUE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {{.name = "android", .size = 3_GiB, .partitions = {}}, - SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}, - true)); -} - -TEST_P(BootControlAndroidGroupTestP, AddGroup) { - ExpectStoreMetadata(PartitionMetadata{ - .groups = { - SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}}); - ExpectUnmap({T("system"), T("vendor"), T("new_partition")}); - - EXPECT_TRUE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB), - SimpleGroup("oem", 1_GiB, "vendor", 1_GiB), - SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}}, - true)); -} - -TEST_P(BootControlAndroidGroupTestP, RemoveGroup) { - ExpectStoreMetadataMatch(Not(HasGroup(T("oem")))); - ExpectUnmap({T("system")}); - EXPECT_TRUE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}}, - true)); -} - -TEST_P(BootControlAndroidGroupTestP, ResizeGroup) { - ExpectStoreMetadata(PartitionMetadata{ - .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB), - SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}}); - ExpectUnmap({T("system"), T("vendor")}); - - EXPECT_TRUE(bootctl_.InitPartitionMetadata( - target(), - PartitionMetadata{ - .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB), - SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}}, - true)); -} - -INSTANTIATE_TEST_CASE_P(BootControlAndroidTest, - BootControlAndroidGroupTestP, - testing::Values(TestParam{0, 1}, TestParam{1, 0})); - } // namespace chromeos_update_engine diff 
--git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 73d7539a..dc152cc3 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -79,6 +79,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t target_slot); private: + friend class DynamicPartitionControlAndroidTest; + std::set mapped_devices_; void CleanupInternal(bool wait); diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc new file mode 100644 index 00000000..2fa0f16f --- /dev/null +++ b/dynamic_partition_control_android_unittest.cc @@ -0,0 +1,477 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/dynamic_partition_control_android.h" + +#include +#include + +#include +#include +#include +#include + +#include "update_engine/dynamic_partition_test_utils.h" +#include "update_engine/mock_dynamic_partition_control.h" + +using std::string; +using testing::_; +using testing::AnyNumber; +using testing::Invoke; +using testing::NiceMock; +using testing::Not; +using testing::Return; + +namespace chromeos_update_engine { + +class DynamicPartitionControlAndroidTest : public ::testing::Test { + public: + void SetUp() override { + module_ = std::make_unique>(); + + ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); + + ON_CALL(dynamicControl(), GetDeviceDir(_)) + .WillByDefault(Invoke([](auto path) { + *path = kFakeDevicePath; + return true; + })); + } + + // Return the mocked DynamicPartitionControlInterface. + NiceMock& dynamicControl() { + return static_cast&>(*module_); + } + + uint32_t source() { return slots_.source; } + uint32_t target() { return slots_.target; } + + // Return partition names with suffix of source(). + std::string S(const std::string& name) { + return name + kSlotSuffixes[source()]; + } + + // Return partition names with suffix of target(). + std::string T(const std::string& name) { + return name + kSlotSuffixes[target()]; + } + + // Set the fake metadata to return when LoadMetadataBuilder is called on + // |slot|. + void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) { + EXPECT_CALL(dynamicControl(), + LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke([sizes](auto, auto, auto) { + return NewFakeMetadata(PartitionSuffixSizesToMetadata(sizes)); + })); + } + + void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) { + EXPECT_CALL(dynamicControl(), + StoreMetadata(GetSuperDevice(target()), + MetadataMatches(partition_sizes), + target())) + .WillOnce(Return(true)); + } + + // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata + // slot with each partition in |partitions|. + void ExpectUnmap(const std::set& partitions) { + // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments. 
+ ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_)) + .WillByDefault(Return(false)); + + for (const auto& partition : partitions) { + EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition)) + .WillOnce(Return(true)); + } + } + bool PreparePartitionsForUpdate(const PartitionSizes& partition_sizes) { + return dynamicControl().PreparePartitionsForUpdate( + source(), target(), PartitionSizesToMetadata(partition_sizes)); + } + void SetSlots(const TestParam& slots) { slots_ = slots; } + + struct Listener : public ::testing::MatchResultListener { + explicit Listener(std::ostream* os) : MatchResultListener(os) {} + }; + + testing::AssertionResult UpdatePartitionMetadata( + const PartitionSuffixSizes& source_metadata, + const PartitionSizes& update_metadata, + const PartitionSuffixSizes& expected) { + return UpdatePartitionMetadata( + PartitionSuffixSizesToMetadata(source_metadata), + PartitionSizesToMetadata(update_metadata), + PartitionSuffixSizesToMetadata(expected)); + } + testing::AssertionResult UpdatePartitionMetadata( + const PartitionMetadata& source_metadata, + const PartitionMetadata& update_metadata, + const PartitionMetadata& expected) { + return UpdatePartitionMetadata( + source_metadata, update_metadata, MetadataMatches(expected)); + } + testing::AssertionResult UpdatePartitionMetadata( + const PartitionMetadata& source_metadata, + const PartitionMetadata& update_metadata, + const Matcher& matcher) { + auto super_metadata = NewFakeMetadata(source_metadata); + if (!module_->UpdatePartitionMetadata( + super_metadata.get(), target(), update_metadata)) { + return testing::AssertionFailure() + << "UpdatePartitionMetadataInternal failed"; + } + std::stringstream ss; + Listener listener(&ss); + if (matcher.MatchAndExplain(super_metadata.get(), &listener)) { + return testing::AssertionSuccess() << ss.str(); + } else { + return testing::AssertionFailure() << ss.str(); + } + } + + std::unique_ptr module_; + TestParam slots_; +}; + +class DynamicPartitionControlAndroidTestP + : public DynamicPartitionControlAndroidTest, + public ::testing::WithParamInterface { + public: + void SetUp() override { + DynamicPartitionControlAndroidTest::SetUp(); + SetSlots(GetParam()); + } +}; + +// Test resize case. Grow if target metadata contains a partition with a size +// less than expected. +TEST_P(DynamicPartitionControlAndroidTestP, + NeedGrowIfSizeNotMatchWhenResizing) { + PartitionSuffixSizes source_metadata{{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}; + PartitionSuffixSizes expected{{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 3_GiB}, + {T("vendor"), 1_GiB}}; + PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 1_GiB}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +// Test resize case. Shrink if target metadata contains a partition with a size +// greater than expected. +TEST_P(DynamicPartitionControlAndroidTestP, + NeedShrinkIfSizeNotMatchWhenResizing) { + PartitionSuffixSizes source_metadata{{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}; + PartitionSuffixSizes expected{{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 150_MiB}}; + PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 150_MiB}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +// Test adding partitions on the first run. 
+TEST_P(DynamicPartitionControlAndroidTestP, AddPartitionToEmptyMetadata) { + PartitionSuffixSizes source_metadata{}; + PartitionSuffixSizes expected{{T("system"), 2_GiB}, {T("vendor"), 1_GiB}}; + PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 1_GiB}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +// Test subsequent add case. +TEST_P(DynamicPartitionControlAndroidTestP, AddAdditionalPartition) { + PartitionSuffixSizes source_metadata{{S("system"), 2_GiB}, + {T("system"), 2_GiB}}; + PartitionSuffixSizes expected{ + {S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}; + PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 1_GiB}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +// Test delete one partition. +TEST_P(DynamicPartitionControlAndroidTestP, DeletePartition) { + PartitionSuffixSizes source_metadata{{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}; + // No T("vendor") + PartitionSuffixSizes expected{ + {S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}}; + PartitionSizes update_metadata{{"system", 2_GiB}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +// Test delete all partitions. +TEST_P(DynamicPartitionControlAndroidTestP, DeleteAll) { + PartitionSuffixSizes source_metadata{{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}; + PartitionSuffixSizes expected{{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}; + PartitionSizes update_metadata{}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +// Test corrupt source metadata case. +TEST_P(DynamicPartitionControlAndroidTestP, CorruptedSourceMetadata) { + EXPECT_CALL(dynamicControl(), + LoadMetadataBuilder(GetSuperDevice(source()), source(), _)) + .WillOnce(Invoke([](auto, auto, auto) { return nullptr; })); + ExpectUnmap({T("system")}); + + EXPECT_FALSE(PreparePartitionsForUpdate({{"system", 1_GiB}})) + << "Should not be able to continue with corrupt source metadata"; +} + +// Test that UpdatePartitionMetadata fails if there is not enough space on the +// device. 
+TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpace) { + PartitionSuffixSizes source_metadata{{S("system"), 3_GiB}, + {S("vendor"), 2_GiB}, + {T("system"), 0}, + {T("vendor"), 0}}; + PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 3_GiB}}; + + EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {})) + << "Should not be able to fit 11GiB data into 10GiB space"; +} + +TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpaceForSlot) { + PartitionSuffixSizes source_metadata{{S("system"), 1_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 0}, + {T("vendor"), 0}}; + PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 3_GiB}}; + EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {})) + << "Should not be able to grow over size of super / 2"; +} + +INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest, + DynamicPartitionControlAndroidTestP, + testing::Values(TestParam{0, 1}, TestParam{1, 0})); + +class DynamicPartitionControlAndroidGroupTestP + : public DynamicPartitionControlAndroidTestP { + public: + PartitionMetadata source_metadata; + void SetUp() override { + DynamicPartitionControlAndroidTestP::SetUp(); + source_metadata = { + .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB), + SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB), + SimpleGroup(T("android"), 3_GiB, T("system"), 0), + SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}}; + } + + // Return a simple group with only one partition. + PartitionMetadata::Group SimpleGroup(const string& group, + uint64_t group_size, + const string& partition, + uint64_t partition_size) { + return {.name = group, + .size = group_size, + .partitions = {{.name = partition, .size = partition_size}}}; + } +}; + +// Allow to resize within group. 
+TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeWithinGroup) { + PartitionMetadata expected{ + .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB), + SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}}; + + PartitionMetadata update_metadata{ + .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB), + SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}; + + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, NotEnoughSpaceForGroup) { + PartitionMetadata update_metadata{ + .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB), + SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}}; + EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {})) + << "Should not be able to grow over maximum size of group"; +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, GroupTooBig) { + PartitionMetadata update_metadata{ + .groups = {{.name = "android", .size = 3_GiB}, + {.name = "oem", .size = 3_GiB}}}; + EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {})) + << "Should not be able to grow over size of super / 2"; +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, AddPartitionToGroup) { + PartitionMetadata expected{ + .groups = {{.name = T("android"), + .size = 3_GiB, + .partitions = {{.name = T("system"), .size = 2_GiB}, + {.name = T("system_ext"), .size = 1_GiB}}}}}; + PartitionMetadata update_metadata{ + .groups = {{.name = "android", + .size = 3_GiB, + .partitions = {{.name = "system", .size = 2_GiB}, + {.name = "system_ext", .size = 1_GiB}}}, + SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, RemovePartitionFromGroup) { + PartitionMetadata expected{ + .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}}; + PartitionMetadata update_metadata{ + .groups = {{.name = "android", .size = 3_GiB, .partitions = {}}, + SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, AddGroup) { + PartitionMetadata expected{ + .groups = { + SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}}; + PartitionMetadata update_metadata{ + .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB), + SimpleGroup("oem", 1_GiB, "vendor", 1_GiB), + SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, RemoveGroup) { + PartitionMetadata update_metadata{ + .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}}; + + EXPECT_TRUE(UpdatePartitionMetadata( + source_metadata, update_metadata, Not(HasGroup(T("oem"))))); +} + +TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeGroup) { + PartitionMetadata expected{ + .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB), + SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}}; + PartitionMetadata update_metadata{ + .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB), + SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}}; + EXPECT_TRUE( + UpdatePartitionMetadata(source_metadata, update_metadata, expected)); +} + +INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest, + DynamicPartitionControlAndroidGroupTestP, + testing::Values(TestParam{0, 1}, TestParam{1, 0})); + +const PartitionSuffixSizes 
update_sizes_0() { + // Initial state is 0 for "other" slot. + return { + {"grown_a", 2_GiB}, + {"shrunk_a", 1_GiB}, + {"same_a", 100_MiB}, + {"deleted_a", 150_MiB}, + // no added_a + {"grown_b", 200_MiB}, + // simulate system_other + {"shrunk_b", 0}, + {"same_b", 0}, + {"deleted_b", 0}, + // no added_b + }; +} + +const PartitionSuffixSizes update_sizes_1() { + return { + {"grown_a", 2_GiB}, + {"shrunk_a", 1_GiB}, + {"same_a", 100_MiB}, + {"deleted_a", 150_MiB}, + // no added_a + {"grown_b", 3_GiB}, + {"shrunk_b", 150_MiB}, + {"same_b", 100_MiB}, + {"added_b", 150_MiB}, + // no deleted_b + }; +} + +const PartitionSuffixSizes update_sizes_2() { + return { + {"grown_a", 4_GiB}, + {"shrunk_a", 100_MiB}, + {"same_a", 100_MiB}, + {"deleted_a", 64_MiB}, + // no added_a + {"grown_b", 3_GiB}, + {"shrunk_b", 150_MiB}, + {"same_b", 100_MiB}, + {"added_b", 150_MiB}, + // no deleted_b + }; +} + +// Test case for first update after the device is manufactured, in which +// case the "other" slot is likely of size "0" (except system, which is +// non-zero because of system_other partition) +TEST_F(DynamicPartitionControlAndroidTest, SimulatedFirstUpdate) { + SetSlots({0, 1}); + + SetMetadata(source(), update_sizes_0()); + SetMetadata(target(), update_sizes_0()); + ExpectStoreMetadata(update_sizes_1()); + ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"}); + + EXPECT_TRUE(PreparePartitionsForUpdate({{"grown", 3_GiB}, + {"shrunk", 150_MiB}, + {"same", 100_MiB}, + {"added", 150_MiB}})); +} + +// After first update, test for the second update. In the second update, the +// "added" partition is deleted and "deleted" partition is re-added. +TEST_F(DynamicPartitionControlAndroidTest, SimulatedSecondUpdate) { + SetSlots({1, 0}); + + SetMetadata(source(), update_sizes_1()); + SetMetadata(target(), update_sizes_0()); + + ExpectStoreMetadata(update_sizes_2()); + ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"}); + + EXPECT_TRUE(PreparePartitionsForUpdate({{"grown", 4_GiB}, + {"shrunk", 100_MiB}, + {"same", 100_MiB}, + {"deleted", 64_MiB}})); +} + +} // namespace chromeos_update_engine diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h new file mode 100644 index 00000000..2cfdff5d --- /dev/null +++ b/dynamic_partition_test_utils.h @@ -0,0 +1,258 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_ +#define UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "update_engine/common/boot_control_interface.h" + +namespace chromeos_update_engine { + +using android::fs_mgr::MetadataBuilder; +using testing::_; +using testing::MakeMatcher; +using testing::Matcher; +using testing::MatcherInterface; +using testing::MatchResultListener; + +constexpr const uint32_t kMaxNumSlots = 2; +constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"}; +constexpr const char* kFakeDevicePath = "/fake/dev/path/"; +constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/"; +constexpr const uint32_t kFakeMetadataSize = 65536; +constexpr const char* kDefaultGroup = "foo"; + +// A map describing the size of each partition. +// "{name, size}" +using PartitionSizes = std::map; + +// "{name_a, size}" +using PartitionSuffixSizes = std::map; + +using PartitionMetadata = BootControlInterface::PartitionMetadata; + +// C++ standards do not allow uint64_t (aka unsigned long) to be the parameter +// of user-defined literal operators. +// clang-format off +inline constexpr unsigned long long operator"" _MiB(unsigned long long x) { // NOLINT + return x << 20; +} +inline constexpr unsigned long long operator"" _GiB(unsigned long long x) { // NOLINT + return x << 30; +} +// clang-format on + +constexpr uint64_t kDefaultGroupSize = 5_GiB; +// Super device size. 1 MiB for metadata. +constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB; + +template +inline std::ostream& operator<<(std::ostream& os, const std::map& param) { + os << "{"; + bool first = true; + for (const auto& pair : param) { + if (!first) + os << ", "; + os << pair.first << ":" << pair.second; + first = false; + } + return os << "}"; +} + +template +inline std::ostream& operator<<(std::ostream& os, const std::vector& param) { + os << "["; + bool first = true; + for (const auto& e : param) { + if (!first) + os << ", "; + os << e; + first = false; + } + return os << "]"; +} + +inline std::ostream& operator<<(std::ostream& os, + const PartitionMetadata::Partition& p) { + return os << "{" << p.name << ", " << p.size << "}"; +} + +inline std::ostream& operator<<(std::ostream& os, + const PartitionMetadata::Group& g) { + return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}"; +} + +inline std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) { + return os << m.groups; +} + +inline std::string GetDevice(const std::string& name) { + return kFakeDevicePath + name; +} + +inline std::string GetDmDevice(const std::string& name) { + return kFakeDmDevicePath + name; +} + +// TODO(elsk): fs_mgr_get_super_partition_name should be mocked. +inline std::string GetSuperDevice(uint32_t slot) { + return GetDevice(fs_mgr_get_super_partition_name(slot)); +} + +// To support legacy tests, auto-convert {name_a: size} map to +// PartitionMetadata. 
+inline PartitionMetadata PartitionSuffixSizesToMetadata( + const PartitionSuffixSizes& partition_sizes) { + PartitionMetadata metadata; + for (const char* suffix : kSlotSuffixes) { + metadata.groups.push_back( + {std::string(kDefaultGroup) + suffix, kDefaultGroupSize, {}}); + } + for (const auto& pair : partition_sizes) { + for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) { + if (base::EndsWith(pair.first, + kSlotSuffixes[suffix_idx], + base::CompareCase::SENSITIVE)) { + metadata.groups[suffix_idx].partitions.push_back( + {pair.first, pair.second}); + } + } + } + return metadata; +} + +// To support legacy tests, auto-convert {name: size} map to PartitionMetadata. +inline PartitionMetadata PartitionSizesToMetadata( + const PartitionSizes& partition_sizes) { + PartitionMetadata metadata; + metadata.groups.push_back( + {std::string{kDefaultGroup}, kDefaultGroupSize, {}}); + for (const auto& pair : partition_sizes) { + metadata.groups[0].partitions.push_back({pair.first, pair.second}); + } + return metadata; +} + +inline std::unique_ptr NewFakeMetadata( + const PartitionMetadata& metadata) { + auto builder = + MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots); + EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2); + EXPECT_NE(nullptr, builder); + if (builder == nullptr) + return nullptr; + for (const auto& group : metadata.groups) { + EXPECT_TRUE(builder->AddGroup(group.name, group.size)); + for (const auto& partition : group.partitions) { + auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */); + EXPECT_TRUE(p && builder->ResizePartition(p, partition.size)); + } + } + return builder; +} + +class MetadataMatcher : public MatcherInterface { + public: + explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes) + : partition_metadata_(PartitionSuffixSizesToMetadata(partition_sizes)) {} + explicit MetadataMatcher(const PartitionMetadata& partition_metadata) + : partition_metadata_(partition_metadata) {} + + bool MatchAndExplain(MetadataBuilder* metadata, + MatchResultListener* listener) const override { + bool success = true; + for (const auto& group : partition_metadata_.groups) { + for (const auto& partition : group.partitions) { + auto p = metadata->FindPartition(partition.name); + if (p == nullptr) { + if (!success) + *listener << "; "; + *listener << "No partition " << partition.name; + success = false; + continue; + } + if (p->size() != partition.size) { + if (!success) + *listener << "; "; + *listener << "Partition " << partition.name << " has size " + << p->size() << ", expected " << partition.size; + success = false; + } + if (p->group_name() != group.name) { + if (!success) + *listener << "; "; + *listener << "Partition " << partition.name << " has group " + << p->group_name() << ", expected " << group.name; + success = false; + } + } + } + return success; + } + + void DescribeTo(std::ostream* os) const override { + *os << "expect: " << partition_metadata_; + } + + void DescribeNegationTo(std::ostream* os) const override { + *os << "expect not: " << partition_metadata_; + } + + private: + PartitionMetadata partition_metadata_; +}; + +inline Matcher MetadataMatches( + const PartitionSuffixSizes& partition_sizes) { + return MakeMatcher(new MetadataMatcher(partition_sizes)); +} + +inline Matcher MetadataMatches( + const PartitionMetadata& partition_metadata) { + return MakeMatcher(new MetadataMatcher(partition_metadata)); +} + +MATCHER_P(HasGroup, group, " has group " + group) { + auto groups = 
arg->ListGroups(); + return std::find(groups.begin(), groups.end(), group) != groups.end(); +} + +struct TestParam { + uint32_t source; + uint32_t target; +}; +inline std::ostream& operator<<(std::ostream& os, const TestParam& param) { + return os << "{source: " << param.source << ", target:" << param.target + << "}"; +} + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_ diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index a0701e7a..72eb030f 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -27,6 +27,30 @@ namespace chromeos_update_engine { +class MockDynamicPartitionControl : public DynamicPartitionControlInterface { + public: + MOCK_METHOD5(MapPartitionOnDeviceMapper, + bool(const std::string&, + const std::string&, + uint32_t, + bool, + std::string*)); + MOCK_METHOD0(Cleanup, void()); + MOCK_METHOD1(DeviceExists, bool(const std::string&)); + MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&)); + MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*)); + MOCK_METHOD2(LoadMetadataBuilder, + std::unique_ptr<::android::fs_mgr::MetadataBuilder>( + const std::string&, uint32_t)); + MOCK_METHOD1(GetDeviceDir, bool(std::string*)); + MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); + MOCK_METHOD3(PreparePartitionsForUpdate, + bool(uint32_t, + uint32_t, + const BootControlInterface::PartitionMetadata&)); + MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); +}; + class MockDynamicPartitionControlAndroid : public DynamicPartitionControlAndroid { public: From 700d7c1194a0606454f53cef6ad4e457960402ea Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 23 Jul 2019 20:49:16 -0700 Subject: [PATCH 079/624] [REFACTOR] DynamicPartitionControl: Add GetSuperPartitionName fs_mgr_get_super_partition_name() should be mocked because it is an external dependency to libfs_mgr. In tests, deliberately make GetSuperDevice() to return "fake_super" instead of "super" to make sure it is mocked properly. 
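As a rough, standalone illustration of the seam this introduces (the class names here are simplified stand-ins, not the real update_engine types): production code asks the control object for the super partition name, so tests can inject a fake value instead of calling into libfs_mgr.

#include <cstdint>
#include <iostream>
#include <string>

class PartitionControl {
 public:
  virtual ~PartitionControl() = default;
  // Mirrors the GetSuperPartitionName() hook added by this patch.
  virtual std::string GetSuperPartitionName(uint32_t slot) = 0;
};

class RealPartitionControl : public PartitionControl {
 public:
  std::string GetSuperPartitionName(uint32_t /* slot */) override {
    // The production path would forward to fs_mgr_get_super_partition_name().
    return "super";
  }
};

class FakePartitionControl : public PartitionControl {
 public:
  std::string GetSuperPartitionName(uint32_t /* slot */) override {
    return "fake_super";
  }
};

std::string SuperDevicePath(PartitionControl* control, uint32_t slot) {
  // Tests resolve the super device through the interface, so a fake name can
  // be injected without touching libfs_mgr.
  return "/fake/dev/path/" + control->GetSuperPartitionName(slot);
}

int main() {
  FakePartitionControl fake;
  std::cout << SuperDevicePath(&fake, 0) << std::endl;  // /fake/dev/path/fake_super
  return 0;
}

Routing the lookup through the interface is what lets the unit tests below pin GetSuperPartitionName() to kFakeSuper with a default gMock action.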
Test: run unittests Test: manually apply OTA Change-Id: I0f05d99bf168b6e658052b4bd67dc1e82ab36471 --- boot_control_android.cc | 10 +++++----- boot_control_android_unittest.cc | 7 +++++++ dynamic_partition_control_android.cc | 9 +++++++-- dynamic_partition_control_android.h | 1 + dynamic_partition_control_android_unittest.cc | 7 +++++++ dynamic_partition_control_interface.h | 4 ++++ dynamic_partition_test_utils.h | 6 +----- mock_dynamic_partition_control.h | 1 + 8 files changed, 33 insertions(+), 12 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index 44fc0faa..ce86666c 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -24,8 +24,8 @@ #include #include #include -#include #include +#include #include "update_engine/common/utils.h" #include "update_engine/dynamic_partition_control_android.h" @@ -109,7 +109,7 @@ bool BootControlAndroid::IsSuperBlockDevice( Slot slot, const string& partition_name_suffix) const { string source_device = - device_dir.Append(fs_mgr_get_super_partition_name(slot)).value(); + device_dir.Append(dynamic_control_->GetSuperPartitionName(slot)).value(); auto source_metadata = dynamic_control_->LoadMetadataBuilder(source_device, slot); return source_metadata->HasBlockDevice(partition_name_suffix); @@ -122,7 +122,7 @@ BootControlAndroid::GetDynamicPartitionDevice( Slot slot, string* device) const { string super_device = - device_dir.Append(fs_mgr_get_super_partition_name(slot)).value(); + device_dir.Append(dynamic_control_->GetSuperPartitionName(slot)).value(); auto builder = dynamic_control_->LoadMetadataBuilder(super_device, slot); @@ -140,8 +140,8 @@ BootControlAndroid::GetDynamicPartitionDevice( if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) { LOG(ERROR) << "The static partition " << partition_name_suffix << " is a block device for current metadata (" - << fs_mgr_get_super_partition_name(current_slot) << ", slot " - << BootControlInterface::SlotName(current_slot) + << dynamic_control_->GetSuperPartitionName(current_slot) + << ", slot " << BootControlInterface::SlotName(current_slot) << "). It cannot be used as a logical partition."; return DynamicPartitionDeviceStatus::ERROR; } diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index 1a875474..6f02a070 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -73,6 +73,13 @@ class BootControlAndroidTest : public ::testing::Test { *device = GetDmDevice(partition_name_suffix); return true; })); + + ON_CALL(dynamicControl(), GetSuperPartitionName(_)) + .WillByDefault(Return(kFakeSuper)); + } + + std::string GetSuperDevice(uint32_t slot) { + return GetDevice(dynamicControl().GetSuperPartitionName(slot)); } // Return the mocked HAL module. 
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 329ddd34..1a1e021f 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -309,7 +309,7 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( } base::FilePath device_dir(device_dir_str); auto source_device = - device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value(); + device_dir.Append(GetSuperPartitionName(source_slot)).value(); auto builder = LoadMetadataBuilder(source_device, source_slot, target_slot); if (builder == nullptr) { @@ -324,10 +324,15 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( } auto target_device = - device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value(); + device_dir.Append(GetSuperPartitionName(target_slot)).value(); return StoreMetadata(target_device, builder.get(), target_slot); } +std::string DynamicPartitionControlAndroid::GetSuperPartitionName( + uint32_t slot) { + return fs_mgr_get_super_partition_name(slot); +} + bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( MetadataBuilder* builder, uint32_t target_slot, diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index dc152cc3..062a2d1a 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -48,6 +48,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const BootControlInterface::PartitionMetadata& partition_metadata) override; bool GetDeviceDir(std::string* path) override; + std::string GetSuperPartitionName(uint32_t slot) override; protected: // These functions are exposed for testing. diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 2fa0f16f..5b3dfe31 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -50,6 +50,9 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { *path = kFakeDevicePath; return true; })); + + ON_CALL(dynamicControl(), GetSuperPartitionName(_)) + .WillByDefault(Return(kFakeSuper)); } // Return the mocked DynamicPartitionControlInterface. @@ -57,6 +60,10 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { return static_cast&>(*module_); } + std::string GetSuperDevice(uint32_t slot) { + return GetDevice(dynamicControl().GetSuperPartitionName(slot)); + } + uint32_t source() { return slots_.source; } uint32_t target() { return slots_.target; } diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index 32fbbe4a..b3ce4ea6 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -95,6 +95,10 @@ class DynamicPartitionControlInterface { // Return a possible location for devices listed by name. virtual bool GetDeviceDir(std::string* path) = 0; + + // Return the name of the super partition (which stores super partition + // metadata) for a given slot. 
+ virtual std::string GetSuperPartitionName(uint32_t slot) = 0; }; } // namespace chromeos_update_engine diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h index 2cfdff5d..574d30e8 100644 --- a/dynamic_partition_test_utils.h +++ b/dynamic_partition_test_utils.h @@ -48,6 +48,7 @@ constexpr const char* kFakeDevicePath = "/fake/dev/path/"; constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/"; constexpr const uint32_t kFakeMetadataSize = 65536; constexpr const char* kDefaultGroup = "foo"; +constexpr const char* kFakeSuper = "fake_super"; // A map describing the size of each partition. // "{name, size}" @@ -121,11 +122,6 @@ inline std::string GetDmDevice(const std::string& name) { return kFakeDmDevicePath + name; } -// TODO(elsk): fs_mgr_get_super_partition_name should be mocked. -inline std::string GetSuperDevice(uint32_t slot) { - return GetDevice(fs_mgr_get_super_partition_name(slot)); -} - // To support legacy tests, auto-convert {name_a: size} map to // PartitionMetadata. inline PartitionMetadata PartitionSuffixSizesToMetadata( diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 72eb030f..26fc2469 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -74,6 +74,7 @@ class MockDynamicPartitionControlAndroid uint32_t)); MOCK_METHOD1(GetDeviceDir, bool(std::string*)); MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); + MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); }; } // namespace chromeos_update_engine From 2fdbcae7f09000a7e4ea065567547b18601a2add Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 1 Aug 2019 09:48:48 -0700 Subject: [PATCH 080/624] update_engine: Bug fix for UpdateEngineStatus print Exempt-From-Owner-Approval: The auto_updater.py depends on the status of update_engine from the printout of `--status`. It finds the key `CURRENT_OP`, but the CL in chromium:1715978 set that to `CURRENT_OPERAITON`. Is required for `cros flash` to work properly. Revert CURRENT_OPERATION to previous CURRENT_OP. Output now: [0801/095624.227871:INFO:update_engine_client.cc(490)] Querying Update Engine status... CURRENT_OP=UPDATE_STATUS_IDLE IS_INSTALL=false LAST_CHECKED_TIME=0 NEW_SIZE=0 NEW_VERSION=0.0.0.0 PROGRESS=0.0 BUG=chromium:871340 TEST=FEATURES="test" emerge-$B update_engine TEST=update_engine_client --status Change-Id: I23142dab51894adc2aeeb06f0459c74287b1639b --- update_status_utils.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/update_status_utils.cc b/update_status_utils.cc index b56d94a1..a0aa2ed8 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -73,8 +73,7 @@ string UpdateEngineStatusToString(const UpdateEngineStatus& status) { key_value_store.SetString("NEW_SIZE", base::NumberToString(status.new_size_bytes)); #endif - key_value_store.SetString("CURRENT_OPERATION", - UpdateStatusToString(status.status)); + key_value_store.SetString("CURRENT_OP", UpdateStatusToString(status.status)); key_value_store.SetString("NEW_VERSION", status.new_version); key_value_store.SetBoolean("IS_INSTALL", status.is_install); From a215b59432218842c99b244107b15d4894944577 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Fri, 2 Aug 2019 14:53:38 -0700 Subject: [PATCH 081/624] Report metrics to statsd from update engine Call the proper logging functions after an update attempt or a successful update. This is part of the effort for the new metrics mechanism migration. 
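The mapping itself is a plain offset; a minimal standalone sketch follows (the constant matches the one added in the diff below, and the concrete enum values are taken from the code comment in this patch):

#include <cstdint>
#include <iostream>

// Same idea as the kMetricsReporterEnumOffset constant introduced below: shift
// update_engine enum values so that 0 stays reserved for "unknown" on the
// statsd side while the relative ordering of the enum is preserved.
constexpr int32_t kMetricsReporterEnumOffset = 10000;

int32_t GetStatsdEnumValue(int32_t value) {
  return kMetricsReporterEnumOffset + value;
}

int main() {
  // ErrorCode::SUCCESS (0) is reported as 10000, and AttemptResult value 11
  // (UPDATE_CANCELED) as 10011, matching the comment in the patch.
  std::cout << GetStatsdEnumValue(0) << "\n";
  std::cout << GetStatsdEnumValue(11) << "\n";
  return 0;
}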
Bug: 137682371 Test: run statsd_testdrive and check events Change-Id: I1174ff37d049172a8a6b14d47aa40c54f26be183 --- Android.bp | 1 + metrics_reporter_android.cc | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/Android.bp b/Android.bp index a13a92b7..a346a2a6 100644 --- a/Android.bp +++ b/Android.bp @@ -246,6 +246,7 @@ cc_defaults { "liblog", "libmetricslogger", "libssl", + "libstatslog", "libutils", ], } diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc index 9165f0d5..4165c143 100644 --- a/metrics_reporter_android.cc +++ b/metrics_reporter_android.cc @@ -16,10 +16,14 @@ #include "update_engine/metrics_reporter_android.h" +#include + #include #include +#include #include +#include #include "update_engine/common/constants.h" @@ -28,6 +32,16 @@ void LogHistogram(const std::string& metrics, int value) { android::metricslogger::LogHistogram(metrics, value); LOG(INFO) << "uploading " << value << " to histogram for metric " << metrics; } + +// A number offset adds on top of the enum value. e.g. ErrorCode::SUCCESS will +// be reported as 10000, and AttemptResult::UPDATE_CANCELED will be reported as +// 10011. The keeps the ordering of update engine's enum definition when statsd +// atoms reserve the value 0 for unknown state. +constexpr auto kMetricsReporterEnumOffset = 10000; + +int32_t GetStatsdEnumValue(int32_t value) { + return kMetricsReporterEnumOffset + value; +} } // namespace namespace chromeos_update_engine { @@ -100,6 +114,17 @@ void MetricsReporterAndroid::ReportUpdateAttemptMetrics( static_cast(attempt_result)); LogHistogram(metrics::kMetricsUpdateEngineAttemptErrorCode, static_cast(error_code)); + + android::util::stats_write( + android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED, + attempt_number, + GetStatsdEnumValue(static_cast(payload_type)), + duration.InMinutes(), + duration_uptime.InMinutes(), + payload_size_mib, + GetStatsdEnumValue(static_cast(attempt_result)), + GetStatsdEnumValue(static_cast(error_code)), + android::base::GetProperty("ro.build.fingerprint", "").c_str()); } void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics( @@ -148,6 +173,16 @@ void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics( total_duration.InMinutes()); LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateRebootCount, reboot_count); + + android::util::stats_write( + android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED, + attempt_count, + GetStatsdEnumValue(static_cast(payload_type)), + payload_size_mib, + total_bytes_downloaded, + download_overhead_percentage, + total_duration.InMinutes(), + reboot_count); } void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() { From 916af851d46c3a546993ad639573fdb3c1c05ac9 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 1 Aug 2019 17:45:30 -0700 Subject: [PATCH 082/624] update_engine: Test update_engine printouts These tests are added to enforce sensitive variables stay invariant with no room or future mistakes to occur again on breaking autotest and cros flash process. BUG=chromium:871340 TEST=FEATURES="test" emerge-$BOARD update_engine update_engine-client TEST=/usr/bin/update_engine_client --status TEST=cros flash $TEST_IP ../build/image/... 
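For context, the printed keys are consumed verbatim by external tooling; a hypothetical consumer-side helper (illustration only, not code from this change) shows why renaming CURRENT_OP silently breaks such parsers:

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical consumer: scans `update_engine_client --status` output for an
// exact "KEY=" prefix, which is why the key spellings are pinned by the new
// unit tests below.
std::string ExtractValue(const std::string& status_text, const std::string& key) {
  std::istringstream stream(status_text);
  std::string line;
  while (std::getline(stream, line)) {
    if (line.rfind(key + "=", 0) == 0)
      return line.substr(key.size() + 1);
  }
  return "";
}

int main() {
  const std::string status =
      "CURRENT_OP=UPDATE_STATUS_IDLE\nIS_INSTALL=false\nPROGRESS=0.0\n";
  std::cout << ExtractValue(status, "CURRENT_OP") << "\n";  // UPDATE_STATUS_IDLE
  return 0;
}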
# works Change-Id: Ibcce5c1dee56cf5bca201a86a143a87b033605bc Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1732410 Tested-by: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- Android.bp | 1 + BUILD.gn | 1 + update_status_utils.cc | 31 ++++++++++++----- update_status_utils_unittest.cc | 59 +++++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 9 deletions(-) create mode 100644 update_status_utils_unittest.cc diff --git a/Android.bp b/Android.bp index e9b7b138..2e215c5a 100644 --- a/Android.bp +++ b/Android.bp @@ -661,6 +661,7 @@ cc_test { "payload_generator/zip_unittest.cc", "testrunner.cc", "update_attempter_android_unittest.cc", + "update_status_utils_unittest.cc", ], } diff --git a/BUILD.gn b/BUILD.gn index 5e76bfbf..5f5aa545 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -537,6 +537,7 @@ if (use.test) { "update_manager/update_time_restrictions_policy_impl_unittest.cc", "update_manager/variable_unittest.cc", "update_manager/weekly_time_unittest.cc", + "update_status_utils_unittest.cc", ] # //common-mk:test should be on the top. diff --git a/update_status_utils.cc b/update_status_utils.cc index a0aa2ed8..07583148 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -27,6 +27,19 @@ using update_engine::UpdateStatus; namespace chromeos_update_engine { +namespace { + +// Note: Do not change these, autotest depends on these string variables being +// exactly these matches. +const char kCurrentOp[] = "CURRENT_OP"; +const char kIsInstall[] = "IS_INSTALL"; +const char kLastCheckedTime[] = "LAST_CHECKED_TIME"; +const char kNewSize[] = "NEW_SIZE"; +const char kNewVersion[] = "NEW_VERSION"; +const char kProgress[] = "PROGRESS"; + +} // namespace + const char* UpdateStatusToString(const UpdateStatus& status) { switch (status) { case UpdateStatus::IDLE: @@ -61,21 +74,21 @@ string UpdateEngineStatusToString(const UpdateEngineStatus& status) { KeyValueStore key_value_store; #if BASE_VER < 576279 - key_value_store.SetString("LAST_CHECKED_TIME", + key_value_store.SetString(kLastCheckedTime, base::Int64ToString(status.last_checked_time)); - key_value_store.SetString("PROGRESS", base::DoubleToString(status.progress)); - key_value_store.SetString("NEW_SIZE", + key_value_store.SetString(kProgress, base::DoubleToString(status.progress)); + key_value_store.SetString(kNewSize, base::Uint64ToString(status.new_size_bytes)); #else - key_value_store.SetString("LAST_CHECKED_TIME", + key_value_store.SetString(kLastCheckedTime, base::NumberToString(status.last_checked_time)); - key_value_store.SetString("PROGRESS", base::NumberToString(status.progress)); - key_value_store.SetString("NEW_SIZE", + key_value_store.SetString(kProgress, base::NumberToString(status.progress)); + key_value_store.SetString(kNewSize, base::NumberToString(status.new_size_bytes)); #endif - key_value_store.SetString("CURRENT_OP", UpdateStatusToString(status.status)); - key_value_store.SetString("NEW_VERSION", status.new_version); - key_value_store.SetBoolean("IS_INSTALL", status.is_install); + key_value_store.SetString(kCurrentOp, UpdateStatusToString(status.status)); + key_value_store.SetString(kNewVersion, status.new_version); + key_value_store.SetBoolean(kIsInstall, status.is_install); return key_value_store.SaveToString(); } diff --git a/update_status_utils_unittest.cc b/update_status_utils_unittest.cc new file mode 100644 index 00000000..dbd80d7a --- /dev/null +++ b/update_status_utils_unittest.cc @@ -0,0 +1,59 @@ +// +// 
Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/update_status_utils.h" + +#include + +#include + +using std::string; + +namespace chromeos_update_engine { + +TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringDefaultTest) { + string print = + R"(CURRENT_OP=UPDATE_STATUS_IDLE +IS_INSTALL=false +LAST_CHECKED_TIME=0 +NEW_SIZE=0 +NEW_VERSION= +PROGRESS=0.0 +)"; + EXPECT_EQ(print, UpdateEngineStatusToString({})); +} + +TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) { + update_engine::UpdateEngineStatus update_engine_status = { + .status = update_engine::UpdateStatus::CHECKING_FOR_UPDATE, + .is_install = true, + .last_checked_time = 156000000, + .new_size_bytes = 888, + .new_version = "12345.0.0", + .progress = 0.5, + }; + string print = + R"(CURRENT_OP=UPDATE_STATUS_CHECKING_FOR_UPDATE +IS_INSTALL=true +LAST_CHECKED_TIME=156000000 +NEW_SIZE=888 +NEW_VERSION=12345.0.0 +PROGRESS=0.5 +)"; + EXPECT_EQ(print, UpdateEngineStatusToString(update_engine_status)); +} + +} // namespace chromeos_update_engine From 30fa5f523f850cf425329642fcdfedbb2c47758f Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 5 Aug 2019 16:39:59 -0700 Subject: [PATCH 083/624] fix DynamicPartitionControlAndroid::LoadMetadataBuilder If target_slot is kInvalidSlot, call MetadataBuilder::New instead of NewForUpdate. Test: apply OTA on retrofit DAP devices Fixes: 138914539 Change-Id: I5642c051ad4d80b814617ea91fcd2aa060f9a498 --- dynamic_partition_control_android.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 1a1e021f..d9d3be56 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -204,8 +204,14 @@ DynamicPartitionControlAndroid::LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot, uint32_t target_slot) { - auto builder = MetadataBuilder::NewForUpdate( - PartitionOpener(), super_device, source_slot, target_slot); + std::unique_ptr builder; + if (target_slot == BootControlInterface::kInvalidSlot) { + builder = + MetadataBuilder::New(PartitionOpener(), super_device, source_slot); + } else { + builder = MetadataBuilder::NewForUpdate( + PartitionOpener(), super_device, source_slot, target_slot); + } if (builder == nullptr) { LOG(WARNING) << "No metadata slot " From 14715ced5bbe551e06009afdd3712f84dc5b1818 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Tue, 6 Aug 2019 17:24:43 -0700 Subject: [PATCH 084/624] Stop using unzip -p when extracting images brillo_update_payload uses `unzip -p` to extract images to a named temp file. But the command doesn't generate an error when the disk is full. This cl creates a helper function and switches to use `unzip -d` to extract the image; and then move the extracted image to the intended name. 
Bug: 138725843 Test: generate and verify a payload, check the extract error when disk is full Change-Id: I71c2d07de3c1c826f2e07fcc6497437c4051944f --- scripts/brillo_update_payload | 44 +++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload index f535185b..a238ddf5 100755 --- a/scripts/brillo_update_payload +++ b/scripts/brillo_update_payload @@ -327,6 +327,25 @@ cleanup_on_exit() { trap cleanup_on_error INT TERM ERR trap cleanup_on_exit EXIT +# extract_file +# +# Extracts |entry_name| from |zip_file| to |destination|. +extract_file() { + local zip_file="$1" + local entry_name="$2" + local destination="$3" + + # unzip -p won't report error upon ENOSPC. Therefore, create a temp directory + # as the destination of the unzip, and move the file to the intended + # destination. + local output_directory=$( + mktemp --directory --tmpdir="${FLAGS_work_dir}" "TEMP.XXXXXX") + unzip "${zip_file}" "${entry_name}" -d "${output_directory}" || + { rm -rf "${output_directory}"; die "Failed to extract ${entry_name}"; } + + mv "${output_directory}/${entry_name}" "${destination}" + rm -rf "${output_directory}" +} # extract_image [partitions_order] # @@ -417,7 +436,7 @@ extract_partition_brillo() { fi done [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img" - unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}" + extract_file "${image}" "${path_in_zip}/${part}.img" "${part_file}" # If the partition is stored as an Android sparse image file, we need to # convert them to a raw image for the update. @@ -431,8 +450,9 @@ extract_partition_brillo() { fi # Extract the .map file (if one is available). - unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" \ - 2>/dev/null || true + if unzip -l "${image}" "${path_in_zip}/${part}.map" > /dev/null; then + extract_file "${image}" "${path_in_zip}/${part}.map" "${part_map_file}" + fi # delta_generator only supports images multiple of 4 KiB. For target images # we pad the data with zeros if needed, but for source images we truncate @@ -466,7 +486,8 @@ extract_image_brillo() { local ab_partitions_list ab_partitions_list=$(create_tempfile "ab_partitions_list.XXXXXX") CLEANUP_FILES+=("${ab_partitions_list}") - if unzip -p "${image}" "META/ab_partitions.txt" >"${ab_partitions_list}"; then + if unzip -l "${image}" "META/ab_partitions.txt" > /dev/null; then + extract_file "${image}" "META/ab_partitions.txt" "${ab_partitions_list}" if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then die "Invalid partition names found in the partition list." fi @@ -491,8 +512,9 @@ extract_image_brillo() { # Source image local ue_config=$(create_tempfile "ue_config.XXXXXX") CLEANUP_FILES+=("${ue_config}") - if ! unzip -p "${image}" "META/update_engine_config.txt" \ - >"${ue_config}"; then + if unzip -l "${image}" "META/update_engine_config.txt" > /dev/null; then + extract_file "${image}" "META/update_engine_config.txt" "${ue_config}" + else warn "No update_engine_config.txt found. Assuming pre-release image, \ using payload minor version 2" fi @@ -513,14 +535,16 @@ Disabling deltas for this source version." 
# Target image local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX") CLEANUP_FILES+=("${postinstall_config}") - if unzip -p "${image}" "META/postinstall_config.txt" \ - >"${postinstall_config}"; then + if unzip -l "${image}" "META/postinstall_config.txt" > /dev/null; then + extract_file "${image}" "META/postinstall_config.txt" \ + "${postinstall_config}" POSTINSTALL_CONFIG_FILE="${postinstall_config}" fi local dynamic_partitions_info=$(create_tempfile "dynamic_partitions_info.XXXXXX") CLEANUP_FILES+=("${dynamic_partitions_info}") - if unzip -p "${image}" "META/dynamic_partitions_info.txt" \ - >"${dynamic_partitions_info}"; then + if unzip -l "${image}" "META/dynamic_partitions_info.txt" > /dev/null; then + extract_file "${image}" "META/dynamic_partitions_info.txt" \ + "${dynamic_partitions_info}" DYNAMIC_PARTITION_INFO_FILE="${dynamic_partitions_info}" fi fi From a4fa660c3a73fa9693be1613ee9947a4bffcc3b0 Mon Sep 17 00:00:00 2001 From: Xiaochu Liu Date: Mon, 5 Aug 2019 17:06:35 +0000 Subject: [PATCH 085/624] Revert "update_engine: trigger a crash on unrecoverable condition" This reverts commit 1329fd880962c9441d4ca462e8cda3fbc29049c9. Reason for revert: https://crbug.com/962730#c18 Original change's description: > update_engine: trigger a crash on unrecoverable condition > > CURLM_INTERNAL_ERROR and CURLM_OUT_OF_MEMORY are two libcurl error codes > that caller (update_engine) has no way to recover on its own. Reference: https://curl.haxx.se/libcurl/c/libcurl-errors.html > > Since those error conditions aren't recoverable and might be responsible > for the failures to update observed in crbug.com/927039, we exit and let the > system respawn update_engine to start from a fresh state and recover. > > BUG=chromium:962730,chromium:927039 > TEST=unittest > > Change-Id: I55946e58e518da5bc5cb0c23690430c6298b8582 > Reviewed-on: https://chromium-review.googlesource.com/1616425 > Commit-Ready: ChromeOS CL Exonerator Bot > Tested-by: Xiaochu Liu > Legacy-Commit-Queue: Commit Bot > Reviewed-by: Amin Hassani TBR=senj@chromium.org,norvez@chromium.org,ahassani@chromium.org,xiaochu@chromium.org,chromiumos-cl-exonerator@appspot.gserviceaccount.com # Not skipping CQ checks because original CL landed > 1 day ago. Bug: chromium:962730, chromium:927039 Change-Id: I3d10173cdacfcd1edf22b4515dce6091e9d3b258 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1737130 Reviewed-by: Xiaochu Liu Reviewed-by: Amin Hassani Tested-by: Xiaochu Liu Commit-Queue: Xiaochu Liu --- libcurl_http_fetcher.cc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 6b30eeba..ad823cf2 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -442,11 +442,7 @@ void LibcurlHttpFetcher::CurlPerformOnce() { metrics::CheckResult::kUnset, metrics::CheckReaction::kUnset, metrics::DownloadErrorCode::kInternalError); - // According to https://curl.haxx.se/libcurl/c/libcurl-errors.html: - // CURLM_INTERNAL_ERROR and CURLM_OUT_OF_MEMORY are two libcurl error codes - // that caller has no way to recover on its own. Thus, we exit and let the - // system respawn update_engine to start from a fresh state and recover. 
- LOG(FATAL) << "curl_multi_perform is in an unrecoverable error condition: " + LOG(ERROR) << "curl_multi_perform is in an unrecoverable error condition: " << retcode; } else if (retcode != CURLM_OK) { LOG(ERROR) << "curl_multi_perform returns error: " << retcode; From bfa822665191388523149ee20a237135fd26f63f Mon Sep 17 00:00:00 2001 From: Alex Khouderchah Date: Tue, 13 Aug 2019 15:00:34 -0700 Subject: [PATCH 086/624] update_engine: Remove references to bluetooth and wimax Shill does not support bluetooth (kTypeBluetooth is left over from the flimflam API) and has stopped supporting Wimax. BUG=chromium:954635 TEST=`FEATURES="test" emerge-$BOARD update_engine update_engine-client` Change-Id: I3e7d4f0b0a7625067585b6f9fdeec196b87f7026 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1752329 Reviewed-by: Amin Hassani Tested-by: Alex Khouderchah Commit-Queue: Alex Khouderchah --- connection_manager.cc | 103 ++++++++---------- connection_manager_unittest.cc | 27 +---- connection_utils.cc | 8 -- connection_utils.h | 2 - metrics_constants.h | 4 +- metrics_utils.cc | 6 - metrics_utils_unittest.cc | 6 - update_manager/boxed_value_unittest.cc | 9 +- update_manager/chromeos_policy.cc | 6 +- update_manager/chromeos_policy_unittest.cc | 41 ------- .../real_device_policy_provider_unittest.cc | 8 +- .../real_shill_provider_unittest.cc | 17 --- 12 files changed, 57 insertions(+), 180 deletions(-) diff --git a/connection_manager.cc b/connection_manager.cc index 7263a742..ad7e5f65 100644 --- a/connection_manager.cc +++ b/connection_manager.cc @@ -54,66 +54,57 @@ ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy, bool ConnectionManager::IsUpdateAllowedOver( ConnectionType type, ConnectionTethering tethering) const { - switch (type) { - case ConnectionType::kBluetooth: - return false; - - case ConnectionType::kCellular: { - set allowed_types; - - const policy::DevicePolicy* device_policy = - system_state_->device_policy(); - - // The device_policy is loaded in a lazy way before an update check. Load - // it now from the libbrillo cache if it wasn't already loaded. - if (!device_policy) { - UpdateAttempter* update_attempter = system_state_->update_attempter(); - if (update_attempter) { - update_attempter->RefreshDevicePolicy(); - device_policy = system_state_->device_policy(); - } - } - - if (!device_policy) { - // Device policy fails to be loaded (possibly due to guest account). We - // do not check the local user setting here, which should be checked by - // |OmahaRequestAction| during checking for update. - LOG(INFO) << "Allowing updates over cellular as device policy " - "fails to be loaded."; - return true; - } - - if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) { - // The update setting is enforced by the device policy. - - if (!base::ContainsKey(allowed_types, shill::kTypeCellular)) { - LOG(INFO) << "Disabling updates over cellular connection as it's not " - "allowed in the device policy."; - return false; - } - - LOG(INFO) << "Allowing updates over cellular per device policy."; - return true; - } - - // If there's no update setting in the device policy, we do not check - // the local user setting here, which should be checked by - // |OmahaRequestAction| during checking for update. 
- LOG(INFO) << "Allowing updates over cellular as device policy does " - "not include update setting."; + if (type != ConnectionType::kCellular) { + if (tethering != ConnectionTethering::kConfirmed) { return true; } - default: - if (tethering == ConnectionTethering::kConfirmed) { - // Treat this connection as if it is a cellular connection. - LOG(INFO) << "Current connection is confirmed tethered, using Cellular " - "setting."; - return IsUpdateAllowedOver(ConnectionType::kCellular, - ConnectionTethering::kUnknown); - } - return true; + // Treat this connection as if it is a cellular connection. + LOG(INFO) + << "Current connection is confirmed tethered, using Cellular setting."; + } + + const policy::DevicePolicy* device_policy = system_state_->device_policy(); + + // The device_policy is loaded in a lazy way before an update check. Load + // it now from the libbrillo cache if it wasn't already loaded. + if (!device_policy) { + UpdateAttempter* update_attempter = system_state_->update_attempter(); + if (update_attempter) { + update_attempter->RefreshDevicePolicy(); + device_policy = system_state_->device_policy(); + } } + + if (!device_policy) { + // Device policy fails to be loaded (possibly due to guest account). We + // do not check the local user setting here, which should be checked by + // |OmahaRequestAction| during checking for update. + LOG(INFO) << "Allowing updates over cellular as device policy fails to be " + "loaded."; + return true; + } + + set allowed_types; + if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) { + // The update setting is enforced by the device policy. + + if (!base::ContainsKey(allowed_types, shill::kTypeCellular)) { + LOG(INFO) << "Disabling updates over cellular connection as it's not " + "allowed in the device policy."; + return false; + } + + LOG(INFO) << "Allowing updates over cellular per device policy."; + return true; + } + + // If there's no update setting in the device policy, we do not check + // the local user setting here, which should be checked by + // |OmahaRequestAction| during checking for update. 
+ LOG(INFO) << "Allowing updates over cellular as device policy does " + "not include update setting."; + return true; } bool ConnectionManager::IsAllowedConnectionTypesForUpdateSet() const { diff --git a/connection_manager_unittest.cc b/connection_manager_unittest.cc index 3cdaf4cd..97436c9c 100644 --- a/connection_manager_unittest.cc +++ b/connection_manager_unittest.cc @@ -184,9 +184,6 @@ void ConnectionManagerTest::TestWithServiceDisconnected( TEST_F(ConnectionManagerTest, SimpleTest) { TestWithServiceType(shill::kTypeEthernet, nullptr, ConnectionType::kEthernet); TestWithServiceType(shill::kTypeWifi, nullptr, ConnectionType::kWifi); - TestWithServiceType(shill::kTypeWimax, nullptr, ConnectionType::kWimax); - TestWithServiceType( - shill::kTypeBluetooth, nullptr, ConnectionType::kBluetooth); TestWithServiceType(shill::kTypeCellular, nullptr, ConnectionType::kCellular); } @@ -195,8 +192,6 @@ TEST_F(ConnectionManagerTest, PhysicalTechnologyTest) { TestWithServiceType( shill::kTypeVPN, shill::kTypeVPN, ConnectionType::kUnknown); TestWithServiceType(shill::kTypeVPN, shill::kTypeWifi, ConnectionType::kWifi); - TestWithServiceType( - shill::kTypeVPN, shill::kTypeWimax, ConnectionType::kWimax); } TEST_F(ConnectionManagerTest, TetheringTest) { @@ -229,16 +224,6 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOverWifiTest) { ConnectionTethering::kUnknown)); } -TEST_F(ConnectionManagerTest, AllowUpdatesOverWimaxTest) { - EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax, - ConnectionTethering::kUnknown)); -} - -TEST_F(ConnectionManagerTest, BlockUpdatesOverBluetoothTest) { - EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth, - ConnectionTethering::kUnknown)); -} - TEST_F(ConnectionManagerTest, AllowUpdatesOnlyOver3GPerPolicyTest) { policy::MockDevicePolicy allow_3g_policy; @@ -263,10 +248,9 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOver3GAndOtherTypesPerPolicyTest) { // This test tests multiple connection types being allowed, with // 3G one among them. Only Cellular is currently enforced by the policy - // setting, the others are ignored (see Bluetooth for example). + // setting. set allowed_set; allowed_set.insert(StringForConnectionType(ConnectionType::kCellular)); - allowed_set.insert(StringForConnectionType(ConnectionType::kBluetooth)); EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_)) .Times(3) @@ -280,10 +264,6 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOver3GAndOtherTypesPerPolicyTest) { ConnectionTethering::kUnknown)); EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi, ConnectionTethering::kUnknown)); - EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax, - ConnectionTethering::kUnknown)); - EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth, - ConnectionTethering::kUnknown)); // Tethered networks are treated in the same way as Cellular networks and // thus allowed. 
@@ -325,7 +305,6 @@ TEST_F(ConnectionManagerTest, BlockUpdatesOver3GPerPolicyTest) { set allowed_set; allowed_set.insert(StringForConnectionType(ConnectionType::kEthernet)); allowed_set.insert(StringForConnectionType(ConnectionType::kWifi)); - allowed_set.insert(StringForConnectionType(ConnectionType::kWimax)); EXPECT_CALL(block_3g_policy, GetAllowedConnectionTypesForUpdate(_)) .Times(1) @@ -363,10 +342,6 @@ TEST_F(ConnectionManagerTest, StringForConnectionTypeTest) { StringForConnectionType(ConnectionType::kEthernet)); EXPECT_STREQ(shill::kTypeWifi, StringForConnectionType(ConnectionType::kWifi)); - EXPECT_STREQ(shill::kTypeWimax, - StringForConnectionType(ConnectionType::kWimax)); - EXPECT_STREQ(shill::kTypeBluetooth, - StringForConnectionType(ConnectionType::kBluetooth)); EXPECT_STREQ(shill::kTypeCellular, StringForConnectionType(ConnectionType::kCellular)); EXPECT_STREQ("Unknown", StringForConnectionType(ConnectionType::kUnknown)); diff --git a/connection_utils.cc b/connection_utils.cc index aeb01634..5af7341f 100644 --- a/connection_utils.cc +++ b/connection_utils.cc @@ -32,10 +32,6 @@ ConnectionType ParseConnectionType(const std::string& type_str) { return ConnectionType::kEthernet; } else if (type_str == shill::kTypeWifi) { return ConnectionType::kWifi; - } else if (type_str == shill::kTypeWimax) { - return ConnectionType::kWimax; - } else if (type_str == shill::kTypeBluetooth) { - return ConnectionType::kBluetooth; } else if (type_str == shill::kTypeCellular) { return ConnectionType::kCellular; } else if (type_str == kTypeDisconnected) { @@ -61,10 +57,6 @@ const char* StringForConnectionType(ConnectionType type) { return shill::kTypeEthernet; case ConnectionType::kWifi: return shill::kTypeWifi; - case ConnectionType::kWimax: - return shill::kTypeWimax; - case ConnectionType::kBluetooth: - return shill::kTypeBluetooth; case ConnectionType::kCellular: return shill::kTypeCellular; case ConnectionType::kDisconnected: diff --git a/connection_utils.h b/connection_utils.h index d5133a14..4e71fcf7 100644 --- a/connection_utils.h +++ b/connection_utils.h @@ -25,8 +25,6 @@ enum class ConnectionType { kDisconnected, kEthernet, kWifi, - kWimax, - kBluetooth, kCellular, kUnknown }; diff --git a/metrics_constants.h b/metrics_constants.h index 161d5856..167e577c 100644 --- a/metrics_constants.h +++ b/metrics_constants.h @@ -119,12 +119,12 @@ enum class ConnectionType { kUnknown = 0, // Unknown. kEthernet = 1, // Ethernet. kWifi = 2, // Wireless. - kWimax = 3, // WiMax. - kBluetooth = 4, // Bluetooth. kCellular = 5, // Cellular. kTetheredEthernet = 6, // Tethered (Ethernet). kTetheredWifi = 7, // Tethered (Wifi). kDisconnected = 8, // Disconnected. 
+ // deprecated: kWimax = 3, + // deprecated: kBluetooth = 4, kNumConstants, kUnset = -1 diff --git a/metrics_utils.cc b/metrics_utils.cc index 070626a4..88c8d524 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -266,12 +266,6 @@ metrics::ConnectionType GetConnectionType(ConnectionType type, else return metrics::ConnectionType::kWifi; - case ConnectionType::kWimax: - return metrics::ConnectionType::kWimax; - - case ConnectionType::kBluetooth: - return metrics::ConnectionType::kBluetooth; - case ConnectionType::kCellular: return metrics::ConnectionType::kCellular; } diff --git a/metrics_utils_unittest.cc b/metrics_utils_unittest.cc index e7c4c26b..6ea996fa 100644 --- a/metrics_utils_unittest.cc +++ b/metrics_utils_unittest.cc @@ -41,12 +41,6 @@ TEST(MetricsUtilsTest, GetConnectionType) { EXPECT_EQ( metrics::ConnectionType::kWifi, GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown)); - EXPECT_EQ( - metrics::ConnectionType::kWimax, - GetConnectionType(ConnectionType::kWimax, ConnectionTethering::kUnknown)); - EXPECT_EQ(metrics::ConnectionType::kBluetooth, - GetConnectionType(ConnectionType::kBluetooth, - ConnectionTethering::kUnknown)); EXPECT_EQ(metrics::ConnectionType::kCellular, GetConnectionType(ConnectionType::kCellular, ConnectionTethering::kUnknown)); diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc index f98b6b65..5b87a7b6 100644 --- a/update_manager/boxed_value_unittest.cc +++ b/update_manager/boxed_value_unittest.cc @@ -168,11 +168,6 @@ TEST(UmBoxedValueTest, ConnectionTypeToString) { BoxedValue(new ConnectionType(ConnectionType::kEthernet)).ToString()); EXPECT_EQ("wifi", BoxedValue(new ConnectionType(ConnectionType::kWifi)).ToString()); - EXPECT_EQ("wimax", - BoxedValue(new ConnectionType(ConnectionType::kWimax)).ToString()); - EXPECT_EQ( - "bluetooth", - BoxedValue(new ConnectionType(ConnectionType::kBluetooth)).ToString()); EXPECT_EQ( "cellular", BoxedValue(new ConnectionType(ConnectionType::kCellular)).ToString()); @@ -219,9 +214,9 @@ TEST(UmBoxedValueTest, RollbackToTargetVersionToString) { TEST(UmBoxedValueTest, SetConnectionTypeToString) { set* set1 = new set; - set1->insert(ConnectionType::kWimax); + set1->insert(ConnectionType::kCellular); set1->insert(ConnectionType::kEthernet); - EXPECT_EQ("ethernet,wimax", BoxedValue(set1).ToString()); + EXPECT_EQ("ethernet,cellular", BoxedValue(set1).ToString()); set* set2 = new set; set2->insert(ConnectionType::kWifi); diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 08c355ea..12d443d9 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -457,7 +457,7 @@ EvalStatus ChromeOSPolicy::UpdateCanStart( // TODO(garnold) The current logic generally treats the list of allowed // connections coming from the device policy as a whitelist, meaning that it // can only be used for enabling connections, but not disable them. Further, -// certain connection types (like Bluetooth) cannot be enabled even by policy. +// certain connection types cannot be enabled even by policy. // In effect, the only thing that device policy can change is to enable // updates over a cellular network (disabled by default). 
We may want to // revisit this semantics, allowing greater flexibility in defining specific @@ -488,10 +488,6 @@ EvalStatus ChromeOSPolicy::UpdateDownloadAllowed(EvaluationContext* ec, *result = true; bool device_policy_can_override = false; switch (conn_type) { - case ConnectionType::kBluetooth: - *result = false; - break; - case ConnectionType::kCellular: *result = false; device_policy_can_override = true; diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc index 25c91fa2..414ac0d0 100644 --- a/update_manager/chromeos_policy_unittest.cc +++ b/update_manager/chromeos_policy_unittest.cc @@ -1440,47 +1440,6 @@ TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiTetheredPolicyOverride) { EXPECT_TRUE(result); } -TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWimaxDefault) { - // Wimax is always allowed. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kWifi)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result); - EXPECT_TRUE(result); -} - -TEST_F(UmChromeOSPolicyTest, - UpdateCurrentConnectionNotAllowedBluetoothDefault) { - // Bluetooth is never allowed. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kBluetooth)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result); -} - -TEST_F(UmChromeOSPolicyTest, - UpdateCurrentConnectionNotAllowedBluetoothPolicyCannotOverride) { - // Bluetooth cannot be allowed even by policy. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kBluetooth)); - set allowed_connections; - allowed_connections.insert(ConnectionType::kBluetooth); - fake_state_.device_policy_provider() - ->var_allowed_connection_types_for_update() - ->reset(new set(allowed_connections)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result); -} - TEST_F(UmChromeOSPolicyTest, UpdateCurrentConnectionNotAllowedCellularDefault) { // Cellular is not allowed by default. diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc index 8f2c377b..84debd11 100644 --- a/update_manager/real_device_policy_provider_unittest.cc +++ b/update_manager/real_device_policy_provider_unittest.cc @@ -344,14 +344,14 @@ TEST_F(UmRealDevicePolicyProviderTest, AllowedTypesConverted) { #else .Times(1) #endif // USE_DBUS - .WillRepeatedly(DoAll( - SetArgPointee<0>(set{"bluetooth", "wifi", "not-a-type"}), - Return(true))); + .WillRepeatedly( + DoAll(SetArgPointee<0>(set{"ethernet", "wifi", "not-a-type"}), + Return(true))); EXPECT_TRUE(provider_->Init()); loop_.RunOnce(false); UmTestUtils::ExpectVariableHasValue( - set{ConnectionType::kWifi, ConnectionType::kBluetooth}, + set{ConnectionType::kWifi, ConnectionType::kEthernet}, provider_->var_allowed_connection_types_for_update()); } diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc index dcc729ab..505f2f80 100644 --- a/update_manager/real_shill_provider_unittest.cc +++ b/update_manager/real_shill_provider_unittest.cc @@ -51,8 +51,6 @@ namespace { // Fake service paths. 
const char* const kFakeEthernetServicePath = "/fake/ethernet/service"; const char* const kFakeWifiServicePath = "/fake/wifi/service"; -const char* const kFakeWimaxServicePath = "/fake/wimax/service"; -const char* const kFakeBluetoothServicePath = "/fake/bluetooth/service"; const char* const kFakeCellularServicePath = "/fake/cellular/service"; const char* const kFakeVpnServicePath = "/fake/vpn/service"; const char* const kFakeUnknownServicePath = "/fake/unknown/service"; @@ -317,21 +315,6 @@ TEST_F(UmRealShillProviderTest, ReadConnTypeWifi) { kFakeWifiServicePath, shill::kTypeWifi, ConnectionType::kWifi); } -// Test that Wimax connection is identified correctly. -TEST_F(UmRealShillProviderTest, ReadConnTypeWimax) { - InitWithDefaultService("/"); - SetupConnectionAndTestType( - kFakeWimaxServicePath, shill::kTypeWimax, ConnectionType::kWimax); -} - -// Test that Bluetooth connection is identified correctly. -TEST_F(UmRealShillProviderTest, ReadConnTypeBluetooth) { - InitWithDefaultService("/"); - SetupConnectionAndTestType(kFakeBluetoothServicePath, - shill::kTypeBluetooth, - ConnectionType::kBluetooth); -} - // Test that Cellular connection is identified correctly. TEST_F(UmRealShillProviderTest, ReadConnTypeCellular) { InitWithDefaultService("/"); From 8c4d0088faf0df0a93608f5e26f1881754c72bd8 Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Thu, 8 Aug 2019 08:56:16 -0700 Subject: [PATCH 087/624] Don't spam the log when there's no mapped device. This avoids writing "Destroying [] from device mapper" to log. Test: TreeHugger Change-Id: I5f8a0a62823a682d5a4511162c3731cc7d87fb9d --- dynamic_partition_control_android.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index d9d3be56..7f9c7932 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -165,6 +165,9 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( } void DynamicPartitionControlAndroid::CleanupInternal(bool wait) { + if (mapped_devices_.empty()) { + return; + } // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence // a copy is needed for the loop. std::set mapped = mapped_devices_; From bb90dfb5de8e6f70083e4db5b289acdde21f8684 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 13 Aug 2019 14:14:56 -0700 Subject: [PATCH 088/624] Update dynamic_partition_control_android for libfs_mgr API changes. CreateLogicalPartition now uses a helper struct rather than having 6+ parameters. 
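To make the shape of this API change concrete, here is a short, self-contained sketch of the parameter-struct pattern. The struct and function below are toy stand-ins (the real declarations live in libfs_mgr); only the field names are taken from the hunk that follows.

```cpp
// Toy illustration of replacing a 6+-argument call with a params struct and
// designated initializers (C++20, or the GNU extension used in this codebase).
#include <chrono>
#include <cstdint>
#include <iostream>
#include <string>

struct CreateLogicalPartitionParams {  // stand-in, not the libfs_mgr type
  std::string block_device;
  uint32_t metadata_slot = 0;
  std::string partition_name;
  bool force_writable = false;
  std::chrono::milliseconds timeout_ms{0};
};

bool CreateLogicalPartition(const CreateLogicalPartitionParams& params,
                            std::string* path) {
  // The real implementation talks to device-mapper; this stub only shows the
  // call shape.
  *path = "/dev/block/mapper/" + params.partition_name;
  return true;
}

int main() {
  // Every argument is named at the call site, so callers cannot silently
  // swap positional parameters, and new fields keep old callers valid.
  CreateLogicalPartitionParams params = {
      .block_device = "/dev/block/by-name/super",
      .metadata_slot = 1,
      .partition_name = "system_b",
      .force_writable = true,
      .timeout_ms = std::chrono::milliseconds(15000),
  };
  std::string path;
  if (CreateLogicalPartition(params, &path))
    std::cout << "mapped at " << path << '\n';
  return 0;
}
```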
Bug: 135752105 Test: manual test Change-Id: I33147a536c528ad71289c46cbe05ff0b94f34cbc --- dynamic_partition_control_android.cc | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 7f9c7932..5a2ccb1d 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -39,6 +39,7 @@ using android::base::Join; using android::dm::DeviceMapper; using android::dm::DmDeviceState; using android::fs_mgr::CreateLogicalPartition; +using android::fs_mgr::CreateLogicalPartitionParams; using android::fs_mgr::DestroyLogicalPartition; using android::fs_mgr::MetadataBuilder; using android::fs_mgr::Partition; @@ -86,12 +87,15 @@ bool DynamicPartitionControlAndroid::MapPartitionInternal( uint32_t slot, bool force_writable, std::string* path) { - if (!CreateLogicalPartition(super_device.c_str(), - slot, - target_partition_name, - force_writable, - std::chrono::milliseconds(kMapTimeoutMillis), - path)) { + CreateLogicalPartitionParams params = { + .block_device = super_device, + .metadata_slot = slot, + .partition_name = target_partition_name, + .force_writable = force_writable, + .timeout_ms = std::chrono::milliseconds(kMapTimeoutMillis), + }; + + if (!CreateLogicalPartition(params, path)) { LOG(ERROR) << "Cannot map " << target_partition_name << " in " << super_device << " on device mapper."; return false; From 72d512c227264fcd7f5156dd64ec09316aa77f8b Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Wed, 21 Aug 2019 15:20:35 -0700 Subject: [PATCH 089/624] Add a flag to disable the fec data computation The write of fec data is currently mandatory in the update engine. This step reduces the update package size but it's also time comsuming. So we add a flag to allow partners make the trade off and disable the on device fec data computation. Bug: 139723500 Test: generate and apply incremental updates for verified boot 1.0 & 2.0 Change-Id: Ic7c63396bb4d4fbbc3c3b2e9ff3804c9ff941f2f --- payload_generator/generate_delta_main.cc | 5 ++++ payload_generator/payload_generation_config.h | 3 +++ .../payload_generation_config_android.cc | 15 +++++++----- ...load_generation_config_android_unittest.cc | 18 +++++++++++++++ scripts/brillo_update_payload | 23 ++++++++++++++----- 5 files changed, 52 insertions(+), 12 deletions(-) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 3cb891f0..7c304cec 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -421,6 +421,9 @@ int Main(int argc, char** argv) { "", "An info file specifying dynamic partition metadata. 
" "Only allowed in major version 2 or newer."); + DEFINE_bool(disable_fec_computation, + false, + "Disables the fec data computation on device."); brillo::FlagHelper::Init( argc, @@ -527,6 +530,8 @@ int Main(int argc, char** argv) { << "Partition name can't be empty, see --partition_names."; payload_config.target.partitions.emplace_back(partition_names[i]); payload_config.target.partitions.back().path = new_partitions[i]; + payload_config.target.partitions.back().disable_fec_computation = + FLAGS_disable_fec_computation; if (i < new_mapfiles.size()) payload_config.target.partitions.back().mapfile_path = new_mapfiles[i]; } diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h index 584ac7b6..e90edde7 100644 --- a/payload_generator/payload_generation_config.h +++ b/payload_generator/payload_generation_config.h @@ -116,6 +116,9 @@ struct PartitionConfig { PostInstallConfig postinstall; VerityConfig verity; + + // Enables the on device fec data computation by default. + bool disable_fec_computation = false; }; // The ImageConfig struct describes a pair of binaries kernel and rootfs and the diff --git a/payload_generator/payload_generation_config_android.cc b/payload_generator/payload_generation_config_android.cc index 90c053f4..d950092b 100644 --- a/payload_generator/payload_generation_config_android.cc +++ b/payload_generator/payload_generation_config_android.cc @@ -63,11 +63,13 @@ bool AvbDescriptorCallback(const AvbDescriptor* descriptor, void* user_data) { part->verity.hash_tree_extent = ExtentForBytes( hashtree.hash_block_size, hashtree.tree_offset, hashtree.tree_size); - part->verity.fec_data_extent = - ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset); - part->verity.fec_extent = ExtentForBytes( - hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size); - part->verity.fec_roots = hashtree.fec_num_roots; + if (!part->disable_fec_computation) { + part->verity.fec_data_extent = + ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset); + part->verity.fec_extent = ExtentForBytes( + hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size); + part->verity.fec_roots = hashtree.fec_num_roots; + } return true; } @@ -205,7 +207,8 @@ bool ImageConfig::LoadVerityConfig() { ExtentForRange(hash_start_block, tree_size / block_size); } fec_ecc_metadata ecc_data; - if (fh.get_ecc_metadata(ecc_data) && ecc_data.valid) { + if (!part.disable_fec_computation && fh.get_ecc_metadata(ecc_data) && + ecc_data.valid) { TEST_AND_RETURN_FALSE(block_size == FEC_BLOCKSIZE); part.verity.fec_data_extent = ExtentForRange(0, ecc_data.blocks); part.verity.fec_extent = diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc index 53378c22..44eaf55e 100644 --- a/payload_generator/payload_generation_config_android_unittest.cc +++ b/payload_generator/payload_generation_config_android_unittest.cc @@ -160,6 +160,24 @@ TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigSimpleTest) { EXPECT_EQ(2u, verity.fec_roots); } +TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigDisableFecTest) { + brillo::Blob part = GetAVBPartition(); + test_utils::WriteFileVector(temp_file_.path(), part); + image_config_.partitions[0].disable_fec_computation = true; + EXPECT_TRUE(image_config_.LoadImageSize()); + EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem()); + EXPECT_TRUE(image_config_.LoadVerityConfig()); + const VerityConfig& verity = 
image_config_.partitions[0].verity; + EXPECT_FALSE(verity.IsEmpty()); + EXPECT_EQ(ExtentForRange(0, 2), verity.hash_tree_data_extent); + EXPECT_EQ(ExtentForRange(2, 1), verity.hash_tree_extent); + EXPECT_EQ("sha1", verity.hash_tree_algorithm); + brillo::Blob salt(kHashTreeSalt, std::end(kHashTreeSalt)); + EXPECT_EQ(salt, verity.hash_tree_salt); + EXPECT_EQ(0u, verity.fec_data_extent.num_blocks()); + EXPECT_EQ(0u, verity.fec_extent.num_blocks()); +} + TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigInvalidHashTreeTest) { brillo::Blob part = GetAVBPartition(); diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload index a238ddf5..23d2d7ec 100755 --- a/scripts/brillo_update_payload +++ b/scripts/brillo_update_payload @@ -28,12 +28,16 @@ # check verify a payload using paycheck (static testing) # # Generate command arguments: -# --payload generated unsigned payload output file -# --source_image if defined, generate a delta payload from the specified -# image to the target_image -# --target_image the target image that should be sent to clients -# --metadata_size_file if defined, generate a file containing the size of the -# payload metadata in bytes to the specified file +# --payload generated unsigned payload output file +# --source_image if defined, generate a delta payload from the +# specified image to the target_image +# --target_image the target image that should be sent to clients +# --metadata_size_file if defined, generate a file containing the size +# of the ayload metadata in bytes to the specified +# file +# --disable_fec_computation Disable the on device fec data computation for +# incremental update. This feature is enabled by +# default # # Hash command arguments: # --unsigned_payload the input unsigned payload to generate the hash from @@ -182,6 +186,9 @@ if [[ "${COMMAND}" == "generate" ]]; then "Optional: The maximum unix timestamp of the OS allowed to apply this \ payload, should be set to a number higher than the build timestamp of the \ system running on the device, 0 if not specified." + DEFINE_string disable_fec_computation "" \ + "Optional: Disables the on device fec data computation for incremental \ +update. This feature is enabled by default." fi if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then DEFINE_string unsigned_payload "" "Path to the input unsigned payload." @@ -656,6 +663,10 @@ cmd_generate() { if [[ -n "${FORCE_MINOR_VERSION}" ]]; then GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" ) fi + if [[ -n "${FLAGS_disable_fec_computation}" ]]; then + GENERATOR_ARGS+=( + --disable_fec_computation="${FLAGS_disable_fec_computation}" ) + fi fi if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then From 893cae4b0e5141bcaf9f56b1775e681c8f523630 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Fri, 16 Aug 2019 17:13:49 -0700 Subject: [PATCH 090/624] update_engine: Support milestones to EOL from Omaha Initiative to show EOL message on Chrome OS devices require that update_engine parses the fields within Omaha response that pertain to the new milestones to EOL field. The Omaha response will include a new field called "milestones_to_eol" which will be an integer value string. The job of update_engine when it comes to milestones to EOL from Omaha is to merely forward. No checks and no modifications of fields are done within update_engine besides being able to convert the milestones to EOL from a string to integer. 
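As a rough, self-contained sketch of the string-to-integer conversion described above: the snippet below mirrors the behavior of the new StringToMilestonesToEol() helper (invalid or empty input falls back to a "none" sentinel). The real implementation, shown further down in omaha_utils.cc, uses base::StringToInt() and the kMilestonesToEolNone constant; std::stoi is used here only to keep the example dependency-free.

```cpp
// Sketch of the milestones-to-EOL parsing behavior; not the actual
// update_engine code, which relies on libchrome's base::StringToInt().
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>

using MilestonesToEol = int;
const MilestonesToEol kMilestonesToEolNone = -1;  // "no value reported"

MilestonesToEol StringToMilestonesToEol(const std::string& value) {
  try {
    std::size_t consumed = 0;
    int parsed = std::stoi(value, &consumed);
    if (consumed == value.size())  // reject trailing garbage like "3abc"
      return parsed;
  } catch (const std::exception&) {
    // Empty or non-numeric input; fall through to the default below.
  }
  return kMilestonesToEolNone;
}

int main() {
  std::cout << StringToMilestonesToEol("3") << '\n';             // 3
  std::cout << StringToMilestonesToEol("") << '\n';              // -1
  std::cout << StringToMilestonesToEol("not_a_number") << '\n';  // -1
  return 0;
}
```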
BUG=chromium:994999 TEST=FEATURES="test" emerge-$BOARD update_engine update_engine-client TEST=cros deploy $IP update_engine update_engine-client TEST=test_that -b $BOARD $IP autoupdate_EOL # from Cq-Depend TEST=test_that -b $BOARD $IP autoupdate_EOL.milestones # from Cq-Depend Cq-Depend:chromium:1761371 Change-Id: I268e4c8e641b17d6a727a50f53285cc97c76eb22 Reviewed-on: https://chromium-review.googlesource.com/1759285 Tested-by: Jae Hoon Kim Commit-Ready: ChromeOS CL Exonerator Bot Legacy-Commit-Queue: Commit Bot Reviewed-by: Nicolas Norvez Reviewed-by: Amin Hassani --- client_library/client_dbus.cc | 5 +- client_library/client_dbus.h | 3 +- client_library/include/update_engine/client.h | 6 +- common/constants.cc | 1 + common/constants.h | 1 + common_service.cc | 19 ++++- common_service.h | 9 ++- common_service_unittest.cc | 81 +++++++++++++++++-- ...rg.chromium.UpdateEngineInterface.dbus-xml | 1 + dbus_service.cc | 5 +- dbus_service.h | 9 ++- omaha_request_action.cc | 28 +++++-- omaha_request_action.h | 6 +- omaha_request_action_unittest.cc | 59 ++++++++++++++ omaha_utils.cc | 23 ++++-- omaha_utils.h | 17 ++++ omaha_utils_unittest.cc | 8 ++ update_engine_client.cc | 17 +++- 18 files changed, 261 insertions(+), 37 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index e2defe7a..d0f7f81b 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -270,8 +270,9 @@ bool DBusUpdateEngineClient::GetLastAttemptError( return proxy_->GetLastAttemptError(last_attempt_error, nullptr); } -bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status) const { - return proxy_->GetEolStatus(eol_status, nullptr); +bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status, + int32_t* milestones_to_eol) const { + return proxy_->GetEolStatus(eol_status, milestones_to_eol, nullptr); } } // namespace internal diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index c9631cf7..3f782e91 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -85,7 +85,8 @@ class DBusUpdateEngineClient : public UpdateEngineClient { bool GetLastAttemptError(int32_t* last_attempt_error) const override; - bool GetEolStatus(int32_t* eol_status) const override; + bool GetEolStatus(int32_t* eol_status, + int32_t* milestones_to_eol) const override; private: void DBusStatusHandlersRegistered(const std::string& interface, diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index 89f36af6..65a32675 100644 --- a/client_library/include/update_engine/client.h +++ b/client_library/include/update_engine/client.h @@ -135,8 +135,10 @@ class UpdateEngineClient { // Get the last UpdateAttempt error code. virtual bool GetLastAttemptError(int32_t* last_attempt_error) const = 0; - // Get the current end-of-life status code. See EolStatus enum for details. - virtual bool GetEolStatus(int32_t* eol_status) const = 0; + // Get the current end-of-life status code and milestones to end-of-life. + // See |EolStatus| enum and |MilestonesToEol| enum for details. + virtual bool GetEolStatus(int32_t* eol_status, + int32_t* milestones_to_eol) const = 0; protected: // Use CreateInstance(). 
diff --git a/common/constants.cc b/common/constants.cc index 87bdf911..6f37e16e 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -56,6 +56,7 @@ const char kPrefsOmahaCohort[] = "omaha-cohort"; const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint"; const char kPrefsOmahaCohortName[] = "omaha-cohort-name"; const char kPrefsOmahaEolStatus[] = "omaha-eol-status"; +const char kPrefsOmahaMilestonesToEol[] = "omaha-milestones-to-eol"; const char kPrefsP2PEnabled[] = "p2p-enabled"; const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp"; const char kPrefsP2PNumAttempts[] = "p2p-num-attempts"; diff --git a/common/constants.h b/common/constants.h index d95a56a1..6034dbd6 100644 --- a/common/constants.h +++ b/common/constants.h @@ -57,6 +57,7 @@ extern const char kPrefsOmahaCohort[]; extern const char kPrefsOmahaCohortHint[]; extern const char kPrefsOmahaCohortName[]; extern const char kPrefsOmahaEolStatus[]; +extern const char kPrefsOmahaMilestonesToEol[]; extern const char kPrefsP2PEnabled[]; extern const char kPrefsP2PFirstAttemptTimestamp[]; extern const char kPrefsP2PNumAttempts[]; diff --git a/common_service.cc b/common_service.cc index 0d5ee6dc..466007f5 100644 --- a/common_service.cc +++ b/common_service.cc @@ -413,18 +413,31 @@ bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */, } bool UpdateEngineService::GetEolStatus(ErrorPtr* error, - int32_t* out_eol_status) { + int32_t* out_eol_status, + int32_t* out_milestones_to_eol) { PrefsInterface* prefs = system_state_->prefs(); + // Set EOL. string str_eol_status; if (prefs->Exists(kPrefsOmahaEolStatus) && !prefs->GetString(kPrefsOmahaEolStatus, &str_eol_status)) { LogAndSetError(error, FROM_HERE, "Error getting the end-of-life status."); return false; } - - // StringToEolStatus will return kSupported for invalid values. + // |StringToEolStatus()| will return |kSupported| for invalid values. *out_eol_status = static_cast(StringToEolStatus(str_eol_status)); + + // Set milestones to EOL. + string str_milestones_to_eol; + if (prefs->Exists(kPrefsOmahaMilestonesToEol) && + !prefs->GetString(kPrefsOmahaMilestonesToEol, &str_milestones_to_eol)) { + LogAndSetError(error, FROM_HERE, "Error getting the milestones to EOL."); + return false; + } + // |StringToMilestonesToEol()| will return |kMilestonesToEolNone| for invalid + // values. + *out_milestones_to_eol = StringToMilestonesToEol(str_milestones_to_eol); + return true; } diff --git a/common_service.h b/common_service.h index f93855d9..5244e99f 100644 --- a/common_service.h +++ b/common_service.h @@ -153,9 +153,12 @@ class UpdateEngineService { bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error); - // Returns the current end-of-life status of the device. This value is updated - // on every update check and persisted on disk across reboots. - bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status); + // Returns the current EOL status of the device and the milestones to + // EOL if marked EOL. The values are updated on every update check and + // persisted on disk across reboots. 
+ bool GetEolStatus(brillo::ErrorPtr* error, + int32_t* out_eol_status, + int32_t* out_milestones_to_eol); private: SystemState* system_state_; diff --git a/common_service_unittest.cc b/common_service_unittest.cc index 65202a06..68b24684 100644 --- a/common_service_unittest.cc +++ b/common_service_unittest.cc @@ -169,19 +169,90 @@ TEST_F(UpdateEngineServiceTest, ResetStatusFails) { UpdateEngineService::kErrorFailed)); } -TEST_F(UpdateEngineServiceTest, GetEolStatusTest) { +TEST_F(UpdateEngineServiceTest, GetEolStatusTestWithMilestonesDefault) { FakePrefs fake_prefs; fake_system_state_.set_prefs(&fake_prefs); - // The default value should be "supported". + // The default value for EOL be |kSupported| and milestone + // |kMilestonesToEolNone|. int32_t eol_status = static_cast(EolStatus::kEol); - EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status)); + MilestonesToEol milestones_to_eol = kMilestonesToEolNone; + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + EXPECT_EQ(nullptr, error_); + EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); + EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); +} + +TEST_F(UpdateEngineServiceTest, GetEolStatusMilestonesToEolNone) { + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); + int32_t eol_status = static_cast(EolStatus::kEol); + MilestonesToEol milestones_to_eol = kMilestonesToEolNone; + + // Device is supported and no milestones to EOL set. + fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSupported); + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); EXPECT_EQ(nullptr, error_); EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); + EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); - fake_prefs.SetString(kPrefsOmahaEolStatus, "security-only"); - EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status)); + // Device is security only and no milestones to EOL set. + fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSecurityOnly); + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); EXPECT_EQ(nullptr, error_); EXPECT_EQ(EolStatus::kSecurityOnly, static_cast(eol_status)); + EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); + + // Device is EOL and no milestones to EOL set. + fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusEol); + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + EXPECT_EQ(nullptr, error_); + EXPECT_EQ(EolStatus::kEol, static_cast(eol_status)); + EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); +} + +TEST_F(UpdateEngineServiceTest, GetEolStatusMilestonesToEolTwo) { + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); + int32_t eol_status = static_cast(EolStatus::kEol); + MilestonesToEol milestones_to_eol = kMilestonesToEolNone; + + // Device is supported and milestones to EOL is n-2. + fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSupported); + fake_prefs.SetString(kPrefsOmahaMilestonesToEol, "2"); + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + EXPECT_EQ(nullptr, error_); + EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); + EXPECT_EQ(2, milestones_to_eol); + + // Device is security only and milestones to EOL is n-2. 
+ fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSecurityOnly); + fake_prefs.SetString(kPrefsOmahaMilestonesToEol, "2"); + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + EXPECT_EQ(nullptr, error_); + EXPECT_EQ(EolStatus::kSecurityOnly, static_cast(eol_status)); + EXPECT_EQ(2, milestones_to_eol); +} + +TEST_F(UpdateEngineServiceTest, GetEolStatusMilestonesToEolZero) { + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); + int32_t eol_status = static_cast(EolStatus::kEol); + MilestonesToEol milestones_to_eol = kMilestonesToEolNone; + + // Device is EOL and milestones to EOL is n. + fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusEol); + fake_prefs.SetString(kPrefsOmahaMilestonesToEol, "0"); + EXPECT_TRUE( + common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + EXPECT_EQ(nullptr, error_); + EXPECT_EQ(EolStatus::kEol, static_cast(eol_status)); + EXPECT_EQ(0, milestones_to_eol); } } // namespace chromeos_update_engine diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index a1831476..5671fde9 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -139,6 +139,7 @@ + diff --git a/dbus_service.cc b/dbus_service.cc index b3796030..168265d8 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -212,8 +212,9 @@ bool DBusUpdateEngineService::GetLastAttemptError( } bool DBusUpdateEngineService::GetEolStatus(ErrorPtr* error, - int32_t* out_eol_status) { - return common_->GetEolStatus(error, out_eol_status); + int32_t* out_eol_status, + int32_t* out_milestone_to_eol) { + return common_->GetEolStatus(error, out_eol_status, out_milestone_to_eol); } UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state) diff --git a/dbus_service.h b/dbus_service.h index 2babf8c7..9d796094 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -157,8 +157,13 @@ class DBusUpdateEngineService bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error) override; - // Returns the current end-of-life status of the device in |out_eol_status|. - bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status) override; + // Returns the current EOL status of the device in |out_eol_status| and the + // milestones to EOL of the device in |out_milestones_to_eol| for EOL devices. + // In the case that milestones to EOL doesn't exists for EOL, it will default + // to |kMilestonesToEolNone|. + bool GetEolStatus(brillo::ErrorPtr* error, + int32_t* out_eol_status, + int32_t* out_milestones_to_eol) override; private: std::unique_ptr common_; diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 6c67a3b7..8da7e299 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -109,6 +109,7 @@ constexpr char kValNoUpdate[] = "noupdate"; // updatecheck attributes (without the underscore prefix). 
constexpr char kAttrEol[] = "eol"; +constexpr char kAttrMilestonesToEol[] = "milestones_to_eol"; constexpr char kAttrRollback[] = "rollback"; constexpr char kAttrFirmwareVersion[] = "firmware_version"; constexpr char kAttrKernelVersion[] = "kernel_version"; @@ -1315,13 +1316,28 @@ bool OmahaRequestAction::PersistCohortData(const string& prefs_key, bool OmahaRequestAction::PersistEolStatus(const map& attrs) { auto eol_attr = attrs.find(kAttrEol); - if (eol_attr != attrs.end()) { - return system_state_->prefs()->SetString(kPrefsOmahaEolStatus, - eol_attr->second); - } else if (system_state_->prefs()->Exists(kPrefsOmahaEolStatus)) { - return system_state_->prefs()->Delete(kPrefsOmahaEolStatus); + auto milestones_to_eol_attr = attrs.find(kAttrMilestonesToEol); + + bool ret = true; + if (milestones_to_eol_attr == attrs.end()) { + system_state_->prefs()->Delete(kPrefsOmahaMilestonesToEol); + if (eol_attr != attrs.end()) { + LOG(WARNING) << "Milestones to EOL missing when EOL."; + } + } else if (!system_state_->prefs()->SetString( + kPrefsOmahaMilestonesToEol, milestones_to_eol_attr->second)) { + LOG(ERROR) << "Setting milestones to EOL failed."; + ret = false; } - return true; + + if (eol_attr == attrs.end()) { + system_state_->prefs()->Delete(kPrefsOmahaEolStatus); + } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolStatus, + eol_attr->second)) { + LOG(ERROR) << "Setting EOL failed."; + ret = false; + } + return ret; } void OmahaRequestAction::ActionCompleted(ErrorCode code) { diff --git a/omaha_request_action.h b/omaha_request_action.h index 8dffb5c0..0c256a8e 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -144,6 +144,7 @@ class OmahaRequestAction : public Action, GetInstallDateWhenOOBECompletedWithValidDate); FRIEND_TEST(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges); + FRIEND_TEST(OmahaRequestActionTest, PersistEolStatusTest); friend class UpdateAttempterTest; FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha); FRIEND_TEST(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow); @@ -188,8 +189,9 @@ class OmahaRequestAction : public Action, bool PersistCohortData(const std::string& prefs_key, const std::string& new_value); - // Parse and persist the end-of-life status flag sent back in the updatecheck - // tag attributes. The flag will be validated and stored in the Prefs. + // Parse and persist the end-of-life status flag and milestones to EOL sent + // back in the updatecheck tag attributes. The flag will be validated and + // stored in the Prefs. 
bool PersistEolStatus(const std::map& attrs); // If this is an update check request, initializes diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 8008e008..11633b67 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -2012,6 +2012,65 @@ TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) { EXPECT_EQ("security-only", eol_pref); } +TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesEolTest) { + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + + string eol_pref, milestones_to_eol_pref; + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); + EXPECT_EQ("eol", eol_pref); + EXPECT_TRUE(fake_system_state_.prefs()->GetString(kPrefsOmahaMilestonesToEol, + &milestones_to_eol_pref)); + EXPECT_EQ("0", milestones_to_eol_pref); +} + +TEST_F(OmahaRequestActionTest, + ParseUpdateCheckAttributesMissingMilestonesToEolTest) { + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + + string eol_pref, milestones_to_eol_pref; + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); + EXPECT_EQ("eol", eol_pref); + EXPECT_FALSE(fake_system_state_.prefs()->Exists(kPrefsOmahaMilestonesToEol)); +} + +TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesMilestonesToEolTest) { + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + + string eol_pref, milestones_to_eol_pref; + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); + EXPECT_EQ("supported", eol_pref); + EXPECT_TRUE(fake_system_state_.prefs()->GetString(kPrefsOmahaMilestonesToEol, + &milestones_to_eol_pref)); + EXPECT_EQ("3", milestones_to_eol_pref); +} + TEST_F(OmahaRequestActionTest, NoUniqueIDTest) { tuc_params_.http_response = "invalid xml>"; tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; diff --git a/omaha_utils.cc b/omaha_utils.cc index 6bd75250..dffa5d3a 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -17,17 +17,15 @@ #include "update_engine/omaha_utils.h" #include +#include namespace chromeos_update_engine { -namespace { - -// The possible string values for the end-of-life status. 
const char kEolStatusSupported[] = "supported"; const char kEolStatusSecurityOnly[] = "security-only"; const char kEolStatusEol[] = "eol"; -} // namespace +const MilestonesToEol kMilestonesToEolNone = -1; const char* EolStatusToString(EolStatus eol_status) { switch (eol_status) { @@ -50,8 +48,23 @@ EolStatus StringToEolStatus(const std::string& eol_status) { return EolStatus::kSecurityOnly; if (eol_status == kEolStatusEol) return EolStatus::kEol; - LOG(WARNING) << "Invalid end-of-life attribute: " << eol_status; + LOG(WARNING) << "Invalid EOL attribute: " << eol_status; return EolStatus::kSupported; } +std::string MilestonesToEolToString(MilestonesToEol milestones_to_eol) { + return base::IntToString(milestones_to_eol); +} + +MilestonesToEol StringToMilestonesToEol(const std::string& milestones_to_eol) { + MilestonesToEol milestone = kMilestonesToEolNone; + if (!base::StringToInt(milestones_to_eol, &milestone)) { + LOG(WARNING) << "Invalid milestones to EOL attribute: " + << milestones_to_eol; + return kMilestonesToEolNone; + } + + return milestone; +} + } // namespace chromeos_update_engine diff --git a/omaha_utils.h b/omaha_utils.h index 86145403..60004515 100644 --- a/omaha_utils.h +++ b/omaha_utils.h @@ -21,6 +21,11 @@ namespace chromeos_update_engine { +// The possible string values for the end-of-life status. +extern const char kEolStatusSupported[]; +extern const char kEolStatusSecurityOnly[]; +extern const char kEolStatusEol[]; + // The end-of-life status of the device. enum class EolStatus { kSupported = 0, @@ -28,6 +33,10 @@ enum class EolStatus { kEol, }; +using MilestonesToEol = int; +// The default milestones to EOL. +extern const MilestonesToEol kMilestonesToEolNone; + // Returns the string representation of the |eol_status|. const char* EolStatusToString(EolStatus eol_status); @@ -35,6 +44,14 @@ const char* EolStatusToString(EolStatus eol_status); // of an invalid string, the default "supported" value will be used instead. EolStatus StringToEolStatus(const std::string& eol_status); +// Returns the string representation of the |milestones_to_eol|. +std::string MilestonesToEolToString(int milestones_to_eol); + +// Converts the milestones to EOL string to an |MilestonesToEol| enum class. +// When the milestones to EOL is not an integer, the default +// |kMilestonesToEolNone| will be returned. 
+MilestonesToEol StringToMilestonesToEol(const std::string& milestones_to_eol); + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_OMAHA_UTILS_H_ diff --git a/omaha_utils_unittest.cc b/omaha_utils_unittest.cc index 8ceb76bf..59c03660 100644 --- a/omaha_utils_unittest.cc +++ b/omaha_utils_unittest.cc @@ -39,4 +39,12 @@ TEST(OmahaUtilsTest, EolStatusTest) { EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!")); } +TEST(OmahaUtilsTest, MilestonesToEolTest) { + EXPECT_EQ(kMilestonesToEolNone, StringToMilestonesToEol("")); + EXPECT_EQ(kMilestonesToEolNone, StringToMilestonesToEol("not_a_number")); + EXPECT_EQ(1, StringToMilestonesToEol("1")); + EXPECT_EQ(0, StringToMilestonesToEol("0")); + EXPECT_EQ(-1, StringToMilestonesToEol("-1")); +} + } // namespace chromeos_update_engine diff --git a/update_engine_client.cc b/update_engine_client.cc index 954e856d..9748c4d4 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -41,6 +41,7 @@ using chromeos_update_engine::EolStatus; using chromeos_update_engine::ErrorCode; +using chromeos_update_engine::MilestonesToEol; using chromeos_update_engine::UpdateEngineStatusToString; using chromeos_update_engine::UpdateStatusToString; using chromeos_update_engine::utils::ErrorCodeToString; @@ -559,12 +560,20 @@ int UpdateEngineClient::ProcessFlags() { } if (FLAGS_eol_status) { - int eol_status; - if (!client_->GetEolStatus(&eol_status)) { - LOG(ERROR) << "Error getting the end-of-life status."; + int eol_status, milestones_to_eol; + if (!client_->GetEolStatus(&eol_status, &milestones_to_eol)) { + LOG(ERROR) << "Error getting the end-of-life status and milestones to " + "end-of-life."; } else { EolStatus eol_status_code = static_cast(eol_status); - printf("EOL_STATUS=%s\n", EolStatusToString(eol_status_code)); + MilestonesToEol milestones_to_eol_code = milestones_to_eol; + printf( + "EOL_STATUS=%s\n" + "MILESTONES_TO_EOL=%s\n", + EolStatusToString(eol_status_code), + chromeos_update_engine::MilestonesToEolToString( + milestones_to_eol_code) + .c_str()); } } From 3b7544b621970037dd295b0435595d231a2ff5c4 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 19 Aug 2019 01:02:18 -0700 Subject: [PATCH 091/624] update_engine: Add README.md documentation Add the most awaited documentation for update_engine. BUG=none TEST=docs/scripts/review_docs README.md Change-Id: Iaca57856ac5ef54d15cbba95467e6a25165678ed Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1760688 Reviewed-by: Mike Frysinger Commit-Queue: Amin Hassani Tested-by: Amin Hassani --- README.md | 633 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 633 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..7071cf10 --- /dev/null +++ b/README.md @@ -0,0 +1,633 @@ +# Chrome OS Update Process + +[TOC] + +System updates in more modern operating systems like Chrome OS and Android are +called A/B updates, over-the-air ([OTA]) updates, seamless updates, or simply +auto updates. In contrast to more primitive system updates (like Windows or +macOS) where the system is booted into a special mode to override the system +partitions with newer updates and may take several minutes or hours, A/B updates +have several advantages including but not limited to: + +* Updates maintain a workable system that remains on the disk during and after + an update. Hence, reducing the likelihood of corrupting a device into a + non-usable state. 
It also reduces the need for flashing devices manually or at + repair and warranty centers, etc. +* Updates can happen while the system is running (normally with minimum + overhead) without interrupting the user. The only downside for users is a + required reboot (or, in Chrome OS, a sign out, which automatically causes a + reboot if an update was performed; the reboot takes about 10 + seconds and is no different from a normal reboot). +* The user does not need to request an update (although they can). The + update checks happen periodically in the background. +* If the update fails to apply, the user is not affected. The user will + continue on the old version of the system and the system will attempt to + apply the update again at a later time. +* If the update applies correctly but fails to boot, the system will roll back + to the old partition and the user can still use the system as usual. +* The user does not need to reserve enough space for the update. The system + has already reserved enough space in terms of two copies (A and B) of a + partition. The system doesn’t even need any cache space on the disk, + everything happens seamlessly from network to memory to the inactive + partitions. + +## Life of an A/B Update + +In A/B update capable systems, each partition, such as the kernel or root (or +other artifacts like [DLC]), has two copies. We call these two copies active (A) +and inactive (B). The system is booted into the active partition (depending on +which copy has the higher priority at boot time) and when a new update is +available, it is written into the inactive partition. After a successful reboot, +the previously inactive partition becomes active and the old active partition +becomes inactive. + +But everything starts with generating update payloads on (Google) servers for +each new system image. Once the update payloads are generated, they are signed +with specific keys and stored in a location known to an update server (Omaha). + +When the updater client initiates an update (either periodically or user +initiated), it first consults different device policies to see if the update +check is allowed. For example, device policies can prevent an update check +during certain times of the day, or require the update check time to be +scattered randomly throughout the day, etc. + +Once policies allow for the update check, the updater client sends a request to +the update server (all this communication happens over HTTPS) and identifies its +parameters like its Application ID, hardware ID, version, board, etc. Then, if +the update server decides to serve an update payload, it will respond with all +the parameters needed to perform an update, like the URLs to download the +payloads, the metadata signatures, the payload size and hash, etc. The updater +client continues communicating with the update server after different state +changes, like reporting that it started to download the payload or finished +the update, or that the update failed with specific error codes, etc. + +Each payload consists of two main sections: metadata and extra data. The +metadata is basically a list of operations that should be performed for an +update. The extra data contains the data blobs needed by some or all of these +operations. The updater client first downloads the metadata and +cryptographically verifies it using the provided signatures from the update +server’s response.
Once the metadata is verified as valid, the rest of the +payload can easily be verified cryptographically (mostly through SHA256 hashes). + +Next, the updater client marks the inactive partition as unbootable (because it +needs to write the new updates into it). At this point the system cannot +roll back to the inactive partition anymore. + +Then, the updater client performs the operations defined in the metadata (in the +order they appear in the metadata) and the rest of the payload is gradually +downloaded when these operations require their data. Once an operation is +finished, its data is discarded. This eliminates the need for caching the entire +payload before applying it. During this process the updater client periodically +checkpoints the last operation performed so that, in the event of a failure or system +shutdown, etc., it can continue from where it left off without redoing all +operations from the beginning. + +During the download, the updater client hashes the downloaded bytes and when the +download finishes, it checks the payload signature (located at the end of the +payload). If the signature cannot be verified, the update is rejected. + +After the inactive partition is updated, the entire partition is re-read, hashed, +and compared to a hash value passed in the metadata to make sure the update was +successfully written into the partition. + +In the next step, the [Postinstall] process (if any) is called. The postinstall +reconstructs the dm-verity tree hash of the ROOT partition and writes it at the +end of the partition (after the last block of the file system). The postinstall +can also perform any board-specific or firmware update tasks necessary. If +postinstall fails, the entire update is considered failed. + +Then the updater client goes into a state that indicates the update has +completed and the user needs to reboot the system. At this point, until the user +reboots (or signs out), the updater client will not do any more system updates +even if newer updates are available. However, it does continue to perform +periodic update checks so we can have statistics on the number of active devices +in the field. + +After the update has proven successful, the inactive partition is marked to have a +higher priority (on a boot, a partition with higher priority is booted +first). Once the user reboots the system, it will boot into the updated +partition and it is marked as active. At this point, after the reboot, the +updater client calls into the [`chromeos-setgoodkernel`] program. The program +verifies the integrity of the system partitions using dm-verity and marks +the active partition as healthy. At this point the system is basically updated +successfully. + +## Update Engine Daemon + +The `update_engine` is a single-threaded daemon process that runs all the +time. This process is the heart of auto updates. It runs with lower +priority in the background and is one of the last processes to start after a +system boot. Different clients (like Chrome or other services) can send requests +for update checks to the update engine. The details of how requests are passed +to the update engine are system dependent, but in Chrome OS it is D-Bus. Look at +the [D-Bus interface] for a list of all available methods. + +There are many resiliency features embedded in the update engine that make auto +updates robust, including but not limited to: + +* If the update engine crashes, it will restart automatically.
+* During an active update it periodically checkpoints the state of the update + and if it fails to continue the update or crashes in the middle, it will + continue from the last checkpoint. +* It retries failed network communication. +* If it fails to apply a delta payload (due to bit changes on the active + partition) a few times, it switches to a full payload. + +The updater client writes its active preferences in +`/var/lib/update_engine/prefs`. These preferences help with tracking changes +during the lifetime of the updater client and allow properly continuing the +update process after failed attempts or crashes. + +The core update engine code base in a Chromium OS checkout is located in +`src/aosp/system/update_engine`, which fetches [this repository]. + +### Policy Management + +In Chrome OS, devices are allowed to accept different policies from their +managing organizations. Some of these policies affect how/when updates should be +performed. For example, an organization may want to scatter the update checks +during certain times of the day so as not to interfere with normal +business. Within the update engine daemon, [UpdateManager] has the +responsibility of loading such policies and making different decisions based on +them. For example, some policies may allow the act of checking for updates to +happen, while they prevent downloading the update payload. Or some policies +don’t allow the update check within certain time frames, etc. Anything that +relates to the Chrome OS update policies should be contained within the +[update_manager] directory in the source code. + +### Rollback vs. Enterprise Rollback + +Chrome OS defines a concept of Rollback: Whenever a newly updated system does +not work as it is intended, under certain circumstances the device can be rolled +back to a previously working version. There are two types of rollback supported +in Chrome OS: A (legacy, original) rollback and an enterprise rollback (I know, +naming is confusing). + +A normal rollback, which has existed for as long as Chrome OS has had an auto updater, +is performed by switching the currently inactive partition into the active +partition and rebooting into it. It is as simple as running a successful +postinstall on the inactive partition, and rebooting the device. It is a feature +used by Chrome that happens under certain circumstances. Of course rollback +can’t happen if the inactive partition has been tampered with or has been nuked +by the updater client to install an even newer update. Normally a rollback is +followed by a Powerwash which clobbers the stateful partition. + +Enterprise rollback is a new feature added to allow enterprise users to +downgrade the installed image to an older version. It is very similar to a +normal system update, except that an older update payload is downloaded and +installed. There is no direct API for entering enterprise rollback. It +is managed by the enterprise device policies only. + +Developers should be careful when touching any rollback-related feature and make +sure they know exactly which of these two features they are trying to adapt. + +### Interactive vs. Non-Interactive vs. Forced Updates + +Non-interactive updates are updates that are scheduled periodically by the +update engine and happen in the background. Interactive updates, on the other +hand, happen when a user specifically requests an update check (e.g. by clicking +on the “Check For Update” button in Chrome OS’s About page).
Depending on the update +server’s policies, interactive updates have a higher priority than non-interactive +updates (by carrying marker hints). The servers may decide not to provide an update if +the server load is high, etc. There are other internal differences between +these two types of updates too. For example, interactive updates try to install +the update faster. + +Forced updates are similar to interactive updates (initiated by some kind of +user action), but they can also be configured to act as non-interactive. Since +non-interactive updates happen periodically, a forced-non-interactive update +causes a non-interactive update at the moment of the request, not at a later +time. We can call a forced non-interactive update with: + +```bash +update_engine_client --interactive=false --check_for_update +``` + +### P2P Updates + +Many organizations might not have the external bandwidth that +system updates require for all their devices. To help with this, Chrome OS can act +as a payload server to other client devices in the same network subnet. This is +basically a peer-to-peer update system that allows the devices to download the +update payloads from other devices in the network. P2P updates have to be enabled +explicitly in the organization through device policies and specific network +configurations. Regardless of the location +of update payloads, all update requests go through update servers over HTTPS. + +Check out the [P2P update related code] for both the server and the client side. + +### Network + +The updater client has the capability to download the payloads using Ethernet, +WiFi, or Cellular networks, depending on which one the device is connected +to. Downloading over Cellular networks will prompt permission from the user as +it can consume a considerable amount of data. + +### Logs + +In Chrome OS the `update_engine` logs are located in the `/var/log/update_engine` +directory. Whenever `update_engine` starts, it starts a new log file with the +current date-time in the log file’s name +(`update_engine.log-DATE-TIME`). Many log files can be seen in +`/var/log/update_engine` after a few restarts of the update engine or after the +system reboots. The latest active log is symlinked to +`/var/log/update_engine.log`. + +## Update Payload Generation + +The update payload generation is the process of converting a set of +partitions/files into a format that is both understandable by the updater client +(especially by much older versions of it) and securely verifiable. This +process involves breaking the input partitions into smaller components and +compressing them in order to help with network bandwidth when downloading the +payloads. + +For each generated payload, there is a corresponding properties file which +contains the metadata information of the payload in JSON format. Normally the +file is located in the same location as the generated payload and its file name +is the same as the payload file name plus a `.json` +postfix. e.g. `/path/to/payload.bin` and `/path/to/payload.bin.json`. This +properties file is necessary in order to do any kind of auto update in [`cros +flash`], AU autotests, etc. Similarly, the update server uses this file to +dispatch the payload properties to the updater clients. + +Once update payloads are generated, their original images cannot be changed +anymore; otherwise, the update payloads may no longer apply.
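+
+To make the properties file mentioned above concrete, it can simply be printed
+next to its payload. The exact key names come from the payload generation tools
+and vary between tool versions, so treat the fields shown below as illustrative
+assumptions rather than an authoritative schema:
+
+```bash
+# Hypothetical example output; real key names and values depend on the generator.
+cat /path/to/payload.bin.json
+# {"is_delta": true, "metadata_size": 75771, "metadata_signature": "...",
+#  "sha256_hex": "...", "size": 130554631, "target_version": "12345.0.0"}
+```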
+ +`delta_generator` is a tool with a wide range of options for generating +different types of update payloads. Its code is located in +`update_engine/payload_generator`. This directory contains all the source code +related to the mechanics of generating an update payload. None of the files in this +directory should be included or used in any other library/executable other than +`delta_generator`, which means this directory does not get compiled into the +rest of the update engine tools. + +However, it is not recommended to use `delta_generator` directly. To make manual +payload generation easier, use [`cros_generate_update_payloads`]. Most +of the higher level policies and tools for generating payloads reside as a +library in [`chromite/lib/paygen`]. Whenever calls to the update payload +generation API are needed, this library should be used instead. + +### Update Payload File Specification + +Each update payload file has a specific structure defined in the table below: + |Field|Size (bytes)|Type|Description| |-----|------------|----|-----------| |Magic Number|4|char[4]|Magic string "CrAU" identifying this is an update payload.| |Major Version|8|uint64|Payload major version number.| |Manifest Size|8|uint64|Manifest size in bytes.| |Manifest Signature Size|4|uint32|Manifest signature blob size in bytes (only in major version 2).| |Manifest|Varies|[DeltaArchiveManifest]|The list of operations to be performed.| |Manifest Signature|Varies|[Signatures]|The signature of the first five fields. There could be multiple signatures if the key has changed.| |Payload Data|Varies|List of raw or compressed data blobs|The list of binary blobs used by operations in the metadata.| |Payload Signature Size|Varies|uint64|The size of the payload signature.| |Payload Signature|Varies|[Signatures]|The signature of the entire payload except the metadata signature. There could be multiple signatures if the key has changed.| + +### Delta vs. Full Update Payloads + +There are two types of payload: Full and Delta. A full payload is generated +solely from the target image (the image we want to update to) and has all the +data necessary to update the inactive partition. Hence, full payloads can be +quite large in size. A delta payload, on the other hand, is a differential +update generated by comparing the source image (the active partitions) and the +target image and producing the diffs between these two images, similar to +applications like `diff` or `bsdiff`. Hence, +updating the system using the delta payloads requires the system to read parts +of the active partition in order to update the inactive partition (or +reconstruct the target partition). The delta payloads are significantly smaller +than the full payloads. The structure of the payload is the same for both types. + +Payload generation is quite resource intensive and its tools are implemented +with high parallelism. + +#### Generating Full Payloads + +A full payload is generated by breaking the partition into 2MiB (configurable) +chunks and either compressing them using the bzip2 or XZ algorithms or keeping them as +raw data, depending on which produces the smaller output. Full payloads are much larger +in comparison to delta payloads and hence require a longer download time if the +network bandwidth is limited. On the other hand, full payloads are a bit faster +to apply because the system doesn’t need to read data from the source partition.
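+
+As a rough illustration of the "keep raw or compress, whichever is smaller"
+decision described above, the sketch below splits a partition image into 2MiB
+chunks and compares the raw, bzip2, and xz sizes of each chunk. This is only a
+sketch of the idea, not `delta_generator`'s actual code path; the chunk size and
+compressor invocations are assumptions:
+
+```bash
+# Illustrative only: mimic the "pick the smallest encoding" choice per chunk.
+split -b 2M partition.img chunk_
+for c in chunk_*; do
+  raw=$(stat -c%s "$c")
+  bz=$(bzip2 -c "$c" | wc -c)
+  xz_size=$(xz -c "$c" | wc -c)
+  echo "$c raw=$raw bzip2=$bz xz=$xz_size"  # the smallest encoding would win
+done
+```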
+ +#### Generating Delta Payloads + +Delta payloads are generated by looking at both the source and target images +data on a file and metadata basis (more precisely, the file system level on each +appropriate partition). The reason we can generate delta payloads is that Chrome +OS partitions are read only. So with high certainty we can assume the active +partitions on the client’s device is bit-by-bit equal to the original partitions +generated in the image generation/signing phase. The process for generating a +delta payload is roughly as follows: + +1. Find all the zero-filled blocks on the target partition and produce `ZERO` + operation for them. `ZERO` operation basically discards the associated + blocks (depending on the implementation). +2. Find all the blocks that have not changed between the source and target + partitions by directly comparing one-to-one source and target blocks and + produce `SOURCE_COPY` operation. +3. List all the files (and their associated blocks) in the source and target + partitions and remove blocks (and files) which we have already generated + operations for in the last two steps. Assign the remaining metadata (inodes, + etc) of each partition as a file. +4. If a file is new, generate a `REPLACE`, `REPLACE_XZ`, or `REPLACE_BZ` + operation for its data blocks depending on which one generates a smaller + data blob. +5. For each other file, compare the source and target blocks and produce a + `SOURCE_BSDIFF` or `PUFFDIFF` operation depending on which one generates a + smaller data blob. These two operations produce binary diffs between a + source and target data blob. (Look at [bsdiff] and [puffin] for details of + such binary differential programs!) +6. Sort the operations based on their target partitions’ block offset. +7. Optionally merge same or similar operations next to each other into larger + operations for better efficiency and potentially smaller payloads. + +Full payloads can only contain `REPLACE`, `REPLACE_BZ`, and `REPLACE_XZ` +operations. Delta payloads can contain any operations. + +### Major and Minor versions + +The major and minor versions specify the update payload file format and the +capability of the updater client to accept certain types of update payloads +respectively. These numbers are [hard coded] in the updater client. + +Major version is basically the update payload file version specified in the +[update payload file specification] above (second field). Each updater client +supports a range of major versions. Currently, there are only two major +versions: 1, and 2. And both Chrome OS and Android are on major version 2 (major +version 1 is being deprecated). Whenever there are new additions that cannot be +fitted in the [Manifest protobuf], we need to uprev the major version. Upreving +major version should be done with utmost care because older clients do not know +how to handle the newer versions. Any major version uprev in Chrome OS should be +associated with a GoldenEye stepping stone. + +Minor version defines the capability of the updater client to accept certain +operations or perform certain actions. Each updater client supports a range of +minor versions. For example, the updater client with minor version 4 (or less) +does not know how to handle a `PUFFDIFF` operation. So when generating a delta +payload for an image which has an updater client with minor version 4 (or less) +we cannot produce PUFFDIFF operation for it. 
The payload generation process +looks at the source image’s minor version to decide the type of operations it +supports, and it only produces a payload that conforms to those restrictions. Similarly, if +there is a bug in a client with a specific minor version, an uprev in the minor +version helps avoid generating payloads that cause that bug to +manifest. However, upreving minor versions is quite expensive too in terms of +maintainability and can be error prone. So one should practice caution when +making such a change. + +Minor versions are irrelevant in full payloads. Full payloads should always be +applicable to very old clients. The reason is that the updater clients +may not send their current version, so if we had different types of full +payloads, we would not have known which version to serve to the client. + +### Signed vs. Unsigned Payloads + +Update payloads can be signed (with private/public key pairs) for use in +production or be kept unsigned for use in testing. Tools like `delta_generator` +help with generating metadata and payload hashes or signing the payloads given +private keys. + +## update_payload Scripts + +[update_payload] contains a set of Python scripts mostly to validate payload +generation and application. We normally test the update payloads using an actual +device (live tests). The [`brillo_update_payload`] script can be used to generate +a payload and test applying it on a host machine. These tests can be +viewed as dynamic tests without the need for an actual device. Other +`update_payload` scripts (like [`check_update_payload`]) can be used to +statically check that a payload is in the correct state and its application +works correctly. These scripts actually apply the payload statically without +running the code in payload_consumer. + +## Postinstall + +[Postinstall] is a process called after the updater client writes the new image +artifacts to the inactive partitions. One of postinstall's main responsibilities +is to recreate the dm-verity tree hash at the end of the root partition. Among +other things, it installs new firmware updates or runs any board-specific +processes. Postinstall runs in a separate chroot inside the newly installed +partition. So it is quite separated from the rest of the active running +system. Anything that needs to be done after an update and before the device is +rebooted should be implemented inside the postinstall. + +## Building Update Engine + +You can build `update_engine` the same as other platform applications: + +```bash +(chroot) $ emerge-${BOARD} update_engine +``` +or to build without the source copy: + +```bash +(chroot) $ cros_workon_make --board=${BOARD} update_engine +``` + +After a change in the `update_engine` daemon, either build an image and install +the image on the device using `cros flash`, etc., or use `cros deploy` to only +install the `update_engine` service on the device: + +```bash +(chroot) $ cros deploy update_engine +``` + +You need to restart the `update_engine` daemon in order for the changes to take +effect: + +```bash +# SSH into the device. +restart update-engine # with a dash not underscore. +``` + +Other payload generation tools like `delta_generator` are board-agnostic and +only available in the SDK. So in order to make any changes to +`delta_generator`, you should build the SDK: + +```bash +# Do it only once to start building the 9999 ebuild from ToT.
(chroot) $ cros_workon --host start update_engine + +(chroot) $ sudo emerge update_engine +``` + +If you make any changes to the D-Bus interface, make sure the `system_api`, +`update_engine-client`, and `update_engine` packages are marked to build from +the 9999 ebuild and then build the packages in that order: + +```bash +(chroot) $ emerge-${BOARD} system_api update_engine-client update_engine +``` + +If you make any changes to [`update_engine` protobufs] in the `system_api`, +build the `system_api` package first. + +## Running Unit Tests + +[Running unit tests similar to other platforms]: + +```bash +(chroot) $ FEATURES=test emerge-${BOARD} update_engine +``` + +or + +```bash +(chroot) $ cros_workon_make --board=${BOARD} --test update_engine +``` + +or + +```bash +(chroot) $ cros_run_unit_tests --board ${BOARD} --test --packages update_engine +``` + +The above commands run all the unit tests, but the `update_engine` package is quite +large and it takes a long time to run all of them. To run all unit tests +in a test class, run: + +```bash +(chroot) $ FEATURES=test \ + P2_TEST_FILTER="*OmahaRequestActionTest.*-*RunAsRoot*" \ + emerge-amd64-generic update_engine +``` + +To run one exact unit test fixture (e.g. `MultiAppUpdateTest`), run: + +```bash +(chroot) $ FEATURES=test \ + P2_TEST_FILTER="*OmahaRequestActionTest.MultiAppUpdateTest-*RunAsRoot*" \ + emerge-amd64-generic update_engine +``` + +To run the `update_payload` unit tests, enter the `update_engine/scripts` directory and +run the desired `unittest.py` files. + +## Initiating a Configured Update + +There are different methods to initiate an update: + +* Click on the “Check For Update” button in the Settings About page. There is no + way to configure this type of update check. +* Use the [`update_engine_client`] program. There are a few configurations you + can do. +* Call `autest` in crosh. Mainly used by the QA team and is not intended + to be used by any other team. + +`update_engine_client` is a client application that can help initiate an update +or get more information about the status of the updater client. It has several +options like initiating an interactive vs. non-interactive update, changing +channels, getting the current status of the update process, doing a rollback, +changing the Omaha URL to download the payload (the most important one), etc. + +The `update_engine` daemon reads the `/etc/lsb-release` file on the device to +identify different update parameters like the update server (Omaha) URL, the +current channel, etc. However, to override any of these parameters, create the +file `/mnt/stateful_partition/etc/lsb-release` with the desired customized +parameters. For example, this can be used to point to a developer version of the +update server and allow the update_engine to schedule a periodic update from +that specific server. + +If you have some changes in the protocol that communicates with Omaha, but you +don’t have those changes in the update server, or you have some specific +payloads that do not exist on the production update server, you can use +[Nebraska] to help with doing an update. + +## Note to Developers and Maintainers + +When changing the update engine source code, be extra careful about these things: + +### Do NOT Break Backward Compatibility + +At each release cycle we should be able to generate full and delta payloads that +can correctly be applied to older devices that run older versions of the update +engine client. So for example, removing or not passing arguments in the metadata +proto file might break older clients.
Or passing operations that are not +understood in older clients will break them. Whenever changing anything in the +payload generation process, ask yourself this question: Would it work on older +clients? If not, do I need to control it with minor versions or some other means? + +Especially regarding enterprise rollback, a newer updater client should be able +to accept an older update payload. Normally this happens using a full payload, +but care should be taken not to break this compatibility. + +### Think About The Future + +When creating a change in the update engine, think about 5 years from now: + +* How can the change be implemented so that five years from now older clients + don’t break? +* How is it going to be maintained five years from now? +* How can it make future changes easier without breaking older clients + or incurring heavy maintenance costs? + +### Prefer Not To Implement Your Feature In The Updater Client + +If a feature can be implemented on the server side, do NOT implement it in the +client updater, because the client updater can be fragile at points and small +mistakes can have catastrophic consequences. For example, if a bug is introduced +in the updater client that causes it to crash right before checking for an update +and we can't catch this bug early in the release process, then the +production devices which have already moved to the new buggy system may no +longer receive automatic updates. So, always ask: can the feature +being implemented be done from the server side (with potentially minimal +changes to the client updater)? Or can the feature be moved to another service +with a minimal interface to the updater client? Answering these questions will pay +off greatly in the future. + +### Be Respectful Of Other Code Bases + +The current update engine code base is used in many projects like Android. We +sync the code base between these two projects frequently. Try not to break Android +or other systems that share the update engine code. Whenever landing a change, +always think about whether Android needs that change: + +* How will it affect Android? +* Can the change be moved to an interface and stub implementations be + provided so as not to affect Android? +* Can Chrome OS or Android specific code be guarded by macros? + +As a basic measure, if adding/removing/renaming code, make sure to change both +`build.gn` and `Android.bp`. Do not bring Chrome OS specific code (for example +other libraries that live in `system_api` or `dlcservice`) into the common code +of update_engine. Try to separate these concerns using best software engineering +practices. + +### Merging from Android (or other code bases) + +Chrome OS tracks the Android code as an [upstream branch]. To merge the Android +code into Chrome OS (or vice versa), just do a `git merge` of that branch into +Chrome OS, test it using whatever means, and upload a merge commit. + +```bash +repo start merge-aosp +git merge --no-ff --strategy=recursive -X patience cros/upstream +repo upload --cbr --no-verify .
+``` + +[Postinstall]: #postinstall +[update payload file specification]: #update-payload-file-specification +[OTA]: https://source.android.com/devices/tech/ota +[DLC]: https://chromium.googlesource.com/chromiumos/platform2/+/master/dlcservice +[`chromeos-setgoodkernel`]: https://chromium.googlesource.com/chromiumos/platform2/+/master/installer/chromeos-setgoodkernel +[D-Bus interface]: /dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +[this repository]: / +[UpdateManager]: /update_manager/update_manager.cc +[update_manager]: /update_manager/ +[P2P update related code]: https://chromium.googlesource.com/chromiumos/platform2/+/master/p2p/ +[`cros_generate_update_payloads`]: https://chromium.googlesource.com/chromiumos/chromite/+/master/scripts/cros_generate_update_payload.py +[`chromite/lib/paygen`]: https://chromium.googlesource.com/chromiumos/chromite/+/master/lib/paygen/ +[DeltaArchiveManifest]: /update_metadata.proto#302 +[Signatures]: /update_metadata.proto#122 +[hard coded]: /update_engine.conf +[Manifest protobuf]: /update_metadata.proto +[update_payload]: /scripts/ +[Postinstall]: https://chromium.googlesource.com/chromiumos/platform2/+/master/installer/chromeos-postinst +[`update_engine` protobufs]: https://chromium.googlesource.com/chromiumos/platform2/+/master/system_api/dbus/update_engine/ +[Running unit tests similar to other platforms]: https://chromium.googlesource.com/chromiumos/docs/+/master/testing/running_unit_tests.md +[Nebraska]: https://chromium.googlesource.com/chromiumos/platform/dev-util/+/master/nebraska/ +[upstream branch]: https://chromium.googlesource.com/aosp/platform/system/update_engine/+/upstream +[`cros flash`]: https://chromium.googlesource.com/chromiumos/docs/+/master/cros_flash.md +[bsdiff]: https://android.googlesource.com/platform/external/bsdiff/+/master +[puffin]: https://android.googlesource.com/platform/external/puffin/+/master +[`update_engine_client`]: /update_engine_client.cc +[`brillo_update_payload`]: /scripts/brillo_update_payload +[`check_update_payload`]: /scripts/paycheck.py From 21030c18fc636a8887c76824ffb68ef41a3ebdd3 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Wed, 14 Aug 2019 13:00:23 -0700 Subject: [PATCH 092/624] Use the payload size as the base to calculate download overhead The download_overhead_percentage was calculated based on the bytes downloaded during the current update attempt. This may lead to a large number when the update is interrupted and the resumed update attempt only downloads a small portion of the payload. For example, for a 10M payload, if we download 9M, interrupt, and resume to download the remaining 1M, the current code will report the overhead as (10-1)/1*100% = 900%. We should switch to using the payload size, as that was the intention of the metric.
Test: run update engine, interrupt and check the metrics Change-Id: Ic3e1c0a0a4671ee6e7751d53fc40fd2dc8072d63 --- update_attempter_android.cc | 10 +++++++--- update_attempter_android_unittest.cc | 23 +++++++++++++++++++---- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 97c53ec9..08f6c20f 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -750,11 +750,15 @@ void UpdateAttempterAndroid::CollectAndReportUpdateMetricsOnUpdateFinished( total_bytes_downloaded; int download_overhead_percentage = 0; - if (current_bytes_downloaded > 0) { + if (total_bytes_downloaded >= payload_size) { + CHECK_GT(payload_size, 0); download_overhead_percentage = - (total_bytes_downloaded - current_bytes_downloaded) * 100ull / - current_bytes_downloaded; + (total_bytes_downloaded - payload_size) * 100ull / payload_size; + } else { + LOG(WARNING) << "Downloaded bytes " << total_bytes_downloaded + << " is smaller than the payload size " << payload_size; } + metrics_reporter_->ReportSuccessfulUpdateMetrics( static_cast(attempt_number), 0, // update abandoned count diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc index 3be0b7ea..721b7352 100644 --- a/update_attempter_android_unittest.cc +++ b/update_attempter_android_unittest.cc @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -57,6 +58,11 @@ class UpdateAttempterAndroidTest : public ::testing::Test { update_attempter_android_.status_ = status; } + void AddPayload(InstallPlan::Payload&& payload) { + update_attempter_android_.install_plan_.payloads.push_back( + std::move(payload)); + } + UpdateAttempterAndroid update_attempter_android_{ &daemon_state_, &prefs_, &boot_control_, &hardware_}; @@ -143,9 +149,13 @@ TEST_F(UpdateAttempterAndroidTest, ReportMetricsOnUpdateTerminated) { .Times(1); EXPECT_CALL(*metrics_reporter_, ReportSuccessfulUpdateMetrics( - 2, 0, _, _, _, _, duration, duration_uptime, 3, _)) + 2, 0, _, 50, _, _, duration, duration_uptime, 3, _)) .Times(1); + // Adds a payload of 50 bytes to the InstallPlan. + InstallPlan::Payload payload; + payload.size = 50; + AddPayload(std::move(payload)); SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE); update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -179,15 +189,20 @@ TEST_F(UpdateAttempterAndroidTest, ReportMetricsForBytesDownloaded) { _, _, _, - _, + 50, test_utils::DownloadSourceMatcher(total_bytes), - 125, + 80, _, _, _, _)) .Times(1); + // Adds a payload of 50 bytes to the InstallPlan. + InstallPlan::Payload payload; + payload.size = 50; + AddPayload(std::move(payload)); + // The first update fails after receiving 50 bytes in total. update_attempter_android_.BytesReceived(30, 50, 200); update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError); @@ -199,7 +214,7 @@ TEST_F(UpdateAttempterAndroidTest, ReportMetricsForBytesDownloaded) { metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, &prefs_)); // The second update succeeds after receiving 40 bytes, which leads to a - // overhead of 50 / 40 = 125%. + // overhead of (90 - 50) / 50 = 80%. update_attempter_android_.BytesReceived(40, 40, 50); update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess); // Both prefs should be cleared. 
From fbb600fa0499c0bfdc9923814c430a6e629f4d6a Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 14 Aug 2019 19:52:30 -0700 Subject: [PATCH 093/624] update_engine: Break instantiating system policies into their own files This removes the use of __ANDROID__ for instantiating system policies (Chrome OS vs. Android) by defining an interface and implementing the interface in each individual code base. BUG=none TEST=FEATURES=test emerge update_engine Did not build the android side, but hopefully it is correct as the code change is minimal and problems can get caught in reviews. Change-Id: I3931a9bed9ee4a9edfba5d712b05d487af1af813 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1755263 Tested-by: Amin Hassani Commit-Queue: Amin Hassani Reviewed-by: Sen Jiang --- update_manager/android_things_policy.cc | 6 ++++++ update_manager/chromeos_policy.cc | 6 ++++++ update_manager/policy.h | 6 ++++++ update_manager/update_manager.cc | 17 +++-------------- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc index 26bd0ba9..a76ea482 100644 --- a/update_manager/android_things_policy.cc +++ b/update_manager/android_things_policy.cc @@ -16,6 +16,7 @@ #include "update_engine/update_manager/android_things_policy.h" +#include #include #include @@ -30,10 +31,15 @@ using base::Time; using chromeos_update_engine::ErrorCode; using std::string; +using std::unique_ptr; using std::vector; namespace chromeos_update_manager { +unique_ptr GetSystemPolicy() { + return std::make_unique(); +} + const NextUpdateCheckPolicyConstants AndroidThingsPolicy::kNextUpdateCheckPolicyConstants = { .timeout_initial_interval = 7 * 60, diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 12d443d9..fab111a2 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -17,6 +17,7 @@ #include "update_engine/update_manager/chromeos_policy.h" #include +#include #include #include #include @@ -48,6 +49,7 @@ using std::get; using std::min; using std::set; using std::string; +using std::unique_ptr; using std::vector; namespace { @@ -185,6 +187,10 @@ bool IsUrlUsable(const string& url, bool http_allowed) { namespace chromeos_update_manager { +unique_ptr GetSystemPolicy() { + return std::make_unique(); +} + const NextUpdateCheckPolicyConstants ChromeOSPolicy::kNextUpdateCheckPolicyConstants = { .timeout_initial_interval = 7 * 60, diff --git a/update_manager/policy.h b/update_manager/policy.h index 9e7df10d..844a4d0a 100644 --- a/update_manager/policy.h +++ b/update_manager/policy.h @@ -17,6 +17,7 @@ #ifndef UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_ #define UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_ +#include #include #include #include @@ -310,6 +311,11 @@ class Policy { DISALLOW_COPY_AND_ASSIGN(Policy); }; +// Get system dependent (Chrome OS vs. Android) policy +// implementation. Implementations can be found in chromeos_policy.cc and +// android_things_policy.cc. 
+std::unique_ptr GetSystemPolicy(); + } // namespace chromeos_update_manager #endif // UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_ diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc index 00694969..5664a5cd 100644 --- a/update_manager/update_manager.cc +++ b/update_manager/update_manager.cc @@ -15,12 +15,6 @@ // #include "update_engine/update_manager/update_manager.h" - -#ifdef __ANDROID__ -#include "update_engine/update_manager/android_things_policy.h" -#else -#include "update_engine/update_manager/chromeos_policy.h" -#endif // __ANDROID__ #include "update_engine/update_manager/state.h" namespace chromeos_update_manager { @@ -29,18 +23,13 @@ UpdateManager::UpdateManager(chromeos_update_engine::ClockInterface* clock, base::TimeDelta evaluation_timeout, base::TimeDelta expiration_timeout, State* state) - : default_policy_(clock), + : policy_(GetSystemPolicy()), + default_policy_(clock), state_(state), clock_(clock), evaluation_timeout_(evaluation_timeout), expiration_timeout_(expiration_timeout), - weak_ptr_factory_(this) { -#ifdef __ANDROID__ - policy_.reset(new AndroidThingsPolicy()); -#else - policy_.reset(new ChromeOSPolicy()); -#endif // __ANDROID__ -} + weak_ptr_factory_(this) {} UpdateManager::~UpdateManager() { // Remove pending main loop events associated with any of the outstanding From a2c8b92227ddf33fd934357d0aea39bbe36e6293 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 14 Aug 2019 19:41:03 -0700 Subject: [PATCH 094/624] update_engine: Replace scoped_refptr with shared_ptr in update_manager It seems like scoped_refptr was a substitute for shared_ptr before chromium was on C++11: https://www.chromium.org/developers/smart-pointer-guidelines But that is not the case anymore as we are already on C++14. So just replace it in update_manager with shared_ptr. There is still another use case of it for keeping dbus connections but that can't easily be changed because brillo::DBusConnection is still using scoped_refptr. BUG=chromium:994048 TEST=FEATURES=test emerge update_engine Change-Id: I1fab0408399d678d2851731aea40fc02be459295 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1755262 Reviewed-by: Jae Hoon Kim Reviewed-by: Sen Jiang Tested-by: Amin Hassani Commit-Queue: Amin Hassani --- update_manager/evaluation_context.h | 6 ++---- update_manager/evaluation_context_unittest.cc | 11 ++++++----- update_manager/policy_test_utils.cc | 3 ++- update_manager/policy_test_utils.h | 2 +- update_manager/update_manager-inl.h | 9 ++++----- update_manager/update_manager.cc | 6 +++++- update_manager/update_manager.h | 16 ++-------------- 7 files changed, 22 insertions(+), 31 deletions(-) diff --git a/update_manager/evaluation_context.h b/update_manager/evaluation_context.h index c68c4308..5c5b013c 100644 --- a/update_manager/evaluation_context.h +++ b/update_manager/evaluation_context.h @@ -23,7 +23,6 @@ #include #include -#include #include #include #include @@ -46,7 +45,7 @@ namespace chromeos_update_manager { // // Example: // -// scoped_refptr ec = new EvaluationContext(...); +// auto ec = std::make_shared(...); // // ... // // The following call to ResetEvaluation() is optional. Use it to reset the @@ -62,8 +61,7 @@ namespace chromeos_update_manager { // // If the provided |closure| wants to re-evaluate the policy, it should // // call ec->ResetEvaluation() to start a new evaluation. 
// -class EvaluationContext : public base::RefCounted, - private BaseVariable::ObserverInterface { +class EvaluationContext : private BaseVariable::ObserverInterface { public: EvaluationContext( chromeos_update_engine::ClockInterface* clock, diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc index 151b0b55..a50defd3 100644 --- a/update_manager/evaluation_context_unittest.cc +++ b/update_manager/evaluation_context_unittest.cc @@ -39,6 +39,7 @@ using brillo::MessageLoop; using brillo::MessageLoopRunMaxIterations; using brillo::MessageLoopRunUntil; using chromeos_update_engine::FakeClock; +using std::shared_ptr; using std::string; using std::unique_ptr; using testing::_; @@ -59,14 +60,14 @@ bool GetBoolean(bool* value) { } template -void ReadVar(scoped_refptr ec, Variable* var) { +void ReadVar(shared_ptr ec, Variable* var) { ec->GetValue(var); } // Runs |evaluation|; if the value pointed by |count_p| is greater than zero, // decrement it and schedule a reevaluation; otherwise, writes true to |done_p|. void EvaluateRepeatedly(Closure evaluation, - scoped_refptr ec, + shared_ptr ec, int* count_p, bool* done_p) { evaluation.Run(); @@ -92,11 +93,11 @@ class UmEvaluationContextTest : public ::testing::Test { fake_clock_.SetMonotonicTime(Time::FromTimeT(1240428300)); // Mar 2, 2006 1:23:45 UTC. fake_clock_.SetWallclockTime(Time::FromTimeT(1141262625)); - eval_ctx_ = new EvaluationContext( + eval_ctx_.reset(new EvaluationContext( &fake_clock_, default_timeout_, default_timeout_, - unique_ptr>(nullptr)); + unique_ptr>(nullptr))); } void TearDown() override { @@ -126,7 +127,7 @@ class UmEvaluationContextTest : public ::testing::Test { brillo::FakeMessageLoop loop_{nullptr}; FakeClock fake_clock_; - scoped_refptr eval_ctx_; + shared_ptr eval_ctx_; // FakeVariables used for testing the EvaluationContext. These are required // here to prevent them from going away *before* the EvaluationContext under diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc index 5491e007..653592ac 100644 --- a/update_manager/policy_test_utils.cc +++ b/update_manager/policy_test_utils.cc @@ -34,7 +34,8 @@ namespace chromeos_update_manager { void UmPolicyTestBase::SetUp() { loop_.SetAsCurrent(); SetUpDefaultClock(); - eval_ctx_ = new EvaluationContext(&fake_clock_, TimeDelta::FromSeconds(5)); + eval_ctx_.reset( + new EvaluationContext(&fake_clock_, TimeDelta::FromSeconds(5))); SetUpDefaultState(); } diff --git a/update_manager/policy_test_utils.h b/update_manager/policy_test_utils.h index eb5758f0..cd94907d 100644 --- a/update_manager/policy_test_utils.h +++ b/update_manager/policy_test_utils.h @@ -93,7 +93,7 @@ class UmPolicyTestBase : public ::testing::Test { brillo::FakeMessageLoop loop_{nullptr}; chromeos_update_engine::FakeClock fake_clock_; FakeState fake_state_; - scoped_refptr eval_ctx_; + std::shared_ptr eval_ctx_; std::unique_ptr policy_; }; diff --git a/update_manager/update_manager-inl.h b/update_manager/update_manager-inl.h index e9dee3f2..a1d172d5 100644 --- a/update_manager/update_manager-inl.h +++ b/update_manager/update_manager-inl.h @@ -78,7 +78,7 @@ EvalStatus UpdateManager::EvaluatePolicy( template void UpdateManager::OnPolicyReadyToEvaluate( - scoped_refptr ec, + std::shared_ptr ec, base::Callback callback, EvalStatus (Policy::*policy_method)( EvaluationContext*, State*, std::string*, R*, Args...) 
const, @@ -119,8 +119,7 @@ EvalStatus UpdateManager::PolicyRequest( EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const, R* result, ActualArgs... args) { - scoped_refptr ec( - new EvaluationContext(clock_, evaluation_timeout_)); + auto ec = std::make_shared(clock_, evaluation_timeout_); // A PolicyRequest always consists on a single evaluation on a new // EvaluationContext. // IMPORTANT: To ensure that ActualArgs can be converted to ExpectedArgs, we @@ -141,7 +140,7 @@ void UpdateManager::AsyncPolicyRequest( EvalStatus (Policy::*policy_method)( EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const, ActualArgs... args) { - scoped_refptr ec = new EvaluationContext( + auto ec = std::make_shared( clock_, evaluation_timeout_, expiration_timeout_, @@ -149,7 +148,7 @@ void UpdateManager::AsyncPolicyRequest( new base::Callback( base::Bind(&UpdateManager::UnregisterEvalContext, weak_ptr_factory_.GetWeakPtr())))); - if (!ec_repo_.insert(ec.get()).second) { + if (!ec_repo_.insert(ec).second) { LOG(ERROR) << "Failed to register evaluation context; this is a bug."; } diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc index 5664a5cd..2974d7d2 100644 --- a/update_manager/update_manager.cc +++ b/update_manager/update_manager.cc @@ -47,7 +47,11 @@ void UpdateManager::AsyncPolicyRequestUpdateCheckAllowed( } void UpdateManager::UnregisterEvalContext(EvaluationContext* ec) { - if (!ec_repo_.erase(ec)) { + // Since |ec_repo_|'s compare function is based on the value of the raw + // pointer |ec|, we can just create a |shared_ptr| here and pass it along to + // be erased. + if (!ec_repo_.erase( + std::shared_ptr(ec, [](EvaluationContext*) {}))) { LOG(ERROR) << "Unregistering an unknown evaluation context, this is a bug."; } } diff --git a/update_manager/update_manager.h b/update_manager/update_manager.h index 732175fe..8ab61d0f 100644 --- a/update_manager/update_manager.h +++ b/update_manager/update_manager.h @@ -22,7 +22,6 @@ #include #include -#include #include #include "update_engine/common/clock_interface.h" @@ -33,15 +32,6 @@ namespace chromeos_update_manager { -// Comparator for scoped_refptr objects. -template -struct ScopedRefPtrLess { - bool operator()(const scoped_refptr& first, - const scoped_refptr& second) const { - return first.get() < second.get(); - } -}; - // Please do not move this class into a new file for simplicity. // This pure virtual class is purely created for purpose of testing. The reason // was that |UpdateManager|'s member functions are templatized, which does not @@ -152,7 +142,7 @@ class UpdateManager : public SpecializedPolicyRequestInterface { // the evaluation will be re-scheduled to be called later. template void OnPolicyReadyToEvaluate( - scoped_refptr ec, + std::shared_ptr ec, base::Callback callback, EvalStatus (Policy::*policy_method)( EvaluationContext*, State*, std::string*, R*, Args...) const, @@ -186,9 +176,7 @@ class UpdateManager : public SpecializedPolicyRequestInterface { // destructed; alternatively, when the UpdateManager instance is destroyed, it // will remove all pending events associated with all outstanding contexts // (which should, in turn, trigger their destruction). - std::set, - ScopedRefPtrLess> - ec_repo_; + std::set> ec_repo_; base::WeakPtrFactory weak_ptr_factory_; From 348b9f6c83d82893caf82849f06397cdcf5768cc Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 28 Aug 2019 23:19:48 -0700 Subject: [PATCH 095/624] update_engine: Add kimjae@ as an owner Also xiaochu@ has left the team. 
This removes him so he doesn't get assigned to bugs/CLs, etc. BUG=chromium:995893 TEST=None Change-Id: Iafcb319f6471b0c462fc25741656c34cd2e2808b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1775819 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Nicolas Norvez Commit-Queue: Nicolas Norvez --- OWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index 6709d7ab..4e476058 100644 --- a/OWNERS +++ b/OWNERS @@ -7,7 +7,7 @@ senj@google.com # Chromium OS maintainers: benchan@google.com ahassani@google.com -xiaochu@google.com +kimjae@google.com # Chromium OS only: # COMPONENT: Internals>Installer From 413d572453017abc52022290f757cc86c55720a7 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 23 Jul 2019 14:21:09 -0700 Subject: [PATCH 096/624] DynamicPartitionsControl: Add Virtual A/B feature flag. Test: unittest Bug: 138816109 Change-Id: I7ae65ba0bf36a6ca5085bc4ec2c46245288b4703 --- boot_control_android_unittest.cc | 2 ++ dynamic_partition_control_android.cc | 6 ++++++ dynamic_partition_control_android.h | 1 + dynamic_partition_control_android_unittest.cc | 2 ++ dynamic_partition_control_interface.h | 3 +++ mock_dynamic_partition_control.h | 2 ++ 6 files changed, 16 insertions(+) diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index 6f02a070..3b921912 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -62,6 +62,8 @@ class BootControlAndroidTest : public ::testing::Test { ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag()) .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); + ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE))); ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true)); ON_CALL(dynamicControl(), GetDeviceDir(_)) .WillByDefault(Invoke([](auto path) { diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 5a2ccb1d..b9732322 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -53,6 +53,8 @@ using PartitionMetadata = BootControlInterface::PartitionMetadata; constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions"; constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; +constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled"; +constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit"; constexpr uint64_t kMapTimeoutMillis = 1000; DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { @@ -81,6 +83,10 @@ FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() { return GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions); } +FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() { + return GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit); +} + bool DynamicPartitionControlAndroid::MapPartitionInternal( const std::string& super_device, const std::string& target_partition_name, diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 062a2d1a..d743e6e5 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -30,6 +30,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { DynamicPartitionControlAndroid() = default; ~DynamicPartitionControlAndroid(); FeatureFlag GetDynamicPartitionsFeatureFlag() override; + FeatureFlag 
GetVirtualAbFeatureFlag() override; bool MapPartitionOnDeviceMapper(const std::string& super_device, const std::string& target_partition_name, uint32_t slot, diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 5b3dfe31..1a3f6647 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -44,6 +44,8 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag()) .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); + ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE))); ON_CALL(dynamicControl(), GetDeviceDir(_)) .WillByDefault(Invoke([](auto path) { diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index b3ce4ea6..9c7b8d07 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -50,6 +50,9 @@ class DynamicPartitionControlInterface { // NONE iff dynamic partitions is disabled on this device. virtual FeatureFlag GetDynamicPartitionsFeatureFlag() = 0; + // Return the feature flags of Virtual A/B on this device. + virtual FeatureFlag GetVirtualAbFeatureFlag() = 0; + // Map logical partition on device-mapper. // |super_device| is the device path of the physical partition ("super"). // |target_partition_name| is the identifier used in metadata; for example, diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 26fc2469..aab3c4d8 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -49,6 +49,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { uint32_t, const BootControlInterface::PartitionMetadata&)); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); + MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); }; class MockDynamicPartitionControlAndroid @@ -75,6 +76,7 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD1(GetDeviceDir, bool(std::string*)); MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); + MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); }; } // namespace chromeos_update_engine From 13bdba8cdca020c6731907b37b17c623091ba0e5 Mon Sep 17 00:00:00 2001 From: Rick Yiu Date: Wed, 21 Aug 2019 12:52:06 +0800 Subject: [PATCH 097/624] Put update_engine to blkio background group To lower the IO priority of update_engine. It only works on devices that support blkio cgroup. 
Bug: 140151970 Test: update_engine put to blkio background group correctly Change-Id: I8b688c7625c64366b066603996572413ba6451ee --- update_engine.rc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/update_engine.rc b/update_engine.rc index 90ca4c60..b9f80fc1 100644 --- a/update_engine.rc +++ b/update_engine.rc @@ -2,7 +2,7 @@ service update_engine /system/bin/update_engine --logtostderr --logtofile --fore class late_start user root group root system wakelock inet cache media_rw -    writepid /dev/cpuset/system-background/tasks +    writepid /dev/cpuset/system-background/tasks /dev/blkio/background/tasks disabled on property:ro.boot.slot_suffix=* From 0199b7544974a4d61ddb52a0591d8f3e2d9757b9 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 28 Aug 2019 23:56:16 -0700 Subject: [PATCH 098/624] update_engine: Add more methods of initiating an update to README.md BUG=None TEST=docs/preview_markdown README.md Change-Id: I0c63d16cb0143882f5996ffe55a6fc5295b6a35b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1775820 Tested-by: Amin Hassani Reviewed-by: Nicolas Norvez Commit-Queue: Amin Hassani --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 7071cf10..0c769267 100644 --- a/README.md +++ b/README.md @@ -512,6 +512,14 @@ There are different methods to initiate an update: can do. * Call `autest` in the crosh. Mainly used by the QA team and is not intended to be used by any other team. +* Use [`cros flash`]. It internally uses the update_engine to flash a device + with a given image. +* Run one of many auto update autotests. +* Start a [Dev Server] on your host machine and send a specific HTTP request + to it (look at the `cros_au` API in the Dev Server code) that has information + like the IP address of your Chromebook and where the update payloads are + located, to start an update on your device (**Warning:** + complicated to do, not recommended). `update_engine_client` is a client application that can help initiate an update or get more information about the status of the updater client. It has several @@ -631,3 +639,4 @@ repo upload --cbr --no-verify . [`update_engine_client`]: /update_engine_client.cc [`brillo_update_payload`]: /scripts/brillo_update_payload [`check_update_payload`]: /scripts/paycheck.py +[Dev Server]: https://chromium.googlesource.com/chromiumos/chromite/+/master/docs/devserver.md From 9be122effd4022e14cc158191e11ca230a605544 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 29 Aug 2019 09:20:12 -0700 Subject: [PATCH 099/624] update_engine: Pass is_enterprise_rollback in the StatusResult Currently Chrome uses some sort of version comparison to define whether an update is a rollback or not, but that is not very robust. The correct way is to return this value in the StatusResult. We already have this value as a placeholder in the update_engine.proto. So this is good to go.
BUG=chromium:864672 TEST=FEATUERS=test emerge-reef update_engine Change-Id: I8bd3af0d94abd656dc00a9e67550ea6c6913de91 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1775116 Tested-by: Amin Hassani Commit-Queue: Amin Hassani Reviewed-by: Jae Hoon Kim --- client_library/client_dbus.cc | 1 + .../include/update_engine/update_status.h | 3 +++ dbus_service.cc | 1 + update_attempter.cc | 2 ++ update_attempter.h | 3 +++ update_attempter_unittest.cc | 24 +++++++++++++++++++ update_status_utils.cc | 3 +++ update_status_utils_unittest.cc | 14 ++--------- 8 files changed, 39 insertions(+), 12 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index d0f7f81b..3c23de4c 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -54,6 +54,7 @@ void ConvertToUpdateEngineStatus(const StatusResult& status, out_status->new_version = status.new_version(); out_status->new_size_bytes = status.new_size(); out_status->status = static_cast(status.current_operation()); + out_status->is_enterprise_rollback = status.is_enterprise_rollback(); out_status->is_install = status.is_install(); } } // namespace diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index bc14e675..c877df6d 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -83,6 +83,9 @@ struct UpdateEngineStatus { uint64_t new_size_bytes; // New product version. std::string new_version; + // Wether the update is an enterprise rollback. The value is valid only if the + // current operation is passed CHECKING_FOR_UPDATE. + bool is_enterprise_rollback; // Indication of install for DLC(s). 
bool is_install; }; diff --git a/dbus_service.cc b/dbus_service.cc index 168265d8..c4de3e72 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -47,6 +47,7 @@ void ConvertToStatusResult(const UpdateEngineStatus& ue_status, out_status->set_current_operation(static_cast(ue_status.status)); out_status->set_new_version(ue_status.new_version); out_status->set_new_size(ue_status.new_size_bytes); + out_status->set_is_enterprise_rollback(ue_status.is_enterprise_rollback); out_status->set_is_install(ue_status.is_install); } } // namespace diff --git a/update_attempter.cc b/update_attempter.cc index 71463b5c..780ba7bf 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1382,6 +1382,8 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { out_status->progress = download_progress_; out_status->new_size_bytes = new_payload_size_; out_status->new_version = new_version_; + out_status->is_enterprise_rollback = + install_plan_ && install_plan_->is_rollback; out_status->is_install = is_install_; return true; } diff --git a/update_attempter.h b/update_attempter.h index 3db40978..51b672d0 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -257,6 +257,9 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest); FRIEND_TEST(UpdateAttempterTest, DownloadProgressAccumulationTest); FRIEND_TEST(UpdateAttempterTest, InstallSetsStatusIdle); + FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusDefault); + FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue); + FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse); FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest); FRIEND_TEST(UpdateAttempterTest, PingOmahaTest); FRIEND_TEST(UpdateAttempterTest, ProcessingDoneInstallError); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 4b9bc750..0e743535 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -2220,4 +2220,28 @@ TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceeded) { TestOnUpdateScheduled(); } +TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusDefault) { + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_FALSE(status.is_enterprise_rollback); +} + +TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse) { + attempter_.install_plan_.reset(new InstallPlan); + attempter_.install_plan_->is_rollback = false; + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_FALSE(status.is_enterprise_rollback); +} + +TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue) { + attempter_.install_plan_.reset(new InstallPlan); + attempter_.install_plan_->is_rollback = true; + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_TRUE(status.is_enterprise_rollback); +} + } // namespace chromeos_update_engine diff --git a/update_status_utils.cc b/update_status_utils.cc index 07583148..639dc8be 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -33,6 +33,7 @@ namespace { // exactly these matches. 
const char kCurrentOp[] = "CURRENT_OP"; const char kIsInstall[] = "IS_INSTALL"; +const char kIsEnterpriseRollback[] = "IS_ENTERPRISE_ROLLBACK"; const char kLastCheckedTime[] = "LAST_CHECKED_TIME"; const char kNewSize[] = "NEW_SIZE"; const char kNewVersion[] = "NEW_VERSION"; @@ -88,6 +89,8 @@ string UpdateEngineStatusToString(const UpdateEngineStatus& status) { #endif key_value_store.SetString(kCurrentOp, UpdateStatusToString(status.status)); key_value_store.SetString(kNewVersion, status.new_version); + key_value_store.SetBoolean(kIsEnterpriseRollback, + status.is_enterprise_rollback); key_value_store.SetBoolean(kIsInstall, status.is_install); return key_value_store.SaveToString(); diff --git a/update_status_utils_unittest.cc b/update_status_utils_unittest.cc index dbd80d7a..3af30c78 100644 --- a/update_status_utils_unittest.cc +++ b/update_status_utils_unittest.cc @@ -24,22 +24,11 @@ using std::string; namespace chromeos_update_engine { -TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringDefaultTest) { - string print = - R"(CURRENT_OP=UPDATE_STATUS_IDLE -IS_INSTALL=false -LAST_CHECKED_TIME=0 -NEW_SIZE=0 -NEW_VERSION= -PROGRESS=0.0 -)"; - EXPECT_EQ(print, UpdateEngineStatusToString({})); -} - TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) { update_engine::UpdateEngineStatus update_engine_status = { .status = update_engine::UpdateStatus::CHECKING_FOR_UPDATE, .is_install = true, + .is_enterprise_rollback = true, .last_checked_time = 156000000, .new_size_bytes = 888, .new_version = "12345.0.0", @@ -47,6 +36,7 @@ TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) { }; string print = R"(CURRENT_OP=UPDATE_STATUS_CHECKING_FOR_UPDATE +IS_ENTERPRISE_ROLLBACK=true IS_INSTALL=true LAST_CHECKED_TIME=156000000 NEW_SIZE=888 From a1f4a7dcaa921fcb0ab395214a9558a62ca083f2 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 3 Sep 2019 18:12:33 +0000 Subject: [PATCH 100/624] Revert "update_engine: Support milestones to EOL from Omaha" This reverts commit 893cae4b0e5141bcaf9f56b1775e681c8f523630. Reason for revert: use of EOL date and EOL notification date instead milestones to EOL. Original change's description: > update_engine: Support milestones to EOL from Omaha > > Initiative to show EOL message on Chrome OS devices require that > update_engine parses the fields within Omaha response that pertain to the > new milestones to EOL field. The Omaha response will include a new > field called "milestones_to_eol" which will be an integer value > string. > > The job of update_engine when it comes to milestones to EOL from Omaha > is to merely forward. No checks and no modifications of fields are > done within update_engine besides being able to convert the milestones > to EOL from a string to integer. 
> > BUG=chromium:994999 > TEST=FEATURES="test" emerge-$BOARD update_engine update_engine-client > TEST=cros deploy $IP update_engine update_engine-client > TEST=test_that -b $BOARD $IP autoupdate_EOL # from Cq-Depend > TEST=test_that -b $BOARD $IP autoupdate_EOL.milestones # from Cq-Depend > > Cq-Depend:chromium:1761371 > Change-Id: I268e4c8e641b17d6a727a50f53285cc97c76eb22 > Reviewed-on: https://chromium-review.googlesource.com/1759285 > Tested-by: Jae Hoon Kim > Commit-Ready: ChromeOS CL Exonerator Bot > Legacy-Commit-Queue: Commit Bot > Reviewed-by: Nicolas Norvez > Reviewed-by: Amin Hassani TBR=maybelle@chromium.org,norvez@chromium.org,ahassani@chromium.org,abutzier@chromium.org,chromiumos-cl-exonerator@appspot.gserviceaccount.com,kimjae@chromium.org BUG=chromium:994999 TEST=none Cq-Depend:chromium:1782971 Change-Id: I42e75e22948b3653500d355027dc3312015c9ebf Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1782970 Tested-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- client_library/client_dbus.cc | 5 +- client_library/client_dbus.h | 3 +- client_library/include/update_engine/client.h | 6 +- common/constants.cc | 1 - common/constants.h | 1 - common_service.cc | 19 +---- common_service.h | 9 +-- common_service_unittest.cc | 81 ++----------------- ...rg.chromium.UpdateEngineInterface.dbus-xml | 1 - dbus_service.cc | 5 +- dbus_service.h | 9 +-- omaha_request_action.cc | 28 ++----- omaha_request_action.h | 6 +- omaha_request_action_unittest.cc | 59 -------------- omaha_utils.cc | 23 ++---- omaha_utils.h | 17 ---- omaha_utils_unittest.cc | 8 -- update_engine_client.cc | 17 +--- 18 files changed, 37 insertions(+), 261 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 3c23de4c..e6aba923 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -271,9 +271,8 @@ bool DBusUpdateEngineClient::GetLastAttemptError( return proxy_->GetLastAttemptError(last_attempt_error, nullptr); } -bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status, - int32_t* milestones_to_eol) const { - return proxy_->GetEolStatus(eol_status, milestones_to_eol, nullptr); +bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status) const { + return proxy_->GetEolStatus(eol_status, nullptr); } } // namespace internal diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index 3f782e91..c9631cf7 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -85,8 +85,7 @@ class DBusUpdateEngineClient : public UpdateEngineClient { bool GetLastAttemptError(int32_t* last_attempt_error) const override; - bool GetEolStatus(int32_t* eol_status, - int32_t* milestones_to_eol) const override; + bool GetEolStatus(int32_t* eol_status) const override; private: void DBusStatusHandlersRegistered(const std::string& interface, diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index 65a32675..89f36af6 100644 --- a/client_library/include/update_engine/client.h +++ b/client_library/include/update_engine/client.h @@ -135,10 +135,8 @@ class UpdateEngineClient { // Get the last UpdateAttempt error code. virtual bool GetLastAttemptError(int32_t* last_attempt_error) const = 0; - // Get the current end-of-life status code and milestones to end-of-life. - // See |EolStatus| enum and |MilestonesToEol| enum for details. 
- virtual bool GetEolStatus(int32_t* eol_status, - int32_t* milestones_to_eol) const = 0; + // Get the current end-of-life status code. See EolStatus enum for details. + virtual bool GetEolStatus(int32_t* eol_status) const = 0; protected: // Use CreateInstance(). diff --git a/common/constants.cc b/common/constants.cc index 6f37e16e..87bdf911 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -56,7 +56,6 @@ const char kPrefsOmahaCohort[] = "omaha-cohort"; const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint"; const char kPrefsOmahaCohortName[] = "omaha-cohort-name"; const char kPrefsOmahaEolStatus[] = "omaha-eol-status"; -const char kPrefsOmahaMilestonesToEol[] = "omaha-milestones-to-eol"; const char kPrefsP2PEnabled[] = "p2p-enabled"; const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp"; const char kPrefsP2PNumAttempts[] = "p2p-num-attempts"; diff --git a/common/constants.h b/common/constants.h index 6034dbd6..d95a56a1 100644 --- a/common/constants.h +++ b/common/constants.h @@ -57,7 +57,6 @@ extern const char kPrefsOmahaCohort[]; extern const char kPrefsOmahaCohortHint[]; extern const char kPrefsOmahaCohortName[]; extern const char kPrefsOmahaEolStatus[]; -extern const char kPrefsOmahaMilestonesToEol[]; extern const char kPrefsP2PEnabled[]; extern const char kPrefsP2PFirstAttemptTimestamp[]; extern const char kPrefsP2PNumAttempts[]; diff --git a/common_service.cc b/common_service.cc index 466007f5..0d5ee6dc 100644 --- a/common_service.cc +++ b/common_service.cc @@ -413,31 +413,18 @@ bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */, } bool UpdateEngineService::GetEolStatus(ErrorPtr* error, - int32_t* out_eol_status, - int32_t* out_milestones_to_eol) { + int32_t* out_eol_status) { PrefsInterface* prefs = system_state_->prefs(); - // Set EOL. string str_eol_status; if (prefs->Exists(kPrefsOmahaEolStatus) && !prefs->GetString(kPrefsOmahaEolStatus, &str_eol_status)) { LogAndSetError(error, FROM_HERE, "Error getting the end-of-life status."); return false; } - // |StringToEolStatus()| will return |kSupported| for invalid values. - *out_eol_status = static_cast(StringToEolStatus(str_eol_status)); - - // Set milestones to EOL. - string str_milestones_to_eol; - if (prefs->Exists(kPrefsOmahaMilestonesToEol) && - !prefs->GetString(kPrefsOmahaMilestonesToEol, &str_milestones_to_eol)) { - LogAndSetError(error, FROM_HERE, "Error getting the milestones to EOL."); - return false; - } - // |StringToMilestonesToEol()| will return |kMilestonesToEolNone| for invalid - // values. - *out_milestones_to_eol = StringToMilestonesToEol(str_milestones_to_eol); + // StringToEolStatus will return kSupported for invalid values. + *out_eol_status = static_cast(StringToEolStatus(str_eol_status)); return true; } diff --git a/common_service.h b/common_service.h index 5244e99f..f93855d9 100644 --- a/common_service.h +++ b/common_service.h @@ -153,12 +153,9 @@ class UpdateEngineService { bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error); - // Returns the current EOL status of the device and the milestones to - // EOL if marked EOL. The values are updated on every update check and - // persisted on disk across reboots. - bool GetEolStatus(brillo::ErrorPtr* error, - int32_t* out_eol_status, - int32_t* out_milestones_to_eol); + // Returns the current end-of-life status of the device. This value is updated + // on every update check and persisted on disk across reboots. 
+ bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status); private: SystemState* system_state_; diff --git a/common_service_unittest.cc b/common_service_unittest.cc index 68b24684..65202a06 100644 --- a/common_service_unittest.cc +++ b/common_service_unittest.cc @@ -169,90 +169,19 @@ TEST_F(UpdateEngineServiceTest, ResetStatusFails) { UpdateEngineService::kErrorFailed)); } -TEST_F(UpdateEngineServiceTest, GetEolStatusTestWithMilestonesDefault) { +TEST_F(UpdateEngineServiceTest, GetEolStatusTest) { FakePrefs fake_prefs; fake_system_state_.set_prefs(&fake_prefs); - // The default value for EOL be |kSupported| and milestone - // |kMilestonesToEolNone|. + // The default value should be "supported". int32_t eol_status = static_cast(EolStatus::kEol); - MilestonesToEol milestones_to_eol = kMilestonesToEolNone; - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); - EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); -} - -TEST_F(UpdateEngineServiceTest, GetEolStatusMilestonesToEolNone) { - FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); - int32_t eol_status = static_cast(EolStatus::kEol); - MilestonesToEol milestones_to_eol = kMilestonesToEolNone; - - // Device is supported and no milestones to EOL set. - fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSupported); - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status)); EXPECT_EQ(nullptr, error_); EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); - EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); - // Device is security only and no milestones to EOL set. - fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSecurityOnly); - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); + fake_prefs.SetString(kPrefsOmahaEolStatus, "security-only"); + EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status)); EXPECT_EQ(nullptr, error_); EXPECT_EQ(EolStatus::kSecurityOnly, static_cast(eol_status)); - EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); - - // Device is EOL and no milestones to EOL set. - fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusEol); - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kEol, static_cast(eol_status)); - EXPECT_EQ(kMilestonesToEolNone, milestones_to_eol); -} - -TEST_F(UpdateEngineServiceTest, GetEolStatusMilestonesToEolTwo) { - FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); - int32_t eol_status = static_cast(EolStatus::kEol); - MilestonesToEol milestones_to_eol = kMilestonesToEolNone; - - // Device is supported and milestones to EOL is n-2. - fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSupported); - fake_prefs.SetString(kPrefsOmahaMilestonesToEol, "2"); - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); - EXPECT_EQ(2, milestones_to_eol); - - // Device is security only and milestones to EOL is n-2. 
- fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusSecurityOnly); - fake_prefs.SetString(kPrefsOmahaMilestonesToEol, "2"); - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kSecurityOnly, static_cast(eol_status)); - EXPECT_EQ(2, milestones_to_eol); -} - -TEST_F(UpdateEngineServiceTest, GetEolStatusMilestonesToEolZero) { - FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); - int32_t eol_status = static_cast(EolStatus::kEol); - MilestonesToEol milestones_to_eol = kMilestonesToEolNone; - - // Device is EOL and milestones to EOL is n. - fake_prefs.SetString(kPrefsOmahaEolStatus, kEolStatusEol); - fake_prefs.SetString(kPrefsOmahaMilestonesToEol, "0"); - EXPECT_TRUE( - common_service_.GetEolStatus(&error_, &eol_status, &milestones_to_eol)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kEol, static_cast(eol_status)); - EXPECT_EQ(0, milestones_to_eol); } } // namespace chromeos_update_engine diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index 5671fde9..a1831476 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -139,7 +139,6 @@ - diff --git a/dbus_service.cc b/dbus_service.cc index c4de3e72..b0dc0766 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -213,9 +213,8 @@ bool DBusUpdateEngineService::GetLastAttemptError( } bool DBusUpdateEngineService::GetEolStatus(ErrorPtr* error, - int32_t* out_eol_status, - int32_t* out_milestone_to_eol) { - return common_->GetEolStatus(error, out_eol_status, out_milestone_to_eol); + int32_t* out_eol_status) { + return common_->GetEolStatus(error, out_eol_status); } UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state) diff --git a/dbus_service.h b/dbus_service.h index 9d796094..2babf8c7 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -157,13 +157,8 @@ class DBusUpdateEngineService bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error) override; - // Returns the current EOL status of the device in |out_eol_status| and the - // milestones to EOL of the device in |out_milestones_to_eol| for EOL devices. - // In the case that milestones to EOL doesn't exists for EOL, it will default - // to |kMilestonesToEolNone|. - bool GetEolStatus(brillo::ErrorPtr* error, - int32_t* out_eol_status, - int32_t* out_milestones_to_eol) override; + // Returns the current end-of-life status of the device in |out_eol_status|. + bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status) override; private: std::unique_ptr common_; diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 8da7e299..6c67a3b7 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -109,7 +109,6 @@ constexpr char kValNoUpdate[] = "noupdate"; // updatecheck attributes (without the underscore prefix). 
constexpr char kAttrEol[] = "eol"; -constexpr char kAttrMilestonesToEol[] = "milestones_to_eol"; constexpr char kAttrRollback[] = "rollback"; constexpr char kAttrFirmwareVersion[] = "firmware_version"; constexpr char kAttrKernelVersion[] = "kernel_version"; @@ -1316,28 +1315,13 @@ bool OmahaRequestAction::PersistCohortData(const string& prefs_key, bool OmahaRequestAction::PersistEolStatus(const map& attrs) { auto eol_attr = attrs.find(kAttrEol); - auto milestones_to_eol_attr = attrs.find(kAttrMilestonesToEol); - - bool ret = true; - if (milestones_to_eol_attr == attrs.end()) { - system_state_->prefs()->Delete(kPrefsOmahaMilestonesToEol); - if (eol_attr != attrs.end()) { - LOG(WARNING) << "Milestones to EOL missing when EOL."; - } - } else if (!system_state_->prefs()->SetString( - kPrefsOmahaMilestonesToEol, milestones_to_eol_attr->second)) { - LOG(ERROR) << "Setting milestones to EOL failed."; - ret = false; - } - - if (eol_attr == attrs.end()) { - system_state_->prefs()->Delete(kPrefsOmahaEolStatus); - } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolStatus, - eol_attr->second)) { - LOG(ERROR) << "Setting EOL failed."; - ret = false; + if (eol_attr != attrs.end()) { + return system_state_->prefs()->SetString(kPrefsOmahaEolStatus, + eol_attr->second); + } else if (system_state_->prefs()->Exists(kPrefsOmahaEolStatus)) { + return system_state_->prefs()->Delete(kPrefsOmahaEolStatus); } - return ret; + return true; } void OmahaRequestAction::ActionCompleted(ErrorCode code) { diff --git a/omaha_request_action.h b/omaha_request_action.h index 0c256a8e..8dffb5c0 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -144,7 +144,6 @@ class OmahaRequestAction : public Action, GetInstallDateWhenOOBECompletedWithValidDate); FRIEND_TEST(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges); - FRIEND_TEST(OmahaRequestActionTest, PersistEolStatusTest); friend class UpdateAttempterTest; FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha); FRIEND_TEST(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow); @@ -189,9 +188,8 @@ class OmahaRequestAction : public Action, bool PersistCohortData(const std::string& prefs_key, const std::string& new_value); - // Parse and persist the end-of-life status flag and milestones to EOL sent - // back in the updatecheck tag attributes. The flag will be validated and - // stored in the Prefs. + // Parse and persist the end-of-life status flag sent back in the updatecheck + // tag attributes. The flag will be validated and stored in the Prefs. 
bool PersistEolStatus(const std::map& attrs); // If this is an update check request, initializes diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 11633b67..8008e008 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -2012,65 +2012,6 @@ TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) { EXPECT_EQ("security-only", eol_pref); } -TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesEolTest) { - tuc_params_.http_response = - "" - ""; - tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; - tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; - - ASSERT_TRUE(TestUpdateCheck()); - - string eol_pref, milestones_to_eol_pref; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); - EXPECT_EQ("eol", eol_pref); - EXPECT_TRUE(fake_system_state_.prefs()->GetString(kPrefsOmahaMilestonesToEol, - &milestones_to_eol_pref)); - EXPECT_EQ("0", milestones_to_eol_pref); -} - -TEST_F(OmahaRequestActionTest, - ParseUpdateCheckAttributesMissingMilestonesToEolTest) { - tuc_params_.http_response = - "" - ""; - tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; - tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; - - ASSERT_TRUE(TestUpdateCheck()); - - string eol_pref, milestones_to_eol_pref; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); - EXPECT_EQ("eol", eol_pref); - EXPECT_FALSE(fake_system_state_.prefs()->Exists(kPrefsOmahaMilestonesToEol)); -} - -TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesMilestonesToEolTest) { - tuc_params_.http_response = - "" - ""; - tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; - tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; - - ASSERT_TRUE(TestUpdateCheck()); - - string eol_pref, milestones_to_eol_pref; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); - EXPECT_EQ("supported", eol_pref); - EXPECT_TRUE(fake_system_state_.prefs()->GetString(kPrefsOmahaMilestonesToEol, - &milestones_to_eol_pref)); - EXPECT_EQ("3", milestones_to_eol_pref); -} - TEST_F(OmahaRequestActionTest, NoUniqueIDTest) { tuc_params_.http_response = "invalid xml>"; tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; diff --git a/omaha_utils.cc b/omaha_utils.cc index dffa5d3a..6bd75250 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -17,15 +17,17 @@ #include "update_engine/omaha_utils.h" #include -#include namespace chromeos_update_engine { +namespace { + +// The possible string values for the end-of-life status. 
const char kEolStatusSupported[] = "supported"; const char kEolStatusSecurityOnly[] = "security-only"; const char kEolStatusEol[] = "eol"; -const MilestonesToEol kMilestonesToEolNone = -1; +} // namespace const char* EolStatusToString(EolStatus eol_status) { switch (eol_status) { @@ -48,23 +50,8 @@ EolStatus StringToEolStatus(const std::string& eol_status) { return EolStatus::kSecurityOnly; if (eol_status == kEolStatusEol) return EolStatus::kEol; - LOG(WARNING) << "Invalid EOL attribute: " << eol_status; + LOG(WARNING) << "Invalid end-of-life attribute: " << eol_status; return EolStatus::kSupported; } -std::string MilestonesToEolToString(MilestonesToEol milestones_to_eol) { - return base::IntToString(milestones_to_eol); -} - -MilestonesToEol StringToMilestonesToEol(const std::string& milestones_to_eol) { - MilestonesToEol milestone = kMilestonesToEolNone; - if (!base::StringToInt(milestones_to_eol, &milestone)) { - LOG(WARNING) << "Invalid milestones to EOL attribute: " - << milestones_to_eol; - return kMilestonesToEolNone; - } - - return milestone; -} - } // namespace chromeos_update_engine diff --git a/omaha_utils.h b/omaha_utils.h index 60004515..86145403 100644 --- a/omaha_utils.h +++ b/omaha_utils.h @@ -21,11 +21,6 @@ namespace chromeos_update_engine { -// The possible string values for the end-of-life status. -extern const char kEolStatusSupported[]; -extern const char kEolStatusSecurityOnly[]; -extern const char kEolStatusEol[]; - // The end-of-life status of the device. enum class EolStatus { kSupported = 0, @@ -33,10 +28,6 @@ enum class EolStatus { kEol, }; -using MilestonesToEol = int; -// The default milestones to EOL. -extern const MilestonesToEol kMilestonesToEolNone; - // Returns the string representation of the |eol_status|. const char* EolStatusToString(EolStatus eol_status); @@ -44,14 +35,6 @@ const char* EolStatusToString(EolStatus eol_status); // of an invalid string, the default "supported" value will be used instead. EolStatus StringToEolStatus(const std::string& eol_status); -// Returns the string representation of the |milestones_to_eol|. -std::string MilestonesToEolToString(int milestones_to_eol); - -// Converts the milestones to EOL string to an |MilestonesToEol| enum class. -// When the milestones to EOL is not an integer, the default -// |kMilestonesToEolNone| will be returned. 
-MilestonesToEol StringToMilestonesToEol(const std::string& milestones_to_eol); - } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_OMAHA_UTILS_H_ diff --git a/omaha_utils_unittest.cc b/omaha_utils_unittest.cc index 59c03660..8ceb76bf 100644 --- a/omaha_utils_unittest.cc +++ b/omaha_utils_unittest.cc @@ -39,12 +39,4 @@ TEST(OmahaUtilsTest, EolStatusTest) { EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!")); } -TEST(OmahaUtilsTest, MilestonesToEolTest) { - EXPECT_EQ(kMilestonesToEolNone, StringToMilestonesToEol("")); - EXPECT_EQ(kMilestonesToEolNone, StringToMilestonesToEol("not_a_number")); - EXPECT_EQ(1, StringToMilestonesToEol("1")); - EXPECT_EQ(0, StringToMilestonesToEol("0")); - EXPECT_EQ(-1, StringToMilestonesToEol("-1")); -} - } // namespace chromeos_update_engine diff --git a/update_engine_client.cc b/update_engine_client.cc index 9748c4d4..954e856d 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -41,7 +41,6 @@ using chromeos_update_engine::EolStatus; using chromeos_update_engine::ErrorCode; -using chromeos_update_engine::MilestonesToEol; using chromeos_update_engine::UpdateEngineStatusToString; using chromeos_update_engine::UpdateStatusToString; using chromeos_update_engine::utils::ErrorCodeToString; @@ -560,20 +559,12 @@ int UpdateEngineClient::ProcessFlags() { } if (FLAGS_eol_status) { - int eol_status, milestones_to_eol; - if (!client_->GetEolStatus(&eol_status, &milestones_to_eol)) { - LOG(ERROR) << "Error getting the end-of-life status and milestones to " - "end-of-life."; + int eol_status; + if (!client_->GetEolStatus(&eol_status)) { + LOG(ERROR) << "Error getting the end-of-life status."; } else { EolStatus eol_status_code = static_cast(eol_status); - MilestonesToEol milestones_to_eol_code = milestones_to_eol; - printf( - "EOL_STATUS=%s\n" - "MILESTONES_TO_EOL=%s\n", - EolStatusToString(eol_status_code), - chromeos_update_engine::MilestonesToEolToString( - milestones_to_eol_code) - .c_str()); + printf("EOL_STATUS=%s\n", EolStatusToString(eol_status_code)); } } From 48598e8b0d485b214610a0688cb935f86dc93b64 Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Thu, 5 Sep 2019 14:18:29 -0700 Subject: [PATCH 101/624] Remove libhwbinder/libhidltransport deps Since these were combined into libhidlbase. Bug: 135686713 Test: build only (libhwbinder/libhidltransport are empty) Change-Id: I7e7bc1e1cb29426aef799e0288ccdd5bf1c7d834 --- Android.bp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/Android.bp b/Android.bp index a13a92b7..d754d63d 100644 --- a/Android.bp +++ b/Android.bp @@ -195,7 +195,6 @@ cc_defaults { shared_libs: [ "libbootloader_message", "libfs_mgr", - "libhwbinder", "libhidlbase", "liblp", "libutils", @@ -362,7 +361,6 @@ cc_binary { recovery: { exclude_shared_libs: [ "libprotobuf-cpp-lite", - "libhwbinder", "libbrillo-stream", "libbrillo", "libchrome", @@ -638,9 +636,6 @@ cc_test { "libchrome_test_helpers", "libupdate_engine_android", ], - shared_libs: [ - "libhidltransport", - ], data: [ ":test_http_server", From c14801d0aaceefb0f67227c6ab7826061141fa12 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 3 Sep 2019 13:13:59 -0700 Subject: [PATCH 102/624] update_engine: Printf's should leverage KeyValueStore printouts Since new values may be added and removed from the printouts to the calls of update_engine-client, the use of KeyValueStore to standardize the printouts will allow for future changes without messing up the string literal to printf. 
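As a minimal sketch of the idea (the key names here are made up for illustration and are not the ones update_engine_client actually emits), brillo::KeyValueStore serializes whatever keys have been set into newline-terminated KEY=VALUE pairs, so the output format no longer depends on a hand-maintained printf format string:

    #include <cstdio>

    #include <brillo/key_value_store.h>

    int main() {
      brillo::KeyValueStore store;
      store.SetString("SOME_MESSAGE", "hello");  // hypothetical key
      store.SetBoolean("SOME_FLAG", true);       // hypothetical key
      // Prints "SOME_FLAG=true" and "SOME_MESSAGE=hello" on separate lines;
      // keys come out in sorted order.
      printf("%s", store.SaveToString().c_str());
      return 0;
    }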
BUG=none TEST=FEATURES="test" emerge-$BOARD update_engine update_engine-client TEST=test_that -b $BOARD $IP autoupdate_EOL Change-Id: I55d69a0533da2312c56d455bf6ad3f4af845c2c2 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1783594 Tested-by: Jae Hoon Kim Commit-Queue: Amin Hassani Reviewed-by: Amin Hassani --- update_engine_client.cc | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/update_engine_client.cc b/update_engine_client.cc index 954e856d..d78cee70 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -26,10 +26,12 @@ #include #include #include +#include #include #include #include #include +#include #include "update_engine/client.h" #include "update_engine/common/error_code.h" @@ -39,6 +41,7 @@ #include "update_engine/update_status.h" #include "update_engine/update_status_utils.h" +using brillo::KeyValueStore; using chromeos_update_engine::EolStatus; using chromeos_update_engine::ErrorCode; using chromeos_update_engine::UpdateEngineStatusToString; @@ -550,11 +553,18 @@ int UpdateEngineClient::ProcessFlags() { LOG(ERROR) << "Error getting last attempt error."; } else { ErrorCode code = static_cast(last_attempt_error); - printf( - "ERROR_CODE=%i\n" - "ERROR_MESSAGE=%s\n", - last_attempt_error, - ErrorCodeToString(code).c_str()); + + KeyValueStore last_attempt_error_store; +#if BASE_VER < 576279 + last_attempt_error_store.SetString( + "ERROR_CODE", base::Int64ToString(last_attempt_error)); +#else + last_attempt_error_store.SetString( + "ERROR_CODE", base::NumberToString(last_attempt_error)); +#endif + last_attempt_error_store.SetString("ERROR_MESSAGE", + ErrorCodeToString(code)); + printf("%s", last_attempt_error_store.SaveToString().c_str()); } } @@ -564,7 +574,12 @@ int UpdateEngineClient::ProcessFlags() { LOG(ERROR) << "Error getting the end-of-life status."; } else { EolStatus eol_status_code = static_cast(eol_status); - printf("EOL_STATUS=%s\n", EolStatusToString(eol_status_code)); + + KeyValueStore eol_status_store; + eol_status_store.SetString("EOL_STATUS", + EolStatusToString(eol_status_code)); + + printf("%s", eol_status_store.SaveToString().c_str()); } } From d3d84218cafbc1a95e7d6bbb775b495d1bebf4d2 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Sat, 17 Aug 2019 00:27:44 -0700 Subject: [PATCH 103/624] update_engine: Add internal error codes that used for metrics. Whenever we are dealing with some problem that require defining error codes and sending UMA metrics, we need to define a new ErrorCode enum value. These error codes then will be used to send which UMA metric value to send. Some of the UMA metrics seems to have circumvent this process by introducing their own error codes without adding them to the global list of error codes. This CL introduces three new error codes: - kInternalLibCurlError - kUnresolvedHostError - kUnresolvedHostRecovered (Technically not an error code, but fits the description and use case of it.) That are then translated to the UMA metric values we send for DownloadErrorCode. In addition, this CL moves the responsibility of sending these UMA metrics from LibCurlHttpFetcher to OmahaRequestAction which is the more correct place to send it because that's where the operations are completed (success or failure) and we can safely decide on the value of UMA without risking to send overlapping or duplicated metrics. For example, previously we send kInternalLibCurlError in conjunction with the kUnresolvedHostError. 
But doing this can hide the fact that these two error codes might be related and caused by the same underlying issue. Same goes for kUnresolvedHostError and kUnresolvedHostRecovered. If we send both these metrics at the same time, then we need to subtract the number of kUnresolvedHostRecovered from kUnresolvedHostError to figure out the number of unresolved host errors that did not recover. By exclusively sending one or the other, we can see exactly how many recovered and how many did not. Although this might change the meaning of the kUnresolvedHostError metric, in the long term it will not be an issue as all the results will converge to the new behavior. The enum.xml (chrome) is updated in crrev.com/c/1774101 BUG=None TEST=cros_workon_make --board=amd64-generic --test --noreconf update_engine Change-Id: I3c7bb5f6159a0bc3a37d55666572b9cd6730f3cb Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1759544 Reviewed-by: Nicolas Norvez Tested-by: Amin Hassani Commit-Queue: Amin Hassani --- common/error_code.h | 3 ++ common/error_code_utils.cc | 6 +++ common/http_fetcher.h | 17 +++++---- libcurl_http_fetcher.cc | 23 +++--------- libcurl_http_fetcher.h | 6 ++- libcurl_http_fetcher_unittest.cc | 62 ++++++++++++++++++++++++++++--- metrics_constants.h | 4 +- metrics_utils.cc | 10 +++++ mock_libcurl_http_fetcher.h | 37 ++++++++++++++++++ omaha_request_action.cc | 22 ++++++----- omaha_request_action.h | 5 --- payload_state.cc | 3 ++ update_manager/chromeos_policy.cc | 3 ++ 13 files changed, 152 insertions(+), 49 deletions(-) create mode 100644 mock_libcurl_http_fetcher.h diff --git a/common/error_code.h b/common/error_code.h index 252cc420..3dd74028 100644 --- a/common/error_code.h +++ b/common/error_code.h @@ -80,6 +80,9 @@ enum class ErrorCode : int { kRollbackNotPossible = 54, kFirstActiveOmahaPingSentPersistenceError = 55, kVerityCalculationError = 56, + kInternalLibCurlError = 57, + kUnresolvedHostError = 58, + kUnresolvedHostRecovered = 59, // VERY IMPORTANT! When adding new error codes: // diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index b0bbbd4a..5bcbaa40 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -161,6 +161,12 @@ string ErrorCodeToString(ErrorCode code) { return "ErrorCode::kFirstActiveOmahaPingSentPersistenceError"; case ErrorCode::kVerityCalculationError: return "ErrorCode::kVerityCalculationError"; + case ErrorCode::kInternalLibCurlError: + return "ErrorCode::kInternalLibCurlError"; + case ErrorCode::kUnresolvedHostError: + return "ErrorCode::kUnresolvedHostError"; + case ErrorCode::kUnresolvedHostRecovered: + return "ErrorCode::kUnresolvedHostRecovered"; // Don't add a default case to let the compiler warn about newly added // error codes which should be added here. } diff --git a/common/http_fetcher.h b/common/http_fetcher.h index 94f31d75..f74a0f05 100644 --- a/common/http_fetcher.h +++ b/common/http_fetcher.h @@ -59,6 +59,12 @@ class HttpFetcher { HttpFetcherDelegate* delegate() const { return delegate_; } int http_response_code() const { return http_response_code_; } + // Returns additional error code that can't be expressed in terms of an HTTP + // response code. For example, if there was a specific internal error code in + // the objects used in the implementation of this class (like libcurl) that we + // are interested about, we can communicate it through this value. + ErrorCode GetAuxiliaryErrorCode() const { return auxiliary_error_code_; } + // Optional: Post data to the server.
The HttpFetcher should make a copy // of this data and upload it via HTTP POST during the transfer. The type of // the data is necessary for properly setting the Content-Type HTTP header. @@ -159,6 +165,10 @@ class HttpFetcher { // set to the response code when the transfer is complete. int http_response_code_; + // Set when there is an error that can't be expressed in the form of + // |http_response_code_|. + ErrorCode auxiliary_error_code_{ErrorCode::kSuccess}; + // The delegate; may be null. HttpFetcherDelegate* delegate_; @@ -209,13 +219,6 @@ class HttpFetcherDelegate { // situations. It's OK to destroy the |fetcher| object in this callback. virtual void TransferComplete(HttpFetcher* fetcher, bool successful) = 0; virtual void TransferTerminated(HttpFetcher* fetcher) {} - - // This allows |HttpFetcher| to send UMA metrics for its internal states - // (unrecoverable libcurl internal error, etc.). - virtual void ReportUpdateCheckMetrics( - metrics::CheckResult result, - metrics::CheckReaction reaction, - metrics::DownloadErrorCode download_error_code) {} }; } // namespace chromeos_update_engine diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index ad823cf2..4bea4eff 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -438,10 +438,7 @@ void LibcurlHttpFetcher::CurlPerformOnce() { // In case of an update check, we send UMA metrics and log the error. if (is_update_check_ && (retcode == CURLM_OUT_OF_MEMORY || retcode == CURLM_INTERNAL_ERROR)) { - delegate_->ReportUpdateCheckMetrics( - metrics::CheckResult::kUnset, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kInternalError); + auxiliary_error_code_ = ErrorCode::kInternalLibCurlError; LOG(ERROR) << "curl_multi_perform is in an unrecoverable error condition: " << retcode; } else if (retcode != CURLM_OK) { @@ -478,19 +475,14 @@ void LibcurlHttpFetcher::CurlPerformOnce() { if (curl_code == CURLE_COULDNT_RESOLVE_HOST) { LOG(ERROR) << "libcurl can not resolve host."; unresolved_host_state_machine_.UpdateState(true); - if (delegate_) { - delegate_->ReportUpdateCheckMetrics( - metrics::CheckResult::kUnset, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnresolvedHost); - } + auxiliary_error_code_ = ErrorCode::kUnresolvedHostError; } } // we're done! 
CleanUp(); - if (unresolved_host_state_machine_.getState() == + if (unresolved_host_state_machine_.GetState() == UnresolvedHostStateMachine::State::kRetry) { // Based on // https://curl.haxx.se/docs/todo.html#updated_DNS_server_while_running, @@ -499,14 +491,9 @@ void LibcurlHttpFetcher::CurlPerformOnce() { no_network_max_retries_++; LOG(INFO) << "Will retry after reloading resolv.conf because last attempt " "failed to resolve host."; - } else if (unresolved_host_state_machine_.getState() == + } else if (unresolved_host_state_machine_.GetState() == UnresolvedHostStateMachine::State::kRetriedSuccess) { - if (delegate_) { - delegate_->ReportUpdateCheckMetrics( - metrics::CheckResult::kUnset, - metrics::CheckReaction::kUnset, - metrics::DownloadErrorCode::kUnresolvedHostRecovered); - } + auxiliary_error_code_ = ErrorCode::kUnresolvedHostRecovered; } // TODO(petkov): This temporary code tries to deal with the case where the diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h index 8f4258db..97a9a87f 100644 --- a/libcurl_http_fetcher.h +++ b/libcurl_http_fetcher.h @@ -50,7 +50,7 @@ class UnresolvedHostStateMachine { kNotRetry = 3, }; - State getState() { return state_; } + State GetState() { return state_; } // Updates the following internal state machine: // @@ -159,6 +159,8 @@ class LibcurlHttpFetcher : public HttpFetcher { } private: + FRIEND_TEST(LibcurlHttpFetcherTest, HostResolvedTest); + // libcurl's CURLOPT_CLOSESOCKETFUNCTION callback function. Called when // closing a socket created with the CURLOPT_OPENSOCKETFUNCTION callback. static int LibcurlCloseSocketCallback(void* clientp, curl_socket_t item); @@ -168,7 +170,7 @@ class LibcurlHttpFetcher : public HttpFetcher { void ProxiesResolved(); // Asks libcurl for the http response code and stores it in the object. - void GetHttpResponseCode(); + virtual void GetHttpResponseCode(); // Returns the last |CURLcode|. CURLcode GetCurlCode(); diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc index 7f00daee..20f05b9c 100644 --- a/libcurl_http_fetcher_unittest.cc +++ b/libcurl_http_fetcher_unittest.cc @@ -19,10 +19,12 @@ #include #include +#include #include #include "update_engine/common/fake_hardware.h" #include "update_engine/common/mock_proxy_resolver.h" +#include "update_engine/mock_libcurl_http_fetcher.h" using std::string; @@ -42,7 +44,7 @@ class LibcurlHttpFetcherTest : public ::testing::Test { brillo::FakeMessageLoop loop_{nullptr}; FakeHardware fake_hardware_; - LibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_}; + MockLibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_}; UnresolvedHostStateMachine state_machine_; }; @@ -83,7 +85,7 @@ TEST_F(LibcurlHttpFetcherTest, InvalidURLTest) { int no_network_max_retries = 1; libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); - libcurl_fetcher_.BeginTransfer("not-an-URL"); + libcurl_fetcher_.BeginTransfer("not-a-URL"); while (loop_.PendingTasks()) { loop_.RunOnce(true); } @@ -92,10 +94,34 @@ TEST_F(LibcurlHttpFetcherTest, InvalidURLTest) { no_network_max_retries); } -TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) { +TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) { int no_network_max_retries = 1; libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); + libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); + + // The first time it can't resolve. 
+ loop_.RunOnce(true); + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), + ErrorCode::kUnresolvedHostError); + + while (loop_.PendingTasks()) { + loop_.RunOnce(true); + } + // The auxilary error code should've have been changed. + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), + ErrorCode::kUnresolvedHostError); + + // If libcurl fails to resolve the name, we call res_init() to reload + // resolv.conf and retry exactly once more. See crbug.com/982813 for details. + EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(), + no_network_max_retries + 1); +} + +TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { + int no_network_max_retries = 2; + libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); + // This test actually sends request to internet but according to // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are // reserved and sure to be invalid. Ideally we should mock libcurl or @@ -104,9 +130,33 @@ TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) { // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its relates) so it's // easier to mock the part that depends on internet connectivity. libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); + + // The first time it can't resolve. + loop_.RunOnce(true); + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), + ErrorCode::kUnresolvedHostError); + + // The second time, it will resolve, with error code 200 but we set the + // download size be smaller than the transfer size so it will retry again. + EXPECT_CALL(libcurl_fetcher_, GetHttpResponseCode()) + .WillOnce(testing::Invoke( + [this]() { libcurl_fetcher_.http_response_code_ = 200; })) + .WillRepeatedly(testing::Invoke( + [this]() { libcurl_fetcher_.http_response_code_ = 0; })); + libcurl_fetcher_.transfer_size_ = 10; + + // This time the host is resolved. But after that again we can't resolve + // anymore (See above). + loop_.RunOnce(true); + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), + ErrorCode::kUnresolvedHostRecovered); + while (loop_.PendingTasks()) { loop_.RunOnce(true); } + // The auxilary error code should not have been changed. + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), + ErrorCode::kUnresolvedHostRecovered); // If libcurl fails to resolve the name, we call res_init() to reload // resolv.conf and retry exactly once more. See crbug.com/982813 for details. @@ -117,21 +167,21 @@ TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) { TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) { state_machine_.UpdateState(true); state_machine_.UpdateState(true); - EXPECT_EQ(state_machine_.getState(), + EXPECT_EQ(state_machine_.GetState(), UnresolvedHostStateMachine::State::kNotRetry); } TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetrySucceedTest) { state_machine_.UpdateState(true); state_machine_.UpdateState(false); - EXPECT_EQ(state_machine_.getState(), + EXPECT_EQ(state_machine_.GetState(), UnresolvedHostStateMachine::State::kRetriedSuccess); } TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineNoRetryTest) { state_machine_.UpdateState(false); state_machine_.UpdateState(false); - EXPECT_EQ(state_machine_.getState(), + EXPECT_EQ(state_machine_.GetState(), UnresolvedHostStateMachine::State::kInit); } diff --git a/metrics_constants.h b/metrics_constants.h index 167e577c..db21d905 100644 --- a/metrics_constants.h +++ b/metrics_constants.h @@ -64,10 +64,10 @@ enum class DownloadErrorCode { // calling res_init() can recover. 
kUnresolvedHostRecovered = 97, // This error is reported when libcurl returns CURLE_COULDNT_RESOLVE_HOST. - kUnresolvedHost = 98, + kUnresolvedHostError = 98, // This error is reported when libcurl has an internal error that // update_engine can't recover from. - kInternalError = 99, + kInternalLibCurlError = 99, // This error code is used to convey that malformed input was given // to the utils::GetDownloadErrorCode() function. This should never diff --git a/metrics_utils.cc b/metrics_utils.cc index 88c8d524..efbd067d 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -43,6 +43,9 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { return metrics::AttemptResult::kUpdateSucceededNotActive; case ErrorCode::kDownloadTransferError: + case ErrorCode::kInternalLibCurlError: + case ErrorCode::kUnresolvedHostError: + case ErrorCode::kUnresolvedHostRecovered: return metrics::AttemptResult::kPayloadDownloadError; case ErrorCode::kDownloadInvalidMetadataSize: @@ -168,6 +171,13 @@ metrics::DownloadErrorCode GetDownloadErrorCode(ErrorCode code) { case ErrorCode::kDownloadTransferError: return metrics::DownloadErrorCode::kDownloadError; + case ErrorCode::kInternalLibCurlError: + return metrics::DownloadErrorCode::kInternalLibCurlError; + case ErrorCode::kUnresolvedHostError: + return metrics::DownloadErrorCode::kUnresolvedHostError; + case ErrorCode::kUnresolvedHostRecovered: + return metrics::DownloadErrorCode::kUnresolvedHostRecovered; + // All of these error codes are not related to downloading so break // out so we can warn and return InputMalformed. case ErrorCode::kSuccess: diff --git a/mock_libcurl_http_fetcher.h b/mock_libcurl_http_fetcher.h new file mode 100644 index 00000000..a8ef0f44 --- /dev/null +++ b/mock_libcurl_http_fetcher.h @@ -0,0 +1,37 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_ +#define UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_ + +#include + +#include "update_engine/connection_manager_interface.h" + +namespace chromeos_update_engine { + +class MockLibcurlHttpFetcher : public LibcurlHttpFetcher { + public: + MockLibcurlHttpFetcher(ProxyResolver* proxy_resolver, + HardwareInterface* hardware) + : LibcurlHttpFetcher(proxy_resolver, hardware) {} + + MOCK_METHOD0(GetHttpResponseCode, void()); +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_ diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 6c67a3b7..4d865868 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -851,6 +851,17 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, return; } + ErrorCode aux_error_code = fetcher->GetAuxiliaryErrorCode(); + if (aux_error_code != ErrorCode::kSuccess) { + metrics::DownloadErrorCode download_error_code = + metrics_utils::GetDownloadErrorCode(aux_error_code); + system_state_->metrics_reporter()->ReportUpdateCheckMetrics( + system_state_, + metrics::CheckResult::kUnset, + metrics::CheckReaction::kUnset, + download_error_code); + } + if (!successful) { LOG(ERROR) << "Omaha request network transfer failed."; int code = GetHTTPResponseCode(); @@ -980,14 +991,6 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, } } -void OmahaRequestAction::ReportUpdateCheckMetrics( - metrics::CheckResult result, - metrics::CheckReaction reaction, - metrics::DownloadErrorCode download_error_code) { - system_state_->metrics_reporter()->ReportUpdateCheckMetrics( - system_state_, result, reaction, download_error_code); -} - void OmahaRequestAction::CompleteProcessing() { ScopedActionCompleter completer(processor_, this); OmahaResponse& output_object = const_cast(GetOutputObject()); @@ -1383,7 +1386,8 @@ void OmahaRequestAction::ActionCompleted(ErrorCode code) { break; } - ReportUpdateCheckMetrics(result, reaction, download_error_code); + system_state_->metrics_reporter()->ReportUpdateCheckMetrics( + system_state_, result, reaction, download_error_code); } bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response, diff --git a/omaha_request_action.h b/omaha_request_action.h index 8dffb5c0..12d36d94 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -126,11 +126,6 @@ class OmahaRequestAction : public Action, void TransferComplete(HttpFetcher* fetcher, bool successful) override; - void ReportUpdateCheckMetrics( - metrics::CheckResult result, - metrics::CheckReaction reaction, - metrics::DownloadErrorCode download_error_code) override; - // Returns true if this is an Event request, false if it's an UpdateCheck. 
bool IsEvent() const { return event_.get() != nullptr; } diff --git a/payload_state.cc b/payload_state.cc index a6c36201..355552ec 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -365,6 +365,9 @@ void PayloadState::UpdateFailed(ErrorCode error) { case ErrorCode::kNoUpdate: case ErrorCode::kRollbackNotPossible: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: + case ErrorCode::kInternalLibCurlError: + case ErrorCode::kUnresolvedHostError: + case ErrorCode::kUnresolvedHostRecovered: LOG(INFO) << "Not incrementing URL index or failure count for this error"; break; diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index fab111a2..dd6cc8d6 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -151,6 +151,9 @@ bool HandleErrorCode(ErrorCode err_code, int* url_num_error_p) { case ErrorCode::kNoUpdate: case ErrorCode::kRollbackNotPossible: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: + case ErrorCode::kInternalLibCurlError: + case ErrorCode::kUnresolvedHostError: + case ErrorCode::kUnresolvedHostRecovered: LOG(INFO) << "Not changing URL index or failure count due to error " << chromeos_update_engine::utils::ErrorCodeToString(err_code) << " (" << static_cast(err_code) << ")"; From 2d311164ce2de193cd837a6922c10204d4c882a9 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 12 Sep 2019 11:20:17 -0700 Subject: [PATCH 104/624] update_engine: Change general maintainers to Android ones Sometimes Chromium OS people add one of the "general maintainers" as reviewers instead of Chromium OS reviewers. This patch changes "general" to "Android et. al." so people don't keep adding Android people as reviewers on Chromium OS patches. BUG=none TEST=none Change-Id: I8826312a34f6247b2e24c51b776992a021914061 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1801062 Reviewed-by: Nicolas Norvez Reviewed-by: Sen Jiang Commit-Queue: Amin Hassani Tested-by: Amin Hassani --- OWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index 4e476058..aa858bbc 100644 --- a/OWNERS +++ b/OWNERS @@ -1,6 +1,6 @@ set noparent -# Current general maintainers: +# Android et. al. maintainers: deymo@google.com senj@google.com From 87ea73fe5a48c54ad56ba769375d180d1ce5c614 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 12 Sep 2019 13:07:37 -0700 Subject: [PATCH 105/624] dynamic_partition_test_utils: use libstorage_literals_headers We have a better library now.
Test: boot_control_android_unittest Change-Id: Id4a4ecd9e0883511c4edbe4d548ede5d57d372f0 --- Android.bp | 4 ++++ dynamic_partition_test_utils.h | 13 ++----------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/Android.bp b/Android.bp index d754d63d..4640cb2c 100644 --- a/Android.bp +++ b/Android.bp @@ -637,6 +637,10 @@ cc_test { "libupdate_engine_android", ], + header_libs: [ + "libstorage_literals_headers", + ], + data: [ ":test_http_server", ":test_subprocess", diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h index 574d30e8..61d8e0a3 100644 --- a/dynamic_partition_test_utils.h +++ b/dynamic_partition_test_utils.h @@ -30,6 +30,7 @@ #include #include #include +#include #include "update_engine/common/boot_control_interface.h" @@ -41,6 +42,7 @@ using testing::MakeMatcher; using testing::Matcher; using testing::MatcherInterface; using testing::MatchResultListener; +using namespace android::storage_literals; // NOLINT(build/namespaces) constexpr const uint32_t kMaxNumSlots = 2; constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"}; @@ -59,17 +61,6 @@ using PartitionSuffixSizes = std::map; using PartitionMetadata = BootControlInterface::PartitionMetadata; -// C++ standards do not allow uint64_t (aka unsigned long) to be the parameter -// of user-defined literal operators. -// clang-format off -inline constexpr unsigned long long operator"" _MiB(unsigned long long x) { // NOLINT - return x << 20; -} -inline constexpr unsigned long long operator"" _GiB(unsigned long long x) { // NOLINT - return x << 30; -} -// clang-format on - constexpr uint64_t kDefaultGroupSize = 5_GiB; // Super device size. 1 MiB for metadata. constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB; From 0aa7e518afffa655585995416ec91a92ffacd1cb Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 10 Sep 2019 13:20:48 -0700 Subject: [PATCH 106/624] update_engine: AddressSanitizer PostinstallerRunnerAction RunAsRoot leak fix The reason why this change is required is because of |FileDescriptorWatcher::Controller| within |PostinstallRunnerAction|. If the delay is too short (previously 10ms) it was possible for the posted task within the |FileDescriptorWatcher::Controller| to be present after that of the task which stops the processor. In order to mitigate this issue, the process of stopping the processor should be a |PostDelayedTask()| instead of a direct call in stopping the processor to ensure the processor stops after |Watcher::StartWatching()| happens. Within |FileDescriptorWatcher::Controller| it states: "If the MessageLoopForIO is deleted before Watcher::StartWatching() runs, |watcher_| is leaked." 
BUG=chromium:989749 TEST=FEATURES="nostrip" ./build_packages --board amd64-generic --withdebugsymbols TEST=FEATURES="test nostrip -splitdebug" USE="asan debug" CFLAGS="-g -O2" CXXFLAGS="-g -O2" emerge-amd64-generic update_engine TEST=sudo cat /build/amd64-generic/tmp/portage/chromeos-base/update_engine-9999/temp/asan_logs/asan.10 | asan_symbolize.py -s /build/amd64-generic -d > /tmp/asan.log Change-Id: I88f0a86686830553fea150d0188b2851753c2f94 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1796613 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- payload_consumer/postinstall_runner_action_unittest.cc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc index 04c81fac..84f2c2c4 100644 --- a/payload_consumer/postinstall_runner_action_unittest.cc +++ b/payload_consumer/postinstall_runner_action_unittest.cc @@ -142,7 +142,14 @@ class PostinstallRunnerActionTest : public ::testing::Test { base::TimeDelta::FromMilliseconds(10)); } else { CHECK(processor_); - processor_->StopProcessing(); + // Must |PostDelayedTask()| here to be safe that |FileDescriptorWatcher| + // doesn't leak memory, do not directly call |StopProcessing()|. + loop_.PostDelayedTask( + FROM_HERE, + base::Bind( + [](ActionProcessor* processor) { processor->StopProcessing(); }, + base::Unretained(processor_)), + base::TimeDelta::FromMilliseconds(100)); } } From 3d31b0e395bce689d617f31cdcfb7a3c169cfb7a Mon Sep 17 00:00:00 2001 From: Ben Chan Date: Wed, 18 Sep 2019 16:44:37 -0700 Subject: [PATCH 107/624] update_engine: update OWNERS BUG=None TEST=None Change-Id: Iec84f90a345feaaaa225beaec1e09b7c409b3e51 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1810872 Reviewed-by: Amin Hassani Tested-by: Ben Chan --- OWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/OWNERS b/OWNERS index aa858bbc..75fd9f16 100644 --- a/OWNERS +++ b/OWNERS @@ -5,7 +5,6 @@ deymo@google.com senj@google.com # Chromium OS maintainers: -benchan@google.com ahassani@google.com kimjae@google.com From 2f8594e369b7eb14809a210c9f8450b158cb9f06 Mon Sep 17 00:00:00 2001 From: Felipe Andrade Date: Thu, 12 Sep 2019 20:11:47 +0200 Subject: [PATCH 108/624] Update links to policy list page Replace the references to the old policy list page by the new page. BUG=chromium:987706 TEST=None Change-Id: I8bcaf95cdf6902e7a038be75ca2522196fba522a Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1801635 Reviewed-by: Amin Hassani Reviewed-by: Maksim Ivanov Reviewed-by: Felipe Andrade Tested-by: Felipe Andrade Tested-by: Felipe Andrade Commit-Queue: Amin Hassani --- omaha_request_params.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omaha_request_params.h b/omaha_request_params.h index 7b281da8..6b579ec2 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -387,7 +387,7 @@ class OmahaRequestParams { // Token used when making an update request for a specific build. // For example: Token for a Quick Fix Build: - // https://www.chromium.org/administrators/policy-list-3#DeviceQuickFixBuildToken. 
+ // https://cloud.google.com/docs/chrome-enterprise/policies/?policy=DeviceQuickFixBuildToken std::string autoupdate_token_; DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams); From 13d41cb2accc1e2e1271b22a53f0dce6db0493f5 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 16 Sep 2019 13:18:22 -0700 Subject: [PATCH 109/624] [REFACTOR] Pass DeltaArchiveManifest to DynamicPartitionControl DynamicPartitionControl need the list of operations to calculate COW sizes. - Remove BootControlInterface::PartitionMetadata. Replace all references with DeltaArchiveManifest. DeltaArchiveManifest has all information that PartitionMetadata has. - Rename all InitPartitionMetadata to PreparePartitionsForUpdate - Change all PreparePartitionsForUpdate to use new signature Bug: 138816109 Test: update_enigne_unittests --gtest_filter=*BootControl*:*Dynamic* Change-Id: I4389ba2b1801addf8c3bc8395e2ea6a9a3ed27a0 --- boot_control_android.cc | 12 +- boot_control_android.h | 8 +- boot_control_android_unittest.cc | 22 +-- boot_control_chromeos.cc | 6 +- boot_control_chromeos.h | 6 +- common/boot_control_interface.h | 30 +--- common/boot_control_stub.cc | 6 +- common/boot_control_stub.h | 6 +- common/fake_boot_control.h | 6 +- dynamic_partition_control_android.cc | 58 ++++--- dynamic_partition_control_android.h | 15 +- dynamic_partition_control_android_unittest.cc | 159 +++++++++--------- dynamic_partition_control_interface.h | 5 +- dynamic_partition_test_utils.h | 149 ++++++++++------ mock_dynamic_partition_control.h | 4 +- payload_consumer/delta_performer.cc | 42 +---- payload_consumer/delta_performer.h | 2 +- 17 files changed, 276 insertions(+), 260 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index ce86666c..4c998b1b 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -39,8 +39,6 @@ using android::hardware::boot::V1_0::BoolResult; using android::hardware::boot::V1_0::CommandResult; using android::hardware::boot::V1_0::IBootControl; using Slot = chromeos_update_engine::BootControlInterface::Slot; -using PartitionMetadata = - chromeos_update_engine::BootControlInterface::PartitionMetadata; namespace { @@ -277,9 +275,9 @@ bool BootControlAndroid::MarkBootSuccessfulAsync( brillo::MessageLoop::kTaskIdNull; } -bool BootControlAndroid::InitPartitionMetadata( +bool BootControlAndroid::PreparePartitionsForUpdate( Slot target_slot, - const PartitionMetadata& partition_metadata, + const DeltaArchiveManifest& manifest, bool update_metadata) { if (fs_mgr_overlayfs_is_setup()) { // Non DAP devices can use overlayfs as well. @@ -294,14 +292,14 @@ bool BootControlAndroid::InitPartitionMetadata( auto source_slot = GetCurrentSlot(); if (target_slot == source_slot) { - LOG(ERROR) << "Cannot call InitPartitionMetadata on current slot."; + LOG(ERROR) << "Cannot call PreparePartitionsForUpdate on current slot."; return false; } // Although the current build supports dynamic partitions, the given payload // doesn't use it for target partitions. This could happen when applying a // retrofit update. Skip updating the partition metadata for the target slot. 
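  // Put differently: such a payload carries no dynamic_partition_metadata
  // groups at all, so the emptiness check below is the whole detection step.
  // When the list is empty, |is_target_dynamic_| stays false and this function
  // returns early without touching the super metadata.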
- is_target_dynamic_ = !partition_metadata.groups.empty(); + is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty(); if (!is_target_dynamic_) { return true; } @@ -311,7 +309,7 @@ bool BootControlAndroid::InitPartitionMetadata( } return dynamic_control_->PreparePartitionsForUpdate( - source_slot, target_slot, partition_metadata); + source_slot, target_slot, manifest); } } // namespace chromeos_update_engine diff --git a/boot_control_android.h b/boot_control_android.h index a6f33bed..65543ca2 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -51,9 +51,9 @@ class BootControlAndroid : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - bool InitPartitionMetadata(Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) override; + bool PreparePartitionsForUpdate(Slot slot, + const DeltaArchiveManifest& manifest, + bool update_metadata) override; void Cleanup() override; private: @@ -84,7 +84,7 @@ class BootControlAndroid : public BootControlInterface { const std::string& partition_name_suffix) const; // Whether the target partitions should be loaded as dynamic partitions. Set - // by InitPartitionMetadata() per each update. + // by PreparePartitionsForUpdate() per each update. bool is_target_dynamic_{false}; DISALLOW_COPY_AND_ASSIGN(BootControlAndroid); diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index 3b921912..f090de2f 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -102,7 +102,7 @@ class BootControlAndroidTest : public ::testing::Test { LoadMetadataBuilder(GetSuperDevice(slot), slot)) .Times(AnyNumber()) .WillRepeatedly(Invoke([sizes](auto, auto) { - return NewFakeMetadata(PartitionSuffixSizesToMetadata(sizes)); + return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes)); })); } @@ -125,11 +125,11 @@ class BootControlAndroidTest : public ::testing::Test { })); } - bool InitPartitionMetadata(uint32_t slot, - PartitionSizes partition_sizes, - bool update_metadata = true) { - auto m = PartitionSizesToMetadata(partition_sizes); - return bootctl_.InitPartitionMetadata(slot, m, update_metadata); + bool PreparePartitionsForUpdate(uint32_t slot, + PartitionSizes partition_sizes, + bool update_metadata = true) { + auto m = PartitionSizesToManifest(partition_sizes); + return bootctl_.PreparePartitionsForUpdate(slot, m, update_metadata); } BootControlAndroid bootctl_; // BootControlAndroid under test. @@ -155,9 +155,9 @@ TEST_P(BootControlAndroidTestP, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); - // Not calling through BootControlAndroidTest::InitPartitionMetadata(), since - // we don't want any default group in the PartitionMetadata. - EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {}, true)); + // Not calling through BootControlAndroidTest::PreparePartitionsForUpdate(), + // since we don't want any default group in the PartitionMetadata. + EXPECT_TRUE(bootctl_.PreparePartitionsForUpdate(target(), {}, true)); // Should use dynamic source partitions. 
EXPECT_CALL(dynamicControl(), GetState(S("system"))) @@ -197,7 +197,7 @@ TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) { {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); - EXPECT_TRUE(InitPartitionMetadata( + EXPECT_TRUE(PreparePartitionsForUpdate( target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false)); // Dynamic partition "system". @@ -240,7 +240,7 @@ INSTANTIATE_TEST_CASE_P(BootControlAndroidTest, TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) { SetSlots({1, 1}); - EXPECT_FALSE(InitPartitionMetadata(target(), {})) + EXPECT_FALSE(PreparePartitionsForUpdate(target(), {})) << "Should not be able to apply to current slot."; } diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index ccba316c..7e748d5e 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -326,10 +326,8 @@ int BootControlChromeOS::GetPartitionNumber( return -1; } -bool BootControlChromeOS::InitPartitionMetadata( - Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) { +bool BootControlChromeOS::PreparePartitionsForUpdate( + Slot slot, const DeltaArchiveManifest& manifest, bool update_metadata) { return true; } diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index f3682e9f..29841c91 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -50,9 +50,9 @@ class BootControlChromeOS : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - bool InitPartitionMetadata(Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) override; + bool PreparePartitionsForUpdate(Slot slot, + const DeltaArchiveManifest& manifest, + bool update_metadata) override; void Cleanup() override; private: diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h index 392d7851..9bf639a6 100644 --- a/common/boot_control_interface.h +++ b/common/boot_control_interface.h @@ -25,6 +25,8 @@ #include #include +#include "update_engine/update_metadata.pb.h" + namespace chromeos_update_engine { // The abstract boot control interface defines the interaction with the @@ -35,19 +37,6 @@ class BootControlInterface { public: using Slot = unsigned int; - struct PartitionMetadata { - struct Partition { - std::string name; - uint64_t size; - }; - struct Group { - std::string name; - uint64_t size; - std::vector partitions; - }; - std::vector groups; - }; - static const Slot kInvalidSlot = UINT_MAX; virtual ~BootControlInterface() = default; @@ -67,9 +56,9 @@ class BootControlInterface { // The |slot| number must be between 0 and GetNumSlots() - 1 and the // |partition_name| is a platform-specific name that identifies a partition on // every slot. In order to access the dynamic partitions in the target slot, - // InitPartitionMetadata() must be called (once per payload) prior to calling - // this function. On success, returns true and stores the block device in - // |device|. + // PreparePartitionsForUpdate() must be called (once per payload) prior to + // calling this function. On success, returns true and stores the block device + // in |device|. virtual bool GetPartitionDevice(const std::string& partition_name, Slot slot, std::string* device) const = 0; @@ -96,12 +85,11 @@ class BootControlInterface { // Initializes the metadata of the underlying partitions for a given |slot| // and sets up the states for accessing dynamic partitions. 
- // |partition_metadata| will be written to the specified |slot| if + // Metadata will be written to the specified |slot| if // |update_metadata| is set. - virtual bool InitPartitionMetadata( - Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) = 0; + virtual bool PreparePartitionsForUpdate(Slot slot, + const DeltaArchiveManifest& manifest, + bool update_metadata) = 0; // Do necessary clean-up operations after the whole update. virtual void Cleanup() = 0; diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc index 0fe8a989..b10e82f5 100644 --- a/common/boot_control_stub.cc +++ b/common/boot_control_stub.cc @@ -59,10 +59,8 @@ bool BootControlStub::MarkBootSuccessfulAsync( return false; } -bool BootControlStub::InitPartitionMetadata( - Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) { +bool BootControlStub::PreparePartitionsForUpdate( + Slot slot, const DeltaArchiveManifest& manifest, bool update_metadata) { LOG(ERROR) << __FUNCTION__ << " should never be called."; return false; } diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h index 8dfaffc6..f2973a28 100644 --- a/common/boot_control_stub.h +++ b/common/boot_control_stub.h @@ -45,9 +45,9 @@ class BootControlStub : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - bool InitPartitionMetadata(Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) override; + bool PreparePartitionsForUpdate(Slot slot, + const DeltaArchiveManifest& manifest, + bool update_metadata) override; void Cleanup() override; private: diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h index 3d650758..11810d10 100644 --- a/common/fake_boot_control.h +++ b/common/fake_boot_control.h @@ -74,9 +74,9 @@ class FakeBootControl : public BootControlInterface { return true; } - bool InitPartitionMetadata(Slot slot, - const PartitionMetadata& partition_metadata, - bool update_metadata) override { + bool PreparePartitionsForUpdate(Slot slot, + const DeltaArchiveManifest& manifest, + bool update_metadata) override { return true; } diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index b9732322..e351dbdf 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -16,6 +16,7 @@ #include "update_engine/dynamic_partition_control_android.h" +#include #include #include #include @@ -48,8 +49,6 @@ using android::fs_mgr::SlotSuffixForSlotNumber; namespace chromeos_update_engine { -using PartitionMetadata = BootControlInterface::PartitionMetadata; - constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions"; constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; @@ -309,14 +308,14 @@ bool DynamicPartitionControlAndroid::GetDeviceDir(std::string* out) { bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, - const PartitionMetadata& partition_metadata) { + const DeltaArchiveManifest& manifest) { const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); // Unmap all the target dynamic partitions because they would become // inconsistent with the new metadata. 
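  // For reference, the manifest fields walked in this function look roughly
  // like the following in textproto form (field names taken from the accessors
  // used below; sizes purely illustrative):
  //   dynamic_partition_metadata {
  //     groups { name: "android" size: 3221225472 partition_names: "system" }
  //     groups { name: "oem" size: 2147483648 partition_names: "vendor" }
  //   }
  //   partitions { partition_name: "system" new_partition_info { size: 2147483648 } }
  //   partitions { partition_name: "vendor" new_partition_info { size: 1073741824 } }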
- for (const auto& group : partition_metadata.groups) { - for (const auto& partition : group.partitions) { - if (!UnmapPartitionOnDeviceMapper(partition.name + target_suffix)) { + for (const auto& group : manifest.dynamic_partition_metadata().groups()) { + for (const auto& partition_name : group.partition_names()) { + if (!UnmapPartitionOnDeviceMapper(partition_name + target_suffix)) { return false; } } @@ -337,8 +336,7 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( return false; } - if (!UpdatePartitionMetadata( - builder.get(), target_slot, partition_metadata)) { + if (!UpdatePartitionMetadata(builder.get(), target_slot, manifest)) { return false; } @@ -355,13 +353,13 @@ std::string DynamicPartitionControlAndroid::GetSuperPartitionName( bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( MetadataBuilder* builder, uint32_t target_slot, - const PartitionMetadata& partition_metadata) { + const DeltaArchiveManifest& manifest) { const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); DeleteGroupsWithSuffix(builder, target_suffix); uint64_t total_size = 0; - for (const auto& group : partition_metadata.groups) { - total_size += group.size; + for (const auto& group : manifest.dynamic_partition_metadata().groups()) { + total_size += group.size(); } std::string expr; @@ -378,18 +376,36 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( return false; } - for (const auto& group : partition_metadata.groups) { - auto group_name_suffix = group.name + target_suffix; - if (!builder->AddGroup(group_name_suffix, group.size)) { + // name of partition(e.g. "system") -> size in bytes + std::map partition_sizes; + for (const auto& partition : manifest.partitions()) { + partition_sizes.emplace(partition.partition_name(), + partition.new_partition_info().size()); + } + + for (const auto& group : manifest.dynamic_partition_metadata().groups()) { + auto group_name_suffix = group.name() + target_suffix; + if (!builder->AddGroup(group_name_suffix, group.size())) { LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size " - << group.size; + << group.size(); return false; } LOG(INFO) << "Added group " << group_name_suffix << " with size " - << group.size; + << group.size(); + + for (const auto& partition_name : group.partition_names()) { + auto partition_sizes_it = partition_sizes.find(partition_name); + if (partition_sizes_it == partition_sizes.end()) { + // TODO(tbao): Support auto-filling partition info for framework-only + // OTA. + LOG(ERROR) << "dynamic_partition_metadata contains partition " + << partition_name << " but it is not part of the manifest. " + << "This is not supported."; + return false; + } + uint64_t partition_size = partition_sizes_it->second; - for (const auto& partition : group.partitions) { - auto partition_name_suffix = partition.name + target_suffix; + auto partition_name_suffix = partition_name + target_suffix; Partition* p = builder->AddPartition( partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY); if (!p) { @@ -397,13 +413,13 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( << " to group " << group_name_suffix; return false; } - if (!builder->ResizePartition(p, partition.size)) { + if (!builder->ResizePartition(p, partition_size)) { LOG(ERROR) << "Cannot resize partition " << partition_name_suffix - << " to size " << partition.size << ". Not enough space?"; + << " to size " << partition_size << ". 
Not enough space?"; return false; } LOG(INFO) << "Added partition " << partition_name_suffix << " to group " - << group_name_suffix << " with size " << partition.size; + << group_name_suffix << " with size " << partition_size; } } diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index d743e6e5..0907236a 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -44,10 +44,10 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { std::unique_ptr LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot) override; - bool PreparePartitionsForUpdate(uint32_t source_slot, - uint32_t target_slot, - const BootControlInterface::PartitionMetadata& - partition_metadata) override; + bool PreparePartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest) override; bool GetDeviceDir(std::string* path) override; std::string GetSuperPartitionName(uint32_t slot) override; @@ -94,10 +94,9 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // Update |builder| according to |partition_metadata|, assuming the device // does not have Virtual A/B. - bool UpdatePartitionMetadata( - android::fs_mgr::MetadataBuilder* builder, - uint32_t target_slot, - const BootControlInterface::PartitionMetadata& partition_metadata); + bool UpdatePartitionMetadata(android::fs_mgr::MetadataBuilder* builder, + uint32_t target_slot, + const DeltaArchiveManifest& manifest); DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); }; diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 1a3f6647..552774e6 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -86,7 +86,7 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke([sizes](auto, auto, auto) { - return NewFakeMetadata(PartitionSuffixSizesToMetadata(sizes)); + return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes)); })); } @@ -112,7 +112,7 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { } bool PreparePartitionsForUpdate(const PartitionSizes& partition_sizes) { return dynamicControl().PreparePartitionsForUpdate( - source(), target(), PartitionSizesToMetadata(partition_sizes)); + source(), target(), PartitionSizesToManifest(partition_sizes)); } void SetSlots(const TestParam& slots) { slots_ = slots; } @@ -125,24 +125,24 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { const PartitionSizes& update_metadata, const PartitionSuffixSizes& expected) { return UpdatePartitionMetadata( - PartitionSuffixSizesToMetadata(source_metadata), - PartitionSizesToMetadata(update_metadata), - PartitionSuffixSizesToMetadata(expected)); + PartitionSuffixSizesToManifest(source_metadata), + PartitionSizesToManifest(update_metadata), + PartitionSuffixSizesToManifest(expected)); } testing::AssertionResult UpdatePartitionMetadata( - const PartitionMetadata& source_metadata, - const PartitionMetadata& update_metadata, - const PartitionMetadata& expected) { + const DeltaArchiveManifest& source_manifest, + const DeltaArchiveManifest& update_manifest, + const DeltaArchiveManifest& expected) { return UpdatePartitionMetadata( - source_metadata, update_metadata, MetadataMatches(expected)); + source_manifest, update_manifest, 
MetadataMatches(expected)); } testing::AssertionResult UpdatePartitionMetadata( - const PartitionMetadata& source_metadata, - const PartitionMetadata& update_metadata, + const DeltaArchiveManifest& source_manifest, + const DeltaArchiveManifest& update_manifest, const Matcher& matcher) { - auto super_metadata = NewFakeMetadata(source_metadata); + auto super_metadata = NewFakeMetadata(source_manifest); if (!module_->UpdatePartitionMetadata( - super_metadata.get(), target(), update_metadata)) { + super_metadata.get(), target(), update_manifest)) { return testing::AssertionFailure() << "UpdatePartitionMetadataInternal failed"; } @@ -290,112 +290,115 @@ INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest, class DynamicPartitionControlAndroidGroupTestP : public DynamicPartitionControlAndroidTestP { public: - PartitionMetadata source_metadata; + DeltaArchiveManifest source_manifest; void SetUp() override { DynamicPartitionControlAndroidTestP::SetUp(); - source_metadata = { - .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB), - SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB), - SimpleGroup(T("android"), 3_GiB, T("system"), 0), - SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}}; + AddGroupAndPartition( + &source_manifest, S("android"), 3_GiB, S("system"), 2_GiB); + AddGroupAndPartition(&source_manifest, S("oem"), 2_GiB, S("vendor"), 1_GiB); + AddGroupAndPartition(&source_manifest, T("android"), 3_GiB, T("system"), 0); + AddGroupAndPartition(&source_manifest, T("oem"), 2_GiB, T("vendor"), 0); } - // Return a simple group with only one partition. - PartitionMetadata::Group SimpleGroup(const string& group, - uint64_t group_size, - const string& partition, - uint64_t partition_size) { - return {.name = group, - .size = group_size, - .partitions = {{.name = partition, .size = partition_size}}}; + void AddGroupAndPartition(DeltaArchiveManifest* manifest, + const string& group, + uint64_t group_size, + const string& partition, + uint64_t partition_size) { + auto* g = AddGroup(manifest, group, group_size); + AddPartition(manifest, g, partition, partition_size); } }; // Allow to resize within group. 
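// Reading aid for the tests below: S(name) and T(name) denote |name| with the
// source and target slot suffix appended (for example "system_a" vs.
// "system_b", depending on the slot pair under test), while |update_manifest|
// uses unsuffixed names, as a real payload would.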
TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeWithinGroup) { - PartitionMetadata expected{ - .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB), - SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}}; + DeltaArchiveManifest expected; + AddGroupAndPartition(&expected, T("android"), 3_GiB, T("system"), 3_GiB); + AddGroupAndPartition(&expected, T("oem"), 2_GiB, T("vendor"), 2_GiB); - PartitionMetadata update_metadata{ - .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB), - SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}; + DeltaArchiveManifest update_manifest; + AddGroupAndPartition(&update_manifest, "android", 3_GiB, "system", 3_GiB); + AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB); EXPECT_TRUE( - UpdatePartitionMetadata(source_metadata, update_metadata, expected)); + UpdatePartitionMetadata(source_manifest, update_manifest, expected)); } TEST_P(DynamicPartitionControlAndroidGroupTestP, NotEnoughSpaceForGroup) { - PartitionMetadata update_metadata{ - .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB), - SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}}; - EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {})) + DeltaArchiveManifest update_manifest; + AddGroupAndPartition(&update_manifest, "android", 3_GiB, "system", 1_GiB), + AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 3_GiB); + EXPECT_FALSE(UpdatePartitionMetadata(source_manifest, update_manifest, {})) << "Should not be able to grow over maximum size of group"; } TEST_P(DynamicPartitionControlAndroidGroupTestP, GroupTooBig) { - PartitionMetadata update_metadata{ - .groups = {{.name = "android", .size = 3_GiB}, - {.name = "oem", .size = 3_GiB}}}; - EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {})) + DeltaArchiveManifest update_manifest; + AddGroup(&update_manifest, "android", 3_GiB); + AddGroup(&update_manifest, "oem", 3_GiB); + EXPECT_FALSE(UpdatePartitionMetadata(source_manifest, update_manifest, {})) << "Should not be able to grow over size of super / 2"; } TEST_P(DynamicPartitionControlAndroidGroupTestP, AddPartitionToGroup) { - PartitionMetadata expected{ - .groups = {{.name = T("android"), - .size = 3_GiB, - .partitions = {{.name = T("system"), .size = 2_GiB}, - {.name = T("system_ext"), .size = 1_GiB}}}}}; - PartitionMetadata update_metadata{ - .groups = {{.name = "android", - .size = 3_GiB, - .partitions = {{.name = "system", .size = 2_GiB}, - {.name = "system_ext", .size = 1_GiB}}}, - SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}; + DeltaArchiveManifest expected; + auto* g = AddGroup(&expected, T("android"), 3_GiB); + AddPartition(&expected, g, T("system"), 2_GiB); + AddPartition(&expected, g, T("system_ext"), 1_GiB); + + DeltaArchiveManifest update_manifest; + g = AddGroup(&update_manifest, "android", 3_GiB); + AddPartition(&update_manifest, g, "system", 2_GiB); + AddPartition(&update_manifest, g, "system_ext", 1_GiB); + AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB); + EXPECT_TRUE( - UpdatePartitionMetadata(source_metadata, update_metadata, expected)); + UpdatePartitionMetadata(source_manifest, update_manifest, expected)); } TEST_P(DynamicPartitionControlAndroidGroupTestP, RemovePartitionFromGroup) { - PartitionMetadata expected{ - .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}}; - PartitionMetadata update_metadata{ - .groups = {{.name = "android", .size = 3_GiB, .partitions = {}}, - SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}}; + DeltaArchiveManifest expected; + 
AddGroup(&expected, T("android"), 3_GiB); + + DeltaArchiveManifest update_manifest; + AddGroup(&update_manifest, "android", 3_GiB); + AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB); + EXPECT_TRUE( - UpdatePartitionMetadata(source_metadata, update_metadata, expected)); + UpdatePartitionMetadata(source_manifest, update_manifest, expected)); } TEST_P(DynamicPartitionControlAndroidGroupTestP, AddGroup) { - PartitionMetadata expected{ - .groups = { - SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}}; - PartitionMetadata update_metadata{ - .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB), - SimpleGroup("oem", 1_GiB, "vendor", 1_GiB), - SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}}; + DeltaArchiveManifest expected; + AddGroupAndPartition( + &expected, T("new_group"), 2_GiB, T("new_partition"), 2_GiB); + + DeltaArchiveManifest update_manifest; + AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB); + AddGroupAndPartition(&update_manifest, "oem", 1_GiB, "vendor", 1_GiB); + AddGroupAndPartition( + &update_manifest, "new_group", 2_GiB, "new_partition", 2_GiB); EXPECT_TRUE( - UpdatePartitionMetadata(source_metadata, update_metadata, expected)); + UpdatePartitionMetadata(source_manifest, update_manifest, expected)); } TEST_P(DynamicPartitionControlAndroidGroupTestP, RemoveGroup) { - PartitionMetadata update_metadata{ - .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}}; + DeltaArchiveManifest update_manifest; + AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB); EXPECT_TRUE(UpdatePartitionMetadata( - source_metadata, update_metadata, Not(HasGroup(T("oem"))))); + source_manifest, update_manifest, Not(HasGroup(T("oem"))))); } TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeGroup) { - PartitionMetadata expected{ - .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB), - SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}}; - PartitionMetadata update_metadata{ - .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB), - SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}}; + DeltaArchiveManifest expected; + AddGroupAndPartition(&expected, T("android"), 2_GiB, T("system"), 2_GiB); + AddGroupAndPartition(&expected, T("oem"), 3_GiB, T("vendor"), 3_GiB); + DeltaArchiveManifest update_manifest; + AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB), + AddGroupAndPartition(&update_manifest, "oem", 3_GiB, "vendor", 3_GiB); EXPECT_TRUE( - UpdatePartitionMetadata(source_metadata, update_metadata, expected)); + UpdatePartitionMetadata(source_manifest, update_manifest, expected)); } INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest, diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index 9c7b8d07..a4dc5765 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -27,6 +27,7 @@ #include #include "update_engine/common/boot_control_interface.h" +#include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -88,13 +89,13 @@ class DynamicPartitionControlInterface { virtual std::unique_ptr LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot) = 0; - // Prepare all partitions for an update specified in |partition_metadata|. + // Prepare all partitions for an update specified in |manifest|. // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the // device would be mapped in an inconsistent way. 
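  // Illustrative call order during one update, with arguments elided:
  //   PreparePartitionsForUpdate(source_slot, target_slot, manifest);
  //   MapPartitionOnDeviceMapper(...);  // then map each target partition
  //   ... write the payload to the mapped devices ...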
virtual bool PreparePartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, - const BootControlInterface::PartitionMetadata& partition_metadata) = 0; + const DeltaArchiveManifest& manifest) = 0; // Return a possible location for devices listed by name. virtual bool GetDeviceDir(std::string* path) = 0; diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h index 61d8e0a3..346998fc 100644 --- a/dynamic_partition_test_utils.h +++ b/dynamic_partition_test_utils.h @@ -33,6 +33,7 @@ #include #include "update_engine/common/boot_control_interface.h" +#include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -59,8 +60,6 @@ using PartitionSizes = std::map; // "{name_a, size}" using PartitionSuffixSizes = std::map; -using PartitionMetadata = BootControlInterface::PartitionMetadata; - constexpr uint64_t kDefaultGroupSize = 5_GiB; // Super device size. 1 MiB for metadata. constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB; @@ -78,8 +77,8 @@ inline std::ostream& operator<<(std::ostream& os, const std::map& param) { return os << "}"; } -template -inline std::ostream& operator<<(std::ostream& os, const std::vector& param) { +template +inline void VectorToStream(std::ostream& os, const V& param) { os << "["; bool first = true; for (const auto& e : param) { @@ -88,21 +87,28 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& param) { os << e; first = false; } - return os << "]"; + os << "]"; } -inline std::ostream& operator<<(std::ostream& os, - const PartitionMetadata::Partition& p) { - return os << "{" << p.name << ", " << p.size << "}"; +inline std::ostream& operator<<(std::ostream& os, const PartitionUpdate& p) { + return os << "{" << p.partition_name() << ", " + << p.new_partition_info().size() << "}"; } inline std::ostream& operator<<(std::ostream& os, - const PartitionMetadata::Group& g) { - return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}"; + const DynamicPartitionGroup& g) { + os << "{" << g.name() << ", " << g.size() << ", "; + VectorToStream(os, g.partition_names()); + return os << "}"; } -inline std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) { - return os << m.groups; +inline std::ostream& operator<<(std::ostream& os, + const DeltaArchiveManifest& m) { + os << "{.groups = "; + VectorToStream(os, m.dynamic_partition_metadata().groups()); + os << ", .partitions = "; + VectorToStream(os, m.partitions()); + return os; } inline std::string GetDevice(const std::string& name) { @@ -113,90 +119,125 @@ inline std::string GetDmDevice(const std::string& name) { return kFakeDmDevicePath + name; } +inline DynamicPartitionGroup* AddGroup(DeltaArchiveManifest* manifest, + const std::string& group, + uint64_t group_size) { + auto* g = manifest->mutable_dynamic_partition_metadata()->add_groups(); + g->set_name(group); + g->set_size(group_size); + return g; +} + +inline void AddPartition(DeltaArchiveManifest* manifest, + DynamicPartitionGroup* group, + const std::string& partition, + uint64_t partition_size) { + group->add_partition_names(partition); + auto* p = manifest->add_partitions(); + p->set_partition_name(partition); + p->mutable_new_partition_info()->set_size(partition_size); +} + // To support legacy tests, auto-convert {name_a: size} map to -// PartitionMetadata. -inline PartitionMetadata PartitionSuffixSizesToMetadata( +// DeltaArchiveManifest. 
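// Example (illustrative): {{"system_a", 2_GiB}, {"system_b", 0}} produces a
// manifest with one default group per slot suffix, where "system_a" lands in
// the "_a" group and "system_b" in the "_b" group, each with the size given in
// the map.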
+inline DeltaArchiveManifest PartitionSuffixSizesToManifest( const PartitionSuffixSizes& partition_sizes) { - PartitionMetadata metadata; + DeltaArchiveManifest manifest; for (const char* suffix : kSlotSuffixes) { - metadata.groups.push_back( - {std::string(kDefaultGroup) + suffix, kDefaultGroupSize, {}}); + AddGroup(&manifest, std::string(kDefaultGroup) + suffix, kDefaultGroupSize); } for (const auto& pair : partition_sizes) { for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) { if (base::EndsWith(pair.first, kSlotSuffixes[suffix_idx], base::CompareCase::SENSITIVE)) { - metadata.groups[suffix_idx].partitions.push_back( - {pair.first, pair.second}); + AddPartition( + &manifest, + manifest.mutable_dynamic_partition_metadata()->mutable_groups( + suffix_idx), + pair.first, + pair.second); } } } - return metadata; + return manifest; } // To support legacy tests, auto-convert {name: size} map to PartitionMetadata. -inline PartitionMetadata PartitionSizesToMetadata( +inline DeltaArchiveManifest PartitionSizesToManifest( const PartitionSizes& partition_sizes) { - PartitionMetadata metadata; - metadata.groups.push_back( - {std::string{kDefaultGroup}, kDefaultGroupSize, {}}); + DeltaArchiveManifest manifest; + auto* g = AddGroup(&manifest, std::string(kDefaultGroup), kDefaultGroupSize); for (const auto& pair : partition_sizes) { - metadata.groups[0].partitions.push_back({pair.first, pair.second}); + AddPartition(&manifest, g, pair.first, pair.second); } - return metadata; + return manifest; } inline std::unique_ptr NewFakeMetadata( - const PartitionMetadata& metadata) { + const DeltaArchiveManifest& manifest) { auto builder = MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots); - EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2); - EXPECT_NE(nullptr, builder); - if (builder == nullptr) - return nullptr; - for (const auto& group : metadata.groups) { - EXPECT_TRUE(builder->AddGroup(group.name, group.size)); - for (const auto& partition : group.partitions) { - auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */); - EXPECT_TRUE(p && builder->ResizePartition(p, partition.size)); + for (const auto& group : manifest.dynamic_partition_metadata().groups()) { + EXPECT_TRUE(builder->AddGroup(group.name(), group.size())); + for (const auto& partition_name : group.partition_names()) { + EXPECT_NE( + nullptr, + builder->AddPartition(partition_name, group.name(), 0 /* attr */)); } } + for (const auto& partition : manifest.partitions()) { + auto p = builder->FindPartition(partition.partition_name()); + EXPECT_TRUE(p && builder->ResizePartition( + p, partition.new_partition_info().size())); + } return builder; } class MetadataMatcher : public MatcherInterface { public: explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes) - : partition_metadata_(PartitionSuffixSizesToMetadata(partition_sizes)) {} - explicit MetadataMatcher(const PartitionMetadata& partition_metadata) - : partition_metadata_(partition_metadata) {} + : manifest_(PartitionSuffixSizesToManifest(partition_sizes)) {} + explicit MetadataMatcher(const DeltaArchiveManifest& manifest) + : manifest_(manifest) {} bool MatchAndExplain(MetadataBuilder* metadata, MatchResultListener* listener) const override { bool success = true; - for (const auto& group : partition_metadata_.groups) { - for (const auto& partition : group.partitions) { - auto p = metadata->FindPartition(partition.name); + for (const auto& group : manifest_.dynamic_partition_metadata().groups()) { + for (const 
auto& partition_name : group.partition_names()) { + auto p = metadata->FindPartition(partition_name); if (p == nullptr) { if (!success) *listener << "; "; - *listener << "No partition " << partition.name; + *listener << "No partition " << partition_name; + success = false; + continue; + } + const auto& partition_updates = manifest_.partitions(); + auto it = std::find_if(partition_updates.begin(), + partition_updates.end(), + [&](const auto& p) { + return p.partition_name() == partition_name; + }); + if (it == partition_updates.end()) { + *listener << "Can't find partition update " << partition_name; success = false; continue; } - if (p->size() != partition.size) { + auto partition_size = it->new_partition_info().size(); + if (p->size() != partition_size) { if (!success) *listener << "; "; - *listener << "Partition " << partition.name << " has size " - << p->size() << ", expected " << partition.size; + *listener << "Partition " << partition_name << " has size " + << p->size() << ", expected " << partition_size; success = false; } - if (p->group_name() != group.name) { + if (p->group_name() != group.name()) { if (!success) *listener << "; "; - *listener << "Partition " << partition.name << " has group " - << p->group_name() << ", expected " << group.name; + *listener << "Partition " << partition_name << " has group " + << p->group_name() << ", expected " << group.name(); success = false; } } @@ -205,15 +246,15 @@ class MetadataMatcher : public MatcherInterface { } void DescribeTo(std::ostream* os) const override { - *os << "expect: " << partition_metadata_; + *os << "expect: " << manifest_; } void DescribeNegationTo(std::ostream* os) const override { - *os << "expect not: " << partition_metadata_; + *os << "expect not: " << manifest_; } private: - PartitionMetadata partition_metadata_; + DeltaArchiveManifest manifest_; }; inline Matcher MetadataMatches( @@ -222,8 +263,8 @@ inline Matcher MetadataMatches( } inline Matcher MetadataMatches( - const PartitionMetadata& partition_metadata) { - return MakeMatcher(new MetadataMatcher(partition_metadata)); + const DeltaArchiveManifest& manifest) { + return MakeMatcher(new MetadataMatcher(manifest)); } MATCHER_P(HasGroup, group, " has group " + group) { diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index aab3c4d8..d96432b1 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -45,9 +45,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { MOCK_METHOD1(GetDeviceDir, bool(std::string*)); MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); MOCK_METHOD3(PreparePartitionsForUpdate, - bool(uint32_t, - uint32_t, - const BootControlInterface::PartitionMetadata&)); + bool(uint32_t, uint32_t, const DeltaArchiveManifest&)); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); }; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index d76a959e..3ff98ca2 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -808,7 +808,6 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { for (const PartitionUpdate& partition : manifest_.partitions()) { partitions_.push_back(partition); } - manifest_.clear_partitions(); } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) { LOG(INFO) << "Converting update information from old format."; PartitionUpdate root_part; @@ -923,12 +922,16 @@ bool 
DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { } if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) { - if (!InitPartitionMetadata()) { + if (!PreparePartitionsForUpdate()) { *error = ErrorCode::kInstallDeviceOpenError; return false; } } + if (major_payload_version_ == kBrilloMajorPayloadVersion) { + manifest_.clear_partitions(); + } + if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) { LOG(ERROR) << "Unable to determine all the partition devices."; *error = ErrorCode::kInstallDeviceOpenError; @@ -938,45 +941,18 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { return true; } -bool DeltaPerformer::InitPartitionMetadata() { - BootControlInterface::PartitionMetadata partition_metadata; - if (manifest_.has_dynamic_partition_metadata()) { - std::map partition_sizes; - for (const auto& partition : install_plan_->partitions) { - partition_sizes.emplace(partition.name, partition.target_size); - } - for (const auto& group : manifest_.dynamic_partition_metadata().groups()) { - BootControlInterface::PartitionMetadata::Group e; - e.name = group.name(); - e.size = group.size(); - for (const auto& partition_name : group.partition_names()) { - auto it = partition_sizes.find(partition_name); - if (it == partition_sizes.end()) { - // TODO(tbao): Support auto-filling partition info for framework-only - // OTA. - LOG(ERROR) << "dynamic_partition_metadata contains partition " - << partition_name - << " but it is not part of the manifest. " - << "This is not supported."; - return false; - } - e.partitions.push_back({partition_name, it->second}); - } - partition_metadata.groups.push_back(std::move(e)); - } - } - +bool DeltaPerformer::PreparePartitionsForUpdate() { bool metadata_updated = false; prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated); - if (!boot_control_->InitPartitionMetadata( - install_plan_->target_slot, partition_metadata, !metadata_updated)) { + if (!boot_control_->PreparePartitionsForUpdate( + install_plan_->target_slot, manifest_, !metadata_updated)) { LOG(ERROR) << "Unable to initialize partition metadata for slot " << BootControlInterface::SlotName(install_plan_->target_slot); return false; } TEST_AND_RETURN_FALSE( prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true)); - LOG(INFO) << "InitPartitionMetadata done."; + LOG(INFO) << "PreparePartitionsForUpdate done."; return true; } diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 17cb5995..25c348c9 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -275,7 +275,7 @@ class DeltaPerformer : public FileWriter { // After install_plan_ is filled with partition names and sizes, initialize // metadata of partitions and map necessary devices before opening devices. - bool InitPartitionMetadata(); + bool PreparePartitionsForUpdate(); // Update Engine preference store. 
PrefsInterface* prefs_; From 420db9b98a0b0ace6d412c1a52cfd9a0f326a3e0 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 23 Jul 2019 20:50:33 -0700 Subject: [PATCH 110/624] DynamicPartitionControl: map snapshot devices for virt ab Bug: 138816109 Test: apply OTA on Virtual A/B devices Test: update_engine_unittests Change-Id: I23263624afb370d122d5aed5c3a9bf48a1a3d8fc --- Android.bp | 11 ++++- dynamic_partition_control_android.cc | 72 ++++++++++++++++++++++++++-- dynamic_partition_control_android.h | 21 ++++++-- 3 files changed, 96 insertions(+), 8 deletions(-) diff --git a/Android.bp b/Android.bp index 4640cb2c..1be0d630 100644 --- a/Android.bp +++ b/Android.bp @@ -191,7 +191,10 @@ cc_defaults { name: "libupdate_engine_boot_control_exports", defaults: ["update_metadata-protos_exports"], - static_libs: ["update_metadata-protos"], + static_libs: [ + "libsnapshot", + "update_metadata-protos", + ], shared_libs: [ "libbootloader_message", "libfs_mgr", @@ -200,6 +203,12 @@ cc_defaults { "libutils", "android.hardware.boot@1.0", ], + target: { + recovery: { + static_libs: ["libsnapshot_nobinder"], + exclude_static_libs: ["libsnapshot"], + }, + }, } cc_library_static { diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index e351dbdf..f4305747 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -16,6 +16,7 @@ #include "update_engine/dynamic_partition_control_android.h" +#include // NOLINT(build/c++11) - using libsnapshot / liblp API #include #include #include @@ -30,6 +31,7 @@ #include #include #include +#include #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/utils.h" @@ -54,7 +56,18 @@ constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled"; constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit"; -constexpr uint64_t kMapTimeoutMillis = 1000; +// Map timeout for dynamic partitions. +constexpr std::chrono::milliseconds kMapTimeout{1000}; +// Map timeout for dynamic partitions with snapshots. Since several devices +// needs to be mapped, this timeout is longer than |kMapTimeout|. +constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000}; + +DynamicPartitionControlAndroid::DynamicPartitionControlAndroid() { + if (GetVirtualAbFeatureFlag().IsEnabled()) { + snapshot_ = android::snapshot::SnapshotManager::New(); + CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager."; + } +} DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { CleanupInternal(false /* wait */); @@ -97,10 +110,20 @@ bool DynamicPartitionControlAndroid::MapPartitionInternal( .metadata_slot = slot, .partition_name = target_partition_name, .force_writable = force_writable, - .timeout_ms = std::chrono::milliseconds(kMapTimeoutMillis), }; + bool success = false; + if (GetVirtualAbFeatureFlag().IsEnabled() && force_writable) { + // Only target partitions are mapped with force_writable. On Virtual + // A/B devices, target partitions may overlap with source partitions, so + // they must be mapped with snapshot. 
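  // Broadly speaking, the snapshot path redirects these writes to
  // copy-on-write devices managed by SnapshotManager instead of writing
  // straight into super extents that the source slot may still be sharing,
  // which is why a plain dm-linear mapping is not enough for writable target
  // partitions here.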
+ params.timeout_ms = kMapSnapshotTimeout; + success = snapshot_->MapUpdateSnapshot(params, path); + } else { + params.timeout_ms = kMapTimeout; + success = CreateLogicalPartition(params, path); + } - if (!CreateLogicalPartition(params, path)) { + if (!success) { LOG(ERROR) << "Cannot map " << target_partition_name << " in " << super_device << " on device mapper."; return false; @@ -161,7 +184,19 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( const std::string& target_partition_name) { if (DeviceMapper::Instance().GetState(target_partition_name) != DmDeviceState::INVALID) { - if (!DestroyLogicalPartition(target_partition_name)) { + // Partitions at target slot on non-Virtual A/B devices are mapped as + // dm-linear. Also, on Virtual A/B devices, system_other may be mapped for + // preopt apps as dm-linear. + // Call DestroyLogicalPartition to handle these cases. + bool success = DestroyLogicalPartition(target_partition_name); + + // On a Virtual A/B device, |target_partition_name| may be a leftover from + // a paused update. Clean up any underlying devices. + if (GetVirtualAbFeatureFlag().IsEnabled()) { + success &= snapshot_->UnmapUpdateSnapshot(target_partition_name); + } + + if (!success) { LOG(ERROR) << "Cannot unmap " << target_partition_name << " from device mapper."; return false; @@ -309,6 +344,20 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest) { + // TODO(elsk): Also call PrepareDynamicPartitionsForUpdate when applying + // downgrade packages on retrofit Virtual A/B devices and when applying + // secondary OTA. b/138258570 + if (GetVirtualAbFeatureFlag().IsEnabled()) { + return PrepareSnapshotPartitionsForUpdate( + source_slot, target_slot, manifest); + } + return PrepareDynamicPartitionsForUpdate(source_slot, target_slot, manifest); +} + +bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest) { const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); // Unmap all the target dynamic partitions because they would become @@ -345,6 +394,21 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( return StoreMetadata(target_device, builder.get(), target_slot); } +bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest) { + if (!snapshot_->BeginUpdate()) { + LOG(ERROR) << "Cannot begin new update."; + return false; + } + if (!snapshot_->CreateUpdateSnapshots(manifest)) { + LOG(ERROR) << "Cannot create update snapshots."; + return false; + } + return true; +} + std::string DynamicPartitionControlAndroid::GetSuperPartitionName( uint32_t slot) { return fs_mgr_get_super_partition_name(slot); diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 0907236a..9509a625 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -23,11 +23,13 @@ #include #include +#include + namespace chromeos_update_engine { class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { public: - DynamicPartitionControlAndroid() = default; + DynamicPartitionControlAndroid(); ~DynamicPartitionControlAndroid(); FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; @@ -83,8 +85,6 @@ class DynamicPartitionControlAndroid : 
public DynamicPartitionControlInterface { private: friend class DynamicPartitionControlAndroidTest; - std::set mapped_devices_; - void CleanupInternal(bool wait); bool MapPartitionInternal(const std::string& super_device, const std::string& target_partition_name, @@ -98,6 +98,21 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t target_slot, const DeltaArchiveManifest& manifest); + // Helper for PreparePartitionsForUpdate. Used for dynamic partitions without + // Virtual A/B update. + bool PrepareDynamicPartitionsForUpdate(uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest); + + // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions for + // Virtual A/B update. + bool PrepareSnapshotPartitionsForUpdate(uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest); + + std::set mapped_devices_; + std::unique_ptr snapshot_; + DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); }; From a33bca41ec30bab21322f1c555eb054b324c92ed Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 3 Sep 2019 20:29:45 -0700 Subject: [PATCH 111/624] Call SnapshotManager::FinishedSnapshotWrites ... when the update has finished. This allows SnapshotManager to conclude the update and prepare for reboot. Test: builds Test: apply OTA on Virtual A/B device, then check /metadata/ota/snapshot-boot exists, then reboot Bug: 138816109 Change-Id: I2a4699865b09358ef018313bed64e34617a78e3c --- boot_control_android.cc | 4 ++++ dynamic_partition_control_android.cc | 7 +++++++ dynamic_partition_control_android.h | 1 + dynamic_partition_control_interface.h | 2 ++ mock_dynamic_partition_control.h | 2 ++ 5 files changed, 16 insertions(+) diff --git a/boot_control_android.cc b/boot_control_android.cc index 4c998b1b..4a010bda 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -245,6 +245,10 @@ bool BootControlAndroid::MarkSlotUnbootable(Slot slot) { } bool BootControlAndroid::SetActiveBootSlot(Slot slot) { + if (slot != GetCurrentSlot() && !dynamic_control_->FinishUpdate()) { + return false; + } + CommandResult result; auto ret = module_->setActiveBootSlot(slot, StoreResultCallback(&result)); if (!ret.isOk()) { diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index f4305747..8dcf343d 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -490,4 +490,11 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( return true; } +bool DynamicPartitionControlAndroid::FinishUpdate() { + if (!GetVirtualAbFeatureFlag().IsEnabled()) + return true; + LOG(INFO) << "Snapshot writes are done."; + return snapshot_->FinishedSnapshotWrites(); +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 9509a625..f9dfd894 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -52,6 +52,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const DeltaArchiveManifest& manifest) override; bool GetDeviceDir(std::string* path) override; std::string GetSuperPartitionName(uint32_t slot) override; + bool FinishUpdate() override; protected: // These functions are exposed for testing. 
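Taken together with the snapshot-mapping change above, the Virtual A/B path through DynamicPartitionControlAndroid now reads, in rough outline, as below (a sketch built only from the calls added in these two patches, error handling elided):

  // 1. Before the payload is written, PreparePartitionsForUpdate() goes
  //    through PrepareSnapshotPartitionsForUpdate():
  //      snapshot_->BeginUpdate();
  //      snapshot_->CreateUpdateSnapshots(manifest);
  // 2. While the payload is applied, writable target partitions are mapped via
  //    MapPartitionInternal():
  //      snapshot_->MapUpdateSnapshot(params, path);
  // 3. Once everything is written and the active slot is switched,
  //    SetActiveBootSlot() calls FinishUpdate(), which ends with:
  //      snapshot_->FinishedSnapshotWrites();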
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index a4dc5765..0ccfcd6b 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -103,6 +103,8 @@ class DynamicPartitionControlInterface { // Return the name of the super partition (which stores super partition // metadata) for a given slot. virtual std::string GetSuperPartitionName(uint32_t slot) = 0; + + virtual bool FinishUpdate() = 0; }; } // namespace chromeos_update_engine diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index d96432b1..1af6cfed 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -48,6 +48,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { bool(uint32_t, uint32_t, const DeltaArchiveManifest&)); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); + MOCK_METHOD0(FinishUpdate, bool()); }; class MockDynamicPartitionControlAndroid @@ -75,6 +76,7 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); + MOCK_METHOD0(FinishUpdate, bool()); }; } // namespace chromeos_update_engine From f407f187af489b92c9a50a1f4ac8cf703b8fab56 Mon Sep 17 00:00:00 2001 From: Caroline Tice Date: Mon, 23 Sep 2019 10:32:09 -0700 Subject: [PATCH 112/624] update_engine: Fix field assignment order, for compiler warning. The latest llvm compiler version (r370808), to which we are about to upgrade Chrome OS, introduces a new warning, -Wreorder-init-list, which complains if struct field assignments at initialization occur in a different order than their declartion order. This CL fixes a build error in the ChromeOS UnitTest stage that is uncovered by this warning. BUG=chromium:1006866 TEST=compilation works with this fix, with new compiler Change-Id: Id48ae7b24345d960313eaa03ce58378065a9316e Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1818788 Tested-by: Caroline Tice Commit-Queue: Manoj Gupta Reviewed-by: Amin Hassani --- update_status_utils_unittest.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/update_status_utils_unittest.cc b/update_status_utils_unittest.cc index 3af30c78..e3dd037c 100644 --- a/update_status_utils_unittest.cc +++ b/update_status_utils_unittest.cc @@ -25,14 +25,16 @@ using std::string; namespace chromeos_update_engine { TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) { + // Keep field assignments in same order as they were declared, + // to prevent compiler warning, -Wreorder-init-fields. update_engine::UpdateEngineStatus update_engine_status = { - .status = update_engine::UpdateStatus::CHECKING_FOR_UPDATE, - .is_install = true, - .is_enterprise_rollback = true, .last_checked_time = 156000000, + .status = update_engine::UpdateStatus::CHECKING_FOR_UPDATE, + .progress = 0.5, .new_size_bytes = 888, .new_version = "12345.0.0", - .progress = 0.5, + .is_enterprise_rollback = true, + .is_install = true, }; string print = R"(CURRENT_OP=UPDATE_STATUS_CHECKING_FOR_UPDATE From 051627abe8995741f996e3931d899c0b08e503eb Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 3 Sep 2019 12:56:32 -0700 Subject: [PATCH 113/624] update_engine: Parse and supply EOL date for Chrome From Omaha, the optional field |_eol_date| is to indicate the EOL of a device. 
Chrome side will leverage these values to display a notification. The value for |_eol_date| should be an integer value indicating the days from Unix Epoch date. If |_eol_date| does not exist in the Omaha response or have non-integer values, the default will fallback to |kEolDateInvalid|. BUG=chromium:998983 TEST=FEATURES="test" emerge-$B update_engine update_engine-client system_api TEST=test_that -b $B $IP autoupdate_EOL TEST=test_that -b $B $IP autoupdate_EOL.approaching_eol TEST=test_that -b $B $IP autoupdate_EOL.future_eol Cq-Depend:chromium:1783596, chromium:1811116 Change-Id: I2b1063873118ccf8fe22ba09a5961e27aa980c7b Reviewed-on: https://chromium-review.googlesource.com/1783897 Tested-by: Jae Hoon Kim Commit-Ready: ChromeOS CL Exonerator Bot Legacy-Commit-Queue: Commit Bot Reviewed-by: Amin Hassani --- client_library/client_dbus.cc | 1 + .../include/update_engine/update_status.h | 2 + common/constants.cc | 1 + common/constants.h | 1 + dbus_service.cc | 1 + omaha_request_action.cc | 28 +++++++-- omaha_request_action.h | 4 +- omaha_request_action_unittest.cc | 62 +++++++++++++++++++ omaha_utils.cc | 23 +++++-- omaha_utils.h | 18 ++++++ omaha_utils_unittest.cc | 13 ++++ update_attempter.cc | 6 ++ update_attempter_unittest.cc | 34 ++++++++++ update_engine_client.cc | 14 ++--- 14 files changed, 190 insertions(+), 18 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index e6aba923..d1d6cc01 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -56,6 +56,7 @@ void ConvertToUpdateEngineStatus(const StatusResult& status, out_status->status = static_cast(status.current_operation()); out_status->is_enterprise_rollback = status.is_enterprise_rollback(); out_status->is_install = status.is_install(); + out_status->eol_date = status.eol_date(); } } // namespace diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index c877df6d..c1d0968f 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -88,6 +88,8 @@ struct UpdateEngineStatus { bool is_enterprise_rollback; // Indication of install for DLC(s). bool is_install; + // The end-of-life date of the device in the number of days since Unix Epoch. 
+ int64_t eol_date; }; } // namespace update_engine diff --git a/common/constants.cc b/common/constants.cc index 87bdf911..64bdf0cf 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -55,6 +55,7 @@ const char kPrefsNumResponsesSeen[] = "num-responses-seen"; const char kPrefsOmahaCohort[] = "omaha-cohort"; const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint"; const char kPrefsOmahaCohortName[] = "omaha-cohort-name"; +const char kPrefsOmahaEolDate[] = "omaha-eol-date"; const char kPrefsOmahaEolStatus[] = "omaha-eol-status"; const char kPrefsP2PEnabled[] = "p2p-enabled"; const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp"; diff --git a/common/constants.h b/common/constants.h index d95a56a1..23c9003d 100644 --- a/common/constants.h +++ b/common/constants.h @@ -56,6 +56,7 @@ extern const char kPrefsNumResponsesSeen[]; extern const char kPrefsOmahaCohort[]; extern const char kPrefsOmahaCohortHint[]; extern const char kPrefsOmahaCohortName[]; +extern const char kPrefsOmahaEolDate[]; extern const char kPrefsOmahaEolStatus[]; extern const char kPrefsP2PEnabled[]; extern const char kPrefsP2PFirstAttemptTimestamp[]; diff --git a/dbus_service.cc b/dbus_service.cc index b0dc0766..065fe0c3 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -49,6 +49,7 @@ void ConvertToStatusResult(const UpdateEngineStatus& ue_status, out_status->set_new_size(ue_status.new_size_bytes); out_status->set_is_enterprise_rollback(ue_status.is_enterprise_rollback); out_status->set_is_install(ue_status.is_install); + out_status->set_eol_date(ue_status.eol_date); } } // namespace diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 4d865868..7ca43720 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -109,6 +109,7 @@ constexpr char kValNoUpdate[] = "noupdate"; // updatecheck attributes (without the underscore prefix). constexpr char kAttrEol[] = "eol"; +constexpr char kAttrEolDate[] = "eol_date"; constexpr char kAttrRollback[] = "rollback"; constexpr char kAttrFirmwareVersion[] = "firmware_version"; constexpr char kAttrKernelVersion[] = "kernel_version"; @@ -1317,14 +1318,29 @@ bool OmahaRequestAction::PersistCohortData(const string& prefs_key, } bool OmahaRequestAction::PersistEolStatus(const map& attrs) { + bool ret = true; + + // Set EOL date. + auto eol_date_attr = attrs.find(kAttrEolDate); + if (eol_date_attr == attrs.end()) { + system_state_->prefs()->Delete(kPrefsOmahaEolDate); + } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolDate, + eol_date_attr->second)) { + LOG(ERROR) << "Setting EOL date failed."; + ret = false; + } + + // Set EOL. 
auto eol_attr = attrs.find(kAttrEol); - if (eol_attr != attrs.end()) { - return system_state_->prefs()->SetString(kPrefsOmahaEolStatus, - eol_attr->second); - } else if (system_state_->prefs()->Exists(kPrefsOmahaEolStatus)) { - return system_state_->prefs()->Delete(kPrefsOmahaEolStatus); + if (eol_attr == attrs.end()) { + system_state_->prefs()->Delete(kPrefsOmahaEolStatus); + } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolStatus, + eol_attr->second)) { + LOG(ERROR) << "Setting EOL status failed."; + ret = false; } - return true; + + return ret; } void OmahaRequestAction::ActionCompleted(ErrorCode code) { diff --git a/omaha_request_action.h b/omaha_request_action.h index 12d36d94..96f09e92 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -184,7 +184,9 @@ class OmahaRequestAction : public Action, const std::string& new_value); // Parse and persist the end-of-life status flag sent back in the updatecheck - // tag attributes. The flag will be validated and stored in the Prefs. + // tag attributes. In addition, the optional end-of-life date flag will also + // be parsed and persisted. The flags will be validated and stored in the + // Prefs. bool PersistEolStatus(const std::map& attrs); // If this is an update check request, initializes diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 8008e008..94d5152a 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -53,6 +53,7 @@ #include "update_engine/mock_payload_state.h" #include "update_engine/omaha_request_builder_xml.h" #include "update_engine/omaha_request_params.h" +#include "update_engine/omaha_utils.h" #include "update_engine/update_manager/rollback_prefs.h" using base::Time; @@ -2809,4 +2810,65 @@ TEST_F(OmahaRequestActionTest, NoIncludeRequisitionTest) { EXPECT_EQ(string::npos, post_str.find("requisition")); } +TEST_F(OmahaRequestActionTest, PersistEolDatesTest) { + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + + string eol, eol_date; + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol)); + EXPECT_EQ(kEolStatusSupported, eol); + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); + EXPECT_EQ("200", eol_date); +} + +TEST_F(OmahaRequestActionTest, PersistEolMissingDatesTest) { + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + + string eol, eol_date; + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol)); + EXPECT_EQ(kEolStatusSupported, eol); + EXPECT_FALSE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); +} + +TEST_F(OmahaRequestActionTest, PersistEolBadDatesTest) { + tuc_params_.http_response = + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + + ASSERT_TRUE(TestUpdateCheck()); + + string eol, eol_date; + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol)); + EXPECT_EQ(kEolStatusSupported, eol); + EXPECT_TRUE( + fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); + EXPECT_EQ(kEolDateInvalid, 
StringToEolDate(eol_date)); +} + } // namespace chromeos_update_engine diff --git a/omaha_utils.cc b/omaha_utils.cc index 6bd75250..9fe425b1 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -17,17 +17,15 @@ #include "update_engine/omaha_utils.h" #include +#include namespace chromeos_update_engine { -namespace { - -// The possible string values for the end-of-life status. const char kEolStatusSupported[] = "supported"; const char kEolStatusSecurityOnly[] = "security-only"; const char kEolStatusEol[] = "eol"; -} // namespace +const EolDate kEolDateInvalid = -9999; const char* EolStatusToString(EolStatus eol_status) { switch (eol_status) { @@ -54,4 +52,21 @@ EolStatus StringToEolStatus(const std::string& eol_status) { return EolStatus::kSupported; } +std::string EolDateToString(EolDate eol_date) { +#if BASE_VER < 576279 + return base::Int64ToString(eol_date); +#else + return base::NumberToString(eol_date); +#endif +} + +EolDate StringToEolDate(const std::string& eol_date) { + EolDate date = kEolDateInvalid; + if (!base::StringToInt64(eol_date, &date)) { + LOG(WARNING) << "Invalid EOL date attribute: " << eol_date; + return kEolDateInvalid; + } + return date; +} + } // namespace chromeos_update_engine diff --git a/omaha_utils.h b/omaha_utils.h index 86145403..128232af 100644 --- a/omaha_utils.h +++ b/omaha_utils.h @@ -21,6 +21,16 @@ namespace chromeos_update_engine { +using EolDate = int64_t; + +// |EolDate| indicating an invalid end-of-life date. +extern const EolDate kEolDateInvalid; + +// The possible string values for the end-of-life status. +extern const char kEolStatusSupported[]; +extern const char kEolStatusSecurityOnly[]; +extern const char kEolStatusEol[]; + // The end-of-life status of the device. enum class EolStatus { kSupported = 0, @@ -35,6 +45,14 @@ const char* EolStatusToString(EolStatus eol_status); // of an invalid string, the default "supported" value will be used instead. EolStatus StringToEolStatus(const std::string& eol_status); +// Returns the string representation of the |eol_date|. +std::string EolDateToString(EolDate eol_date); + +// Converts the end-of-life date string to an EolDate numeric value. In case +// of an invalid string, the default |kEolDateInvalid| value will be used +// instead. +EolDate StringToEolDate(const std::string& eol_date); + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_OMAHA_UTILS_H_ diff --git a/omaha_utils_unittest.cc b/omaha_utils_unittest.cc index 8ceb76bf..ccb9578d 100644 --- a/omaha_utils_unittest.cc +++ b/omaha_utils_unittest.cc @@ -39,4 +39,17 @@ TEST(OmahaUtilsTest, EolStatusTest) { EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!")); } +TEST(OmahaUtilsTest, EolDateTest) { + // Supported values are converted back and forth properly. + const std::vector tests = {kEolDateInvalid, -1, 0, 1}; + for (EolDate eol_date : tests) { + EXPECT_EQ(eol_date, StringToEolDate(EolDateToString(eol_date))) + << "The StringToEolDate() was " << EolDateToString(eol_date); + } + + // Invalid values are assumed as "supported". 
+ EXPECT_EQ(kEolDateInvalid, StringToEolDate("")); + EXPECT_EQ(kEolDateInvalid, StringToEolDate("hello, world!")); +} + } // namespace chromeos_update_engine diff --git a/update_attempter.cc b/update_attempter.cc index 780ba7bf..18e50881 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -54,6 +54,7 @@ #include "update_engine/omaha_request_action.h" #include "update_engine/omaha_request_params.h" #include "update_engine/omaha_response_handler_action.h" +#include "update_engine/omaha_utils.h" #include "update_engine/p2p_manager.h" #include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" @@ -1385,6 +1386,11 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { out_status->is_enterprise_rollback = install_plan_ && install_plan_->is_rollback; out_status->is_install = is_install_; + + string str_eol_date; + system_state_->prefs()->GetString(kPrefsOmahaEolDate, &str_eol_date); + out_status->eol_date = StringToEolDate(str_eol_date); + return true; } diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 0e743535..4aff897b 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -18,6 +18,7 @@ #include +#include #include #include @@ -48,6 +49,7 @@ #include "update_engine/mock_p2p_manager.h" #include "update_engine/mock_payload_state.h" #include "update_engine/mock_service_observer.h" +#include "update_engine/omaha_utils.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/payload_constants.h" @@ -2244,4 +2246,36 @@ TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue) { EXPECT_TRUE(status.is_enterprise_rollback); } +TEST_F(UpdateAttempterTest, FutureEolTest) { + EolDate eol_date = std::numeric_limits::max(); + EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) + .WillOnce( + DoAll(SetArgPointee<1>(EolDateToString(eol_date)), Return(true))); + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_EQ(eol_date, status.eol_date); +} + +TEST_F(UpdateAttempterTest, PastEolTest) { + EolDate eol_date = 1; + EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) + .WillOnce( + DoAll(SetArgPointee<1>(EolDateToString(eol_date)), Return(true))); + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_EQ(eol_date, status.eol_date); +} + +TEST_F(UpdateAttempterTest, FailedEolTest) { + EolDate eol_date = kEolDateInvalid; + EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) + .WillOnce(Return(false)); + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_EQ(eol_date, status.eol_date); +} + } // namespace chromeos_update_engine diff --git a/update_engine_client.cc b/update_engine_client.cc index d78cee70..7b5c4df1 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -42,6 +42,8 @@ #include "update_engine/update_status_utils.h" using brillo::KeyValueStore; +using chromeos_update_engine::EolDate; +using chromeos_update_engine::EolDateToString; using chromeos_update_engine::EolStatus; using chromeos_update_engine::ErrorCode; using chromeos_update_engine::UpdateEngineStatusToString; @@ -569,16 +571,14 @@ int UpdateEngineClient::ProcessFlags() { } if (FLAGS_eol_status) { - int eol_status; - if (!client_->GetEolStatus(&eol_status)) { - LOG(ERROR) << "Error getting the end-of-life status."; + UpdateEngineStatus status; + if (!client_->GetStatus(&status)) { + LOG(ERROR) << 
"Error GetStatus() for getting EOL info."; } else { - EolStatus eol_status_code = static_cast(eol_status); + EolDate eol_date_code = status.eol_date; KeyValueStore eol_status_store; - eol_status_store.SetString("EOL_STATUS", - EolStatusToString(eol_status_code)); - + eol_status_store.SetString("EOL_DATE", EolDateToString(eol_date_code)); printf("%s", eol_status_store.SaveToString().c_str()); } } From dac04b73367d63eb186e9deddbe8d9f41d3092a7 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Mon, 30 Sep 2019 11:48:17 -0700 Subject: [PATCH 114/624] Signature size now uses size_t Signature size shouldn't have negative values. Make its usage more consistent. Test: mma Change-Id: Ie6cbf49694fcbe0348d6338373423d77d3edfdd4 --- .../delta_performer_integration_test.cc | 12 ++++++------ payload_generator/generate_delta_main.cc | 14 ++++++++------ payload_generator/payload_signer.cc | 2 +- payload_generator/payload_signer.h | 2 +- payload_generator/payload_signer_unittest.cc | 2 +- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 6b4771d6..38494f21 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -173,7 +173,7 @@ static size_t GetSignatureSize(const string& private_key_path) { return signature.size(); } -static bool InsertSignaturePlaceholder(int signature_size, +static bool InsertSignaturePlaceholder(size_t signature_size, const string& payload_path, uint64_t* out_metadata_size) { vector signatures; @@ -186,7 +186,7 @@ static bool InsertSignaturePlaceholder(int signature_size, static void SignGeneratedPayload(const string& payload_path, uint64_t* out_metadata_size) { string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); - int signature_size = GetSignatureSize(private_key_path); + size_t signature_size = GetSignatureSize(private_key_path); brillo::Blob hash; ASSERT_TRUE(PayloadSigner::HashPayloadForSigning( payload_path, {signature_size}, &hash, nullptr)); @@ -229,15 +229,15 @@ static void SignGeneratedShellPayload(SignatureTest signature_test, fclose(fprikey); RSA_free(rsa); } - int signature_size = GetSignatureSize(private_key_path); + size_t signature_size = GetSignatureSize(private_key_path); test_utils::ScopedTempFile hash_file("hash.XXXXXX"); string signature_size_string; if (signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) signature_size_string = - base::StringPrintf("%d:%d", signature_size, signature_size); + base::StringPrintf("%zu:%zu", signature_size, signature_size); else - signature_size_string = base::StringPrintf("%d", signature_size); + signature_size_string = base::StringPrintf("%zu", signature_size); string delta_generator_path = GetBuildArtifactsPath("delta_generator"); ASSERT_EQ(0, System(base::StringPrintf( @@ -531,7 +531,7 @@ static void GenerateDeltaFile(bool full_kernel, if (signature_test == kSignatureGeneratedPlaceholder || signature_test == kSignatureGeneratedPlaceholderMismatch) { - int signature_size = + size_t signature_size = GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath)); LOG(INFO) << "Inserting placeholder signature."; ASSERT_TRUE(InsertSignaturePlaceholder( diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 7c304cec..bef09bb0 100644 --- a/payload_generator/generate_delta_main.cc +++ 
b/payload_generator/generate_delta_main.cc @@ -54,13 +54,13 @@ namespace chromeos_update_engine { namespace { void ParseSignatureSizes(const string& signature_sizes_flag, - vector* signature_sizes) { + vector* signature_sizes) { signature_sizes->clear(); vector split_strings = base::SplitString( signature_sizes_flag, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL); for (const string& str : split_strings) { - int size = 0; - bool parsing_successful = base::StringToInt(str, &size); + size_t size = 0; + bool parsing_successful = base::StringToSizeT(str, &size); LOG_IF(FATAL, !parsing_successful) << "Invalid signature size: " << str; LOG_IF(FATAL, size != 256 && size != 512) @@ -102,7 +102,7 @@ bool ParseImageInfo(const string& channel, return true; } -void CalculateHashForSigning(const vector& sizes, +void CalculateHashForSigning(const vector& sizes, const string& out_hash_file, const string& out_metadata_hash_file, const string& in_file) { @@ -445,8 +445,10 @@ int Main(int argc, char** argv) { // Initialize the Xz compressor. XzCompressInit(); - vector signature_sizes; - ParseSignatureSizes(FLAGS_signature_size, &signature_sizes); + vector signature_sizes; + if (!FLAGS_signature_size.empty()) { + ParseSignatureSizes(FLAGS_signature_size, &signature_sizes); + } if (!FLAGS_out_hash_file.empty() || !FLAGS_out_metadata_hash_file.empty()) { CHECK(FLAGS_out_metadata_size_file.empty()); diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 92313dcf..9739052f 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -352,7 +352,7 @@ bool PayloadSigner::SignatureBlobLength(const vector& private_key_paths, } bool PayloadSigner::HashPayloadForSigning(const string& payload_path, - const vector& signature_sizes, + const vector& signature_sizes, brillo::Blob* out_payload_hash_data, brillo::Blob* out_metadata_hash) { // Create a signature blob with signatures filled with 0. diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h index 7854e126..76e583b8 100644 --- a/payload_generator/payload_signer.h +++ b/payload_generator/payload_signer.h @@ -91,7 +91,7 @@ class PayloadSigner { // // The changes to payload are not preserved or written to disk. static bool HashPayloadForSigning(const std::string& payload_path, - const std::vector& signature_sizes, + const std::vector& signature_sizes, brillo::Blob* out_payload_hash_data, brillo::Blob* out_metadata_hash); diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc index 75fc6942..eaf87768 100644 --- a/payload_generator/payload_signer_unittest.cc +++ b/payload_generator/payload_signer_unittest.cc @@ -171,7 +171,7 @@ TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) { uint64_t metadata_size; EXPECT_TRUE(payload.WritePayload( payload_file.path(), "/dev/null", "", &metadata_size)); - const vector sizes = {256}; + const vector sizes = {256}; brillo::Blob unsigned_payload_hash, unsigned_metadata_hash; EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(), sizes, From 05b3b963888adb5b9bbe906fb09cdae7ce47836c Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 26 Sep 2019 17:19:21 -0700 Subject: [PATCH 115/624] Add snapshot_enabled field to DeltaArchiveManifest. When this field is set, OTA client should try to use snapshots during updates. If it is not set, OTA client MUST NOT use snapshots. 
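For illustration, a client-side check of the new flag could look like the following sketch (assuming DeltaArchiveManifest exposes the DynamicPartitionMetadata message through its generated dynamic_partition_metadata() accessor; this helper is not part of the patch):

  // Returns true only when the payload explicitly opts in to snapshots.
  bool PayloadRequestsSnapshots(const DeltaArchiveManifest& manifest) {
    return manifest.has_dynamic_partition_metadata() &&
           manifest.dynamic_partition_metadata().snapshot_enabled();
  }

When such a check returns false, the client falls back to a regular A/B update and creates the target slot partitions directly.
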
This field is set iff the target build has Virtual A/B feature
enabled / retrofitted (except for secondary OTAs, where system_other
should be written directly).

In follow-up CLs, DynamicPartitionControlAndroid only maps partitions
as snapshots if this flag is set. DeltaPerformer may skip in-place
SOURCE_COPY operations if snapshots are created.

Note that:
- On retrofit Virtual A/B devices, this field is always set. If
  updating from a non-Virtual A/B build to a Virtual A/B build, the
  OTA client on the device would simply ignore this field and perform
  a regular A/B OTA.
- When downgrading a retrofit Virtual A/B device back to a regular
  A/B build, this field is NOT set, and the OTA client will create
  the target slot partitions as usual.
- When sideloading a full OTA in recovery, OTA client cannot create
  any snapshots. This field is effectively ignored (as if it were set
  to false).

Fixes: 141720569
Test: build OTA
Test: update_engine_unittests
Test: apply Virtual A/B OTA
Change-Id: I1939b24f6687f66a682cd6b7ae826f27acf98e2f
---
 payload_generator/payload_generation_config.cc | 5 +++++
 update_metadata.proto | 6 ++++++
 2 files changed, 11 insertions(+)

diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 648fe8b9..2dd2626e 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -160,6 +160,11 @@ bool ImageConfig::LoadDynamicPartitionMetadata(
       }
     }
   }
+
+  bool snapshot_enabled = false;
+  store.GetBoolean("virtual_ab", &snapshot_enabled);
+  metadata->set_snapshot_enabled(snapshot_enabled);
+
   dynamic_partition_metadata = std::move(metadata);
   return true;
 }
diff --git a/update_metadata.proto b/update_metadata.proto
index 7e8e7d4f..1657a7e2 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -302,6 +302,12 @@ message DynamicPartitionMetadata {
   // - If an updatable group is in the manifest but not on the device, the group
   //   is added to the device.
   repeated DynamicPartitionGroup groups = 1;
+
+  // Whether dynamic partitions have snapshots during the update. If this is
+  // set to true, the update_engine daemon creates snapshots for all dynamic
+  // partitions if possible. If this is unset, the update_engine daemon MUST
+  // NOT create snapshots for dynamic partitions.
+  optional bool snapshot_enabled = 2;
 }

 message DeltaArchiveManifest {

From 0f59a9a41177186cf41b331e279d0b7804512654 Mon Sep 17 00:00:00 2001
From: Amin Hassani
Date: Fri, 27 Sep 2019 10:24:31 -0700
Subject: [PATCH 116/624] update_engine: Deprecate minor version 1

Minor version 1 was for the old days where we rewrote the single
partition with an update (no A/B partitions). But those days are long
over and we don't think there is any device out there that has this
capability anymore. Even if there is, we can always serve full
payloads along with the stepping stone we have in M53. So this is safe
to go.

BUG=chromium:1008553
TEST=sudo FEATURES=test emerge update_engine
TEST=ran cros flash two times.
Change-Id: Ib928ade36af5136cd4013a30dfb39ee7fd5b07b1 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1829160 Tested-by: Amin Hassani Reviewed-by: Sen Jiang Commit-Queue: Amin Hassani --- Android.bp | 11 - BUILD.gn | 11 - payload_consumer/delta_performer.cc | 110 +-- payload_consumer/delta_performer.h | 2 - .../delta_performer_integration_test.cc | 171 +--- payload_consumer/payload_constants.cc | 18 +- payload_consumer/payload_constants.h | 2 +- payload_generator/cycle_breaker.cc | 218 ----- payload_generator/cycle_breaker.h | 71 -- payload_generator/cycle_breaker_unittest.cc | 279 ------ payload_generator/delta_diff_generator.cc | 15 +- payload_generator/delta_diff_utils.cc | 190 +---- payload_generator/delta_diff_utils.h | 8 - .../delta_diff_utils_unittest.cc | 239 +----- payload_generator/generate_delta_main.cc | 15 +- payload_generator/graph_types.cc | 23 - payload_generator/graph_types.h | 87 -- payload_generator/graph_utils.cc | 142 ---- payload_generator/graph_utils.h | 54 -- payload_generator/graph_utils_unittest.cc | 94 --- payload_generator/inplace_generator.cc | 798 ------------------ payload_generator/inplace_generator.h | 240 ------ .../inplace_generator_unittest.cc | 752 ----------------- .../payload_generation_config.cc | 20 +- payload_generator/payload_generation_config.h | 4 - payload_generator/tarjan.cc | 83 -- payload_generator/tarjan.h | 53 -- payload_generator/tarjan_unittest.cc | 94 --- payload_generator/topological_sort.cc | 57 -- payload_generator/topological_sort.h | 42 - .../topological_sort_unittest.cc | 96 --- scripts/payload_info.py | 4 - scripts/update_payload/applier.py | 50 +- scripts/update_payload/checker.py | 117 +-- scripts/update_payload/checker_unittest.py | 159 +--- scripts/update_payload/common.py | 7 +- scripts/update_payload/test_utils.py | 6 +- scripts/update_payload/update_metadata_pb2.py | 370 ++++---- update_metadata.proto | 8 +- 39 files changed, 328 insertions(+), 4392 deletions(-) delete mode 100644 payload_generator/cycle_breaker.cc delete mode 100644 payload_generator/cycle_breaker.h delete mode 100644 payload_generator/cycle_breaker_unittest.cc delete mode 100644 payload_generator/graph_types.cc delete mode 100644 payload_generator/graph_types.h delete mode 100644 payload_generator/graph_utils.cc delete mode 100644 payload_generator/graph_utils.h delete mode 100644 payload_generator/graph_utils_unittest.cc delete mode 100644 payload_generator/inplace_generator.cc delete mode 100644 payload_generator/inplace_generator.h delete mode 100644 payload_generator/inplace_generator_unittest.cc delete mode 100644 payload_generator/tarjan.cc delete mode 100644 payload_generator/tarjan.h delete mode 100644 payload_generator/tarjan_unittest.cc delete mode 100644 payload_generator/topological_sort.cc delete mode 100644 payload_generator/topological_sort.h delete mode 100644 payload_generator/topological_sort_unittest.cc diff --git a/Android.bp b/Android.bp index 2e215c5a..47f03181 100644 --- a/Android.bp +++ b/Android.bp @@ -440,7 +440,6 @@ cc_library_static { "payload_generator/block_mapping.cc", "payload_generator/boot_img_filesystem.cc", "payload_generator/bzip.cc", - "payload_generator/cycle_breaker.cc", "payload_generator/deflate_utils.cc", "payload_generator/delta_diff_generator.cc", "payload_generator/delta_diff_utils.cc", @@ -448,9 +447,6 @@ cc_library_static { "payload_generator/extent_ranges.cc", "payload_generator/extent_utils.cc", "payload_generator/full_update_generator.cc", - 
"payload_generator/graph_types.cc", - "payload_generator/graph_utils.cc", - "payload_generator/inplace_generator.cc", "payload_generator/mapfile_filesystem.cc", "payload_generator/payload_file.cc", "payload_generator/payload_generation_config_android.cc", @@ -459,8 +455,6 @@ cc_library_static { "payload_generator/payload_signer.cc", "payload_generator/raw_filesystem.cc", "payload_generator/squashfs_filesystem.cc", - "payload_generator/tarjan.cc", - "payload_generator/topological_sort.cc", "payload_generator/xz_android.cc", ], } @@ -639,7 +633,6 @@ cc_test { "payload_generator/blob_file_writer_unittest.cc", "payload_generator/block_mapping_unittest.cc", "payload_generator/boot_img_filesystem_unittest.cc", - "payload_generator/cycle_breaker_unittest.cc", "payload_generator/deflate_utils_unittest.cc", "payload_generator/delta_diff_utils_unittest.cc", "payload_generator/ext2_filesystem_unittest.cc", @@ -647,8 +640,6 @@ cc_test { "payload_generator/extent_utils_unittest.cc", "payload_generator/fake_filesystem.cc", "payload_generator/full_update_generator_unittest.cc", - "payload_generator/graph_utils_unittest.cc", - "payload_generator/inplace_generator_unittest.cc", "payload_generator/mapfile_filesystem_unittest.cc", "payload_generator/payload_file_unittest.cc", "payload_generator/payload_generation_config_android_unittest.cc", @@ -656,8 +647,6 @@ cc_test { "payload_generator/payload_properties_unittest.cc", "payload_generator/payload_signer_unittest.cc", "payload_generator/squashfs_filesystem_unittest.cc", - "payload_generator/tarjan_unittest.cc", - "payload_generator/topological_sort_unittest.cc", "payload_generator/zip_unittest.cc", "testrunner.cc", "update_attempter_android_unittest.cc", diff --git a/BUILD.gn b/BUILD.gn index 5f5aa545..1e803a06 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -329,7 +329,6 @@ static_library("libpayload_generator") { "payload_generator/block_mapping.cc", "payload_generator/boot_img_filesystem.cc", "payload_generator/bzip.cc", - "payload_generator/cycle_breaker.cc", "payload_generator/deflate_utils.cc", "payload_generator/delta_diff_generator.cc", "payload_generator/delta_diff_utils.cc", @@ -337,9 +336,6 @@ static_library("libpayload_generator") { "payload_generator/extent_ranges.cc", "payload_generator/extent_utils.cc", "payload_generator/full_update_generator.cc", - "payload_generator/graph_types.cc", - "payload_generator/graph_utils.cc", - "payload_generator/inplace_generator.cc", "payload_generator/mapfile_filesystem.cc", "payload_generator/payload_file.cc", "payload_generator/payload_generation_config.cc", @@ -348,8 +344,6 @@ static_library("libpayload_generator") { "payload_generator/payload_signer.cc", "payload_generator/raw_filesystem.cc", "payload_generator/squashfs_filesystem.cc", - "payload_generator/tarjan.cc", - "payload_generator/topological_sort.cc", "payload_generator/xz_chromeos.cc", ] configs += [ ":target_defaults" ] @@ -499,23 +493,18 @@ if (use.test) { "payload_generator/blob_file_writer_unittest.cc", "payload_generator/block_mapping_unittest.cc", "payload_generator/boot_img_filesystem_unittest.cc", - "payload_generator/cycle_breaker_unittest.cc", "payload_generator/deflate_utils_unittest.cc", "payload_generator/delta_diff_utils_unittest.cc", "payload_generator/ext2_filesystem_unittest.cc", "payload_generator/extent_ranges_unittest.cc", "payload_generator/extent_utils_unittest.cc", "payload_generator/full_update_generator_unittest.cc", - "payload_generator/graph_utils_unittest.cc", - "payload_generator/inplace_generator_unittest.cc", 
"payload_generator/mapfile_filesystem_unittest.cc", "payload_generator/payload_file_unittest.cc", "payload_generator/payload_generation_config_unittest.cc", "payload_generator/payload_properties_unittest.cc", "payload_generator/payload_signer_unittest.cc", "payload_generator/squashfs_filesystem_unittest.cc", - "payload_generator/tarjan_unittest.cc", - "payload_generator/topological_sort_unittest.cc", "payload_generator/zip_unittest.cc", "payload_state_unittest.cc", "testrunner.cc", diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 53acc117..cc39943c 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -315,9 +315,8 @@ bool DeltaPerformer::OpenCurrentPartition() { install_plan_->partitions.size() - partitions_.size(); const InstallPlan::Partition& install_part = install_plan_->partitions[num_previous_partitions + current_partition_]; - // Open source fds if we have a delta payload with minor version >= 2. - if (payload_->type == InstallPayloadType::kDelta && - GetMinorVersion() != kInPlaceMinorPayloadVersion) { + // Open source fds if we have a delta payload. + if (payload_->type == InstallPayloadType::kDelta) { source_path_ = install_part.source_path; int err; source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err); @@ -369,9 +368,8 @@ bool DeltaPerformer::OpenCurrentECCPartition() { if (current_partition_ >= partitions_.size()) return false; - // No support for ECC in minor version 1 or full payloads. - if (payload_->type == InstallPayloadType::kFull || - GetMinorVersion() == kInPlaceMinorPayloadVersion) + // No support for ECC for full payloads. + if (payload_->type == InstallPayloadType::kFull) return false; #if USE_FEC @@ -685,14 +683,6 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { op_result = PerformZeroOrDiscardOperation(op); OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time); break; - case InstallOperation::MOVE: - op_result = PerformMoveOperation(op); - OP_DURATION_HISTOGRAM("MOVE", op_start_time); - break; - case InstallOperation::BSDIFF: - op_result = PerformBsdiffOperation(op); - OP_DURATION_HISTOGRAM("BSDIFF", op_start_time); - break; case InstallOperation::SOURCE_COPY: op_result = PerformSourceCopyOperation(op, error); OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time); @@ -1030,57 +1020,6 @@ bool DeltaPerformer::PerformZeroOrDiscardOperation( return true; } -bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) { - // Calculate buffer size. Note, this function doesn't do a sliding - // window to copy in case the source and destination blocks overlap. - // If we wanted to do a sliding window, we could program the server - // to generate deltas that effectively did a sliding window. - - uint64_t blocks_to_read = 0; - for (int i = 0; i < operation.src_extents_size(); i++) - blocks_to_read += operation.src_extents(i).num_blocks(); - - uint64_t blocks_to_write = 0; - for (int i = 0; i < operation.dst_extents_size(); i++) - blocks_to_write += operation.dst_extents(i).num_blocks(); - - DCHECK_EQ(blocks_to_write, blocks_to_read); - brillo::Blob buf(blocks_to_write * block_size_); - - // Read in bytes. 
- ssize_t bytes_read = 0; - for (int i = 0; i < operation.src_extents_size(); i++) { - ssize_t bytes_read_this_iteration = 0; - const Extent& extent = operation.src_extents(i); - const size_t bytes = extent.num_blocks() * block_size_; - TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole); - TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_, - &buf[bytes_read], - bytes, - extent.start_block() * block_size_, - &bytes_read_this_iteration)); - TEST_AND_RETURN_FALSE(bytes_read_this_iteration == - static_cast(bytes)); - bytes_read += bytes_read_this_iteration; - } - - // Write bytes out. - ssize_t bytes_written = 0; - for (int i = 0; i < operation.dst_extents_size(); i++) { - const Extent& extent = operation.dst_extents(i); - const size_t bytes = extent.num_blocks() * block_size_; - TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole); - TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_, - &buf[bytes_written], - bytes, - extent.start_block() * block_size_)); - bytes_written += bytes; - } - DCHECK_EQ(bytes_written, bytes_read); - DCHECK_EQ(bytes_written, static_cast(buf.size())); - return true; -} - bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash, const InstallOperation& operation, const FileDescriptorPtr source_fd, @@ -1265,47 +1204,6 @@ bool DeltaPerformer::ExtentsToBsdiffPositionsString( return true; } -bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) { - // Since we delete data off the beginning of the buffer as we use it, - // the data we need should be exactly at the beginning of the buffer. - TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset()); - TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length()); - - string input_positions; - TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(), - block_size_, - operation.src_length(), - &input_positions)); - string output_positions; - TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(), - block_size_, - operation.dst_length(), - &output_positions)); - - TEST_AND_RETURN_FALSE(bsdiff::bspatch(target_path_.c_str(), - target_path_.c_str(), - buffer_.data(), - buffer_.size(), - input_positions.c_str(), - output_positions.c_str()) == 0); - DiscardBuffer(true, buffer_.size()); - - if (operation.dst_length() % block_size_) { - // Zero out rest of final block. - // TODO(adlr): build this into bspatch; it's more efficient that way. - const Extent& last_extent = - operation.dst_extents(operation.dst_extents_size() - 1); - const uint64_t end_byte = - (last_extent.start_block() + last_extent.num_blocks()) * block_size_; - const uint64_t begin_byte = - end_byte - (block_size_ - operation.dst_length() % block_size_); - brillo::Blob zeros(end_byte - begin_byte); - TEST_AND_RETURN_FALSE(utils::PWriteAll( - target_fd_, zeros.data(), end_byte - begin_byte, begin_byte)); - } - return true; -} - namespace { class BsdiffExtentFile : public bsdiff::FileInterface { diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 55cb2a46..4493c2ae 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -223,8 +223,6 @@ class DeltaPerformer : public FileWriter { // set even if it fails. 
bool PerformReplaceOperation(const InstallOperation& operation); bool PerformZeroOrDiscardOperation(const InstallOperation& operation); - bool PerformMoveOperation(const InstallOperation& operation); - bool PerformBsdiffOperation(const InstallOperation& operation); bool PerformSourceCopyOperation(const InstallOperation& operation, ErrorCode* error); bool PerformSourceBsdiffOperation(const InstallOperation& operation, diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index e064077f..904ea5a5 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -291,7 +291,6 @@ static void SignGeneratedShellPayload(SignatureTest signature_test, static void GenerateDeltaFile(bool full_kernel, bool full_rootfs, - bool noop, ssize_t chunk_size, SignatureTest signature_test, DeltaState* state, @@ -368,24 +367,16 @@ static void GenerateDeltaFile(bool full_kernel, ones.size())); } - if (noop) { - EXPECT_TRUE(base::CopyFile(base::FilePath(state->a_img), - base::FilePath(state->b_img))); - old_image_info = new_image_info; - } else { - if (minor_version == kSourceMinorPayloadVersion) { - // Create a result image with image_size bytes of garbage. - brillo::Blob ones(state->image_size, 0xff); - EXPECT_TRUE(utils::WriteFile( - state->result_img.c_str(), ones.data(), ones.size())); - EXPECT_EQ(utils::FileSize(state->a_img), - utils::FileSize(state->result_img)); - } - - EXPECT_TRUE( - base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"), - base::FilePath(state->b_img))); + // Create a result image with image_size bytes of garbage. + brillo::Blob ones(state->image_size, 0xff); + EXPECT_TRUE( + utils::WriteFile(state->result_img.c_str(), ones.data(), ones.size())); + EXPECT_EQ(utils::FileSize(state->a_img), utils::FileSize(state->result_img)); + EXPECT_TRUE( + base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"), + base::FilePath(state->b_img))); + { // Make some changes to the B image. 
string b_mnt; ScopedLoopMounter b_mounter(state->b_img, &b_mnt, 0); @@ -460,10 +451,6 @@ static void GenerateDeltaFile(bool full_kernel, std::copy( std::begin(kNewData), std::end(kNewData), state->new_kernel_data.begin()); - if (noop) { - state->old_kernel_data = state->new_kernel_data; - } - // Write kernels to disk EXPECT_TRUE(utils::WriteFile(state->old_kernel.c_str(), state->old_kernel_data.data(), @@ -564,7 +551,6 @@ static void GenerateDeltaFile(bool full_kernel, static void ApplyDeltaFile(bool full_kernel, bool full_rootfs, - bool noop, SignatureTest signature_test, DeltaState* state, bool hash_checks_mandatory, @@ -611,11 +597,6 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_FALSE(signature.data().empty()); } - if (noop) { - EXPECT_EQ(0, manifest.install_operations_size()); - EXPECT_EQ(1, manifest.kernel_install_operations_size()); - } - if (full_kernel) { EXPECT_FALSE(manifest.has_old_kernel_info()); } else { @@ -632,25 +613,12 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_EQ(manifest.new_image_info().build_version(), "test-build-version"); if (!full_rootfs) { - if (noop) { - EXPECT_EQ(manifest.old_image_info().channel(), "test-channel"); - EXPECT_EQ(manifest.old_image_info().board(), "test-board"); - EXPECT_EQ(manifest.old_image_info().version(), "test-version"); - EXPECT_EQ(manifest.old_image_info().key(), "test-key"); - EXPECT_EQ(manifest.old_image_info().build_channel(), - "test-build-channel"); - EXPECT_EQ(manifest.old_image_info().build_version(), - "test-build-version"); - } else { - EXPECT_EQ(manifest.old_image_info().channel(), "src-channel"); - EXPECT_EQ(manifest.old_image_info().board(), "src-board"); - EXPECT_EQ(manifest.old_image_info().version(), "src-version"); - EXPECT_EQ(manifest.old_image_info().key(), "src-key"); - EXPECT_EQ(manifest.old_image_info().build_channel(), - "src-build-channel"); - EXPECT_EQ(manifest.old_image_info().build_version(), - "src-build-version"); - } + EXPECT_EQ(manifest.old_image_info().channel(), "src-channel"); + EXPECT_EQ(manifest.old_image_info().board(), "src-board"); + EXPECT_EQ(manifest.old_image_info().version(), "src-version"); + EXPECT_EQ(manifest.old_image_info().key(), "src-key"); + EXPECT_EQ(manifest.old_image_info().build_channel(), "src-build-channel"); + EXPECT_EQ(manifest.old_image_info().build_version(), "src-build-version"); } if (full_rootfs) { @@ -741,25 +709,14 @@ static void ApplyDeltaFile(bool full_kernel, // The partitions should be empty before DeltaPerformer. install_plan->partitions.clear(); - // With minor version 2, we want the target to be the new image, result_img, - // but with version 1, we want to update A in place. 
- string target_root, target_kernel; - if (minor_version == kSourceMinorPayloadVersion) { - target_root = state->result_img; - target_kernel = state->result_kernel; - } else { - target_root = state->a_img; - target_kernel = state->old_kernel; - } - state->fake_boot_control_.SetPartitionDevice( kPartitionNameRoot, install_plan->source_slot, state->a_img); state->fake_boot_control_.SetPartitionDevice( kPartitionNameKernel, install_plan->source_slot, state->old_kernel); state->fake_boot_control_.SetPartitionDevice( - kPartitionNameRoot, install_plan->target_slot, target_root); + kPartitionNameRoot, install_plan->target_slot, state->result_img); state->fake_boot_control_.SetPartitionDevice( - kPartitionNameKernel, install_plan->target_slot, target_kernel); + kPartitionNameKernel, install_plan->target_slot, state->result_kernel); ErrorCode expected_error, actual_error; bool continue_writing; @@ -838,20 +795,12 @@ void VerifyPayloadResult(DeltaPerformer* performer, return; } - brillo::Blob updated_kernel_partition; - if (minor_version == kSourceMinorPayloadVersion) { - CompareFilesByBlock( - state->result_kernel, state->new_kernel, state->kernel_size); - CompareFilesByBlock(state->result_img, state->b_img, state->image_size); - EXPECT_TRUE( - utils::ReadFile(state->result_kernel, &updated_kernel_partition)); - } else { - CompareFilesByBlock( - state->old_kernel, state->new_kernel, state->kernel_size); - CompareFilesByBlock(state->a_img, state->b_img, state->image_size); - EXPECT_TRUE(utils::ReadFile(state->old_kernel, &updated_kernel_partition)); - } + CompareFilesByBlock( + state->result_kernel, state->new_kernel, state->kernel_size); + CompareFilesByBlock(state->result_img, state->b_img, state->image_size); + brillo::Blob updated_kernel_partition; + EXPECT_TRUE(utils::ReadFile(state->result_kernel, &updated_kernel_partition)); ASSERT_GE(updated_kernel_partition.size(), arraysize(kNewData)); EXPECT_TRUE(std::equal(std::begin(kNewData), std::end(kNewData), @@ -897,7 +846,6 @@ void VerifyPayload(DeltaPerformer* performer, void DoSmallImageTest(bool full_kernel, bool full_rootfs, - bool noop, ssize_t chunk_size, SignatureTest signature_test, bool hash_checks_mandatory, @@ -906,7 +854,6 @@ void DoSmallImageTest(bool full_kernel, DeltaPerformer* performer = nullptr; GenerateDeltaFile(full_kernel, full_rootfs, - noop, chunk_size, signature_test, &state, @@ -921,7 +868,6 @@ void DoSmallImageTest(bool full_kernel, ScopedPathUnlinker result_kernel_unlinker(state.result_kernel); ApplyDeltaFile(full_kernel, full_rootfs, - noop, signature_test, &state, hash_checks_mandatory, @@ -936,8 +882,7 @@ void DoOperationHashMismatchTest(OperationHashTest op_hash_test, bool hash_checks_mandatory) { DeltaState state; uint64_t minor_version = kFullPayloadMinorVersion; - GenerateDeltaFile( - true, true, false, -1, kSignatureGenerated, &state, minor_version); + GenerateDeltaFile(true, true, -1, kSignatureGenerated, &state, minor_version); ScopedPathUnlinker a_img_unlinker(state.a_img); ScopedPathUnlinker b_img_unlinker(state.b_img); ScopedPathUnlinker delta_unlinker(state.delta_path); @@ -946,7 +891,6 @@ void DoOperationHashMismatchTest(OperationHashTest op_hash_test, DeltaPerformer* performer = nullptr; ApplyDeltaFile(true, true, - false, kSignatureGenerated, &state, hash_checks_mandatory, @@ -957,144 +901,105 @@ void DoOperationHashMismatchTest(OperationHashTest op_hash_test, } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) { - DoSmallImageTest(false, - false, - false, - -1, - kSignatureGenerator, - 
false, - kInPlaceMinorPayloadVersion); + DoSmallImageTest( + false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderTest) { DoSmallImageTest(false, - false, false, -1, kSignatureGeneratedPlaceholder, false, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderMismatchTest) { DeltaState state; GenerateDeltaFile(false, - false, false, -1, kSignatureGeneratedPlaceholderMismatch, &state, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) { DoSmallImageTest(false, - false, false, kBlockSize, kSignatureGenerator, false, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) { - DoSmallImageTest(true, - false, - false, - -1, - kSignatureGenerator, - false, - kInPlaceMinorPayloadVersion); + DoSmallImageTest( + true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) { DoSmallImageTest(true, true, - false, -1, kSignatureGenerator, true, kFullPayloadMinorVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootNoopSmallImageTest) { - DoSmallImageTest(false, - false, - true, - -1, - kSignatureGenerator, - false, - kInPlaceMinorPayloadVersion); -} - TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) { - DoSmallImageTest(false, - false, - false, - -1, - kSignatureNone, - false, - kInPlaceMinorPayloadVersion); + DoSmallImageTest( + false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) { - DoSmallImageTest(false, - false, - false, - -1, - kSignatureGenerated, - true, - kInPlaceMinorPayloadVersion); + DoSmallImageTest( + false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) { DoSmallImageTest(false, - false, false, -1, kSignatureGeneratedShell, false, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellBadKeyTest) { DoSmallImageTest(false, - false, false, -1, kSignatureGeneratedShellBadKey, false, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl1Test) { DoSmallImageTest(false, - false, false, -1, kSignatureGeneratedShellRotateCl1, false, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl2Test) { DoSmallImageTest(false, - false, false, -1, kSignatureGeneratedShellRotateCl2, false, - kInPlaceMinorPayloadVersion); + kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) { DoSmallImageTest(false, - false, false, -1, kSignatureGenerator, diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 213d798f..9e684d7c 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -16,22 +16,24 @@ #include "update_engine/payload_consumer/payload_constants.h" +#include + namespace chromeos_update_engine { const uint64_t kChromeOSMajorPayloadVersion = 1; const uint64_t kBrilloMajorPayloadVersion 
= 2; -const uint32_t kMinSupportedMinorPayloadVersion = 1; -const uint32_t kMaxSupportedMinorPayloadVersion = 6; - const uint32_t kFullPayloadMinorVersion = 0; -const uint32_t kInPlaceMinorPayloadVersion = 1; +// const uint32_t kInPlaceMinorPayloadVersion = 1; DEPRECATED const uint32_t kSourceMinorPayloadVersion = 2; const uint32_t kOpSrcHashMinorPayloadVersion = 3; const uint32_t kBrotliBsdiffMinorPayloadVersion = 4; const uint32_t kPuffdiffMinorPayloadVersion = 5; const uint32_t kVerityMinorPayloadVersion = 6; +const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion; +const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion; + const uint64_t kMinSupportedMajorPayloadVersion = 1; const uint64_t kMaxSupportedMajorPayloadVersion = 2; @@ -44,10 +46,6 @@ const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'}; const char* InstallOperationTypeName(InstallOperation_Type op_type) { switch (op_type) { - case InstallOperation::BSDIFF: - return "BSDIFF"; - case InstallOperation::MOVE: - return "MOVE"; case InstallOperation::REPLACE: return "REPLACE"; case InstallOperation::REPLACE_BZ: @@ -66,6 +64,10 @@ const char* InstallOperationTypeName(InstallOperation_Type op_type) { return "PUFFDIFF"; case InstallOperation::BROTLI_BSDIFF: return "BROTLI_BSDIFF"; + + case InstallOperation::BSDIFF: + case InstallOperation::MOVE: + NOTREACHED(); } return ""; } diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h index 7f768984..fe823f41 100644 --- a/payload_consumer/payload_constants.h +++ b/payload_consumer/payload_constants.h @@ -39,7 +39,7 @@ extern const uint64_t kMaxSupportedMajorPayloadVersion; extern const uint32_t kFullPayloadMinorVersion; // The minor version used by the in-place delta generator algorithm. -extern const uint32_t kInPlaceMinorPayloadVersion; +// extern const uint32_t kInPlaceMinorPayloadVersion; DEPRECATED // The minor version used by the A to B delta generator algorithm. extern const uint32_t kSourceMinorPayloadVersion; diff --git a/payload_generator/cycle_breaker.cc b/payload_generator/cycle_breaker.cc deleted file mode 100644 index d76f679f..00000000 --- a/payload_generator/cycle_breaker.cc +++ /dev/null @@ -1,218 +0,0 @@ -// -// Copyright (C) 2012 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/cycle_breaker.h" - -#include - -#include -#include -#include -#include - -#include -#include -#include - -#include "update_engine/payload_generator/graph_utils.h" -#include "update_engine/payload_generator/tarjan.h" - -using std::make_pair; -using std::set; -using std::vector; - -namespace chromeos_update_engine { - -// This is the outer function from the original paper. -void CycleBreaker::BreakCycles(const Graph& graph, set* out_cut_edges) { - cut_edges_.clear(); - - // Make a copy, which we will modify by removing edges. Thus, in each - // iteration subgraph_ is the current subgraph or the original with - // vertices we desire. 
This variable was "A_K" in the original paper. - subgraph_ = graph; - - // The paper calls for the "adjacency structure (i.e., graph) of - // strong (-ly connected) component K with least vertex in subgraph - // induced by {s, s + 1, ..., n}". - // We arbitrarily order each vertex by its index in the graph. Thus, - // each iteration, we are looking at the subgraph {s, s + 1, ..., n} - // and looking for the strongly connected component with vertex s. - - TarjanAlgorithm tarjan; - skipped_ops_ = 0; - - for (Graph::size_type i = 0; i < subgraph_.size(); i++) { - InstallOperation_Type op_type = graph[i].aop.op.type(); - if (op_type == InstallOperation::REPLACE || - op_type == InstallOperation::REPLACE_BZ) { - skipped_ops_++; - continue; - } - - if (i > 0) { - // Erase node (i - 1) from subgraph_. First, erase what it points to - subgraph_[i - 1].out_edges.clear(); - // Now, erase any pointers to node (i - 1) - for (Graph::size_type j = i; j < subgraph_.size(); j++) { - subgraph_[j].out_edges.erase(i - 1); - } - } - - // Calculate SCC (strongly connected component) with vertex i. - vector component_indexes; - tarjan.Execute(i, &subgraph_, &component_indexes); - - // Set subgraph edges for the components in the SCC. - for (vector::iterator it = component_indexes.begin(); - it != component_indexes.end(); - ++it) { - subgraph_[*it].subgraph_edges.clear(); - for (vector::iterator jt = component_indexes.begin(); - jt != component_indexes.end(); - ++jt) { - // If there's a link from *it -> *jt in the graph, - // add a subgraph_ edge - if (base::ContainsKey(subgraph_[*it].out_edges, *jt)) - subgraph_[*it].subgraph_edges.insert(*jt); - } - } - - current_vertex_ = i; - blocked_.clear(); - blocked_.resize(subgraph_.size()); - blocked_graph_.clear(); - blocked_graph_.resize(subgraph_.size()); - Circuit(current_vertex_, 0); - } - - out_cut_edges->swap(cut_edges_); - LOG(INFO) << "Cycle breaker skipped " << skipped_ops_ << " ops."; - DCHECK(stack_.empty()); -} - -static const size_t kMaxEdgesToConsider = 2; - -void CycleBreaker::HandleCircuit() { - stack_.push_back(current_vertex_); - CHECK_GE(stack_.size(), static_cast::size_type>(2)); - Edge min_edge = make_pair(stack_[0], stack_[1]); - uint64_t min_edge_weight = std::numeric_limits::max(); - size_t edges_considered = 0; - for (vector::const_iterator it = stack_.begin(); - it != (stack_.end() - 1); - ++it) { - Edge edge = make_pair(*it, *(it + 1)); - if (cut_edges_.find(edge) != cut_edges_.end()) { - stack_.pop_back(); - return; - } - uint64_t edge_weight = graph_utils::EdgeWeight(subgraph_, edge); - if (edge_weight < min_edge_weight) { - min_edge_weight = edge_weight; - min_edge = edge; - } - edges_considered++; - if (edges_considered == kMaxEdgesToConsider) - break; - } - cut_edges_.insert(min_edge); - stack_.pop_back(); -} - -void CycleBreaker::Unblock(Vertex::Index u) { - blocked_[u] = false; - - for (Vertex::EdgeMap::iterator it = blocked_graph_[u].out_edges.begin(); - it != blocked_graph_[u].out_edges.end();) { - Vertex::Index w = it->first; - blocked_graph_[u].out_edges.erase(it++); - if (blocked_[w]) - Unblock(w); - } -} - -bool CycleBreaker::StackContainsCutEdge() const { - for (vector::const_iterator it = ++stack_.begin(), - e = stack_.end(); - it != e; - ++it) { - Edge edge = make_pair(*(it - 1), *it); - if (base::ContainsKey(cut_edges_, edge)) { - return true; - } - } - return false; -} - -bool CycleBreaker::Circuit(Vertex::Index vertex, Vertex::Index depth) { - // "vertex" was "v" in the original paper. 
- bool found = false; // Was "f" in the original paper. - stack_.push_back(vertex); - blocked_[vertex] = true; - { - static int counter = 0; - counter++; - if (counter == 10000) { - counter = 0; - std::string stack_str; - for (Vertex::Index index : stack_) { - stack_str += std::to_string(index); - stack_str += " -> "; - } - LOG(INFO) << "stack: " << stack_str; - } - } - - for (Vertex::SubgraphEdgeMap::iterator w = - subgraph_[vertex].subgraph_edges.begin(); - w != subgraph_[vertex].subgraph_edges.end(); - ++w) { - if (*w == current_vertex_) { - // The original paper called for printing stack_ followed by - // current_vertex_ here, which is a cycle. Instead, we call - // HandleCircuit() to break it. - HandleCircuit(); - found = true; - } else if (!blocked_[*w]) { - if (Circuit(*w, depth + 1)) { - found = true; - if ((depth > kMaxEdgesToConsider) || StackContainsCutEdge()) - break; - } - } - } - - if (found) { - Unblock(vertex); - } else { - for (Vertex::SubgraphEdgeMap::iterator w = - subgraph_[vertex].subgraph_edges.begin(); - w != subgraph_[vertex].subgraph_edges.end(); - ++w) { - if (blocked_graph_[*w].out_edges.find(vertex) == - blocked_graph_[*w].out_edges.end()) { - blocked_graph_[*w].out_edges.insert( - make_pair(vertex, EdgeProperties())); - } - } - } - CHECK_EQ(vertex, stack_.back()); - stack_.pop_back(); - return found; -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/cycle_breaker.h b/payload_generator/cycle_breaker.h deleted file mode 100644 index 01518fef..00000000 --- a/payload_generator/cycle_breaker.h +++ /dev/null @@ -1,71 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_ -#define UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_ - -// This is a modified implementation of Donald B. Johnson's algorithm for -// finding all elementary cycles (a.k.a. circuits) in a directed graph. -// See the paper "Finding All the Elementary Circuits of a Directed Graph" -// at http://dutta.csc.ncsu.edu/csc791_spring07/wrap/circuits_johnson.pdf -// for reference. - -// Note: this version of the algorithm not only finds cycles, but breaks them. -// It uses a simple greedy algorithm for cutting: when a cycle is discovered, -// the edge with the least weight is cut. Longer term we may wish to do -// something more intelligent, since the goal is (ideally) to minimize the -// sum of the weights of all cut cycles. In practice, it's intractable -// to consider all cycles before cutting any; there are simply too many. -// In a sample graph representative of a typical workload, I found over -// 5 * 10^15 cycles. - -#include -#include - -#include "update_engine/payload_generator/graph_types.h" - -namespace chromeos_update_engine { - -class CycleBreaker { - public: - CycleBreaker() : skipped_ops_(0) {} - // out_cut_edges is replaced with the cut edges. 
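// Illustrative sketch (not from the update_engine sources, types simplified):
// the file comment above describes the greedy policy used when a cycle is
// reported: cut the lightest edge on that cycle, unless an already-cut edge
// lies on it. The real code additionally caps how many edges it examines
// (kMaxEdgesToConsider); this standalone version omits that cap.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <set>
#include <utility>
#include <vector>

using VertexIdx = std::size_t;
using CutEdge = std::pair<VertexIdx, VertexIdx>;

// |cycle| lists the vertices v0, v1, ..., vk of the cycle in order (k >= 1).
void CutLightestEdge(const std::vector<VertexIdx>& cycle,
                     const std::function<uint64_t(const CutEdge&)>& weight,
                     std::set<CutEdge>* cut_edges) {
  CutEdge min_edge{cycle[0], cycle[1]};
  uint64_t min_weight = std::numeric_limits<uint64_t>::max();
  for (std::size_t i = 0; i + 1 < cycle.size(); ++i) {
    CutEdge edge{cycle[i], cycle[i + 1]};
    if (cut_edges->count(edge))
      return;  // This cycle is already broken by a previous cut.
    uint64_t w = weight(edge);
    if (w < min_weight) {
      min_weight = w;
      min_edge = edge;
    }
  }
  cut_edges->insert(min_edge);
}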
- void BreakCycles(const Graph& graph, std::set* out_cut_edges); - - size_t skipped_ops() const { return skipped_ops_; } - - private: - void HandleCircuit(); - void Unblock(Vertex::Index u); - bool Circuit(Vertex::Index vertex, Vertex::Index depth); - bool StackContainsCutEdge() const; - - std::vector blocked_; // "blocked" in the paper - Vertex::Index current_vertex_; // "s" in the paper - std::vector stack_; // the stack variable in the paper - Graph subgraph_; // "A_K" in the paper - Graph blocked_graph_; // "B" in the paper - - std::set cut_edges_; - - // Number of operations skipped b/c we know they don't have any - // incoming edges. - size_t skipped_ops_; -}; - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_ diff --git a/payload_generator/cycle_breaker_unittest.cc b/payload_generator/cycle_breaker_unittest.cc deleted file mode 100644 index fdcf49be..00000000 --- a/payload_generator/cycle_breaker_unittest.cc +++ /dev/null @@ -1,279 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/cycle_breaker.h" - -#include -#include -#include -#include - -#include -#include -#include - -#include "update_engine/payload_generator/graph_types.h" - -using std::make_pair; -using std::pair; -using std::set; -using std::string; -using std::vector; - -namespace chromeos_update_engine { - -namespace { -void SetOpForNodes(Graph* graph) { - for (Vertex& vertex : *graph) { - vertex.aop.op.set_type(InstallOperation::MOVE); - } -} -} // namespace - -class CycleBreakerTest : public ::testing::Test {}; - -TEST(CycleBreakerTest, SimpleTest) { - int counter = 0; - const Vertex::Index n_a = counter++; - const Vertex::Index n_b = counter++; - const Vertex::Index n_c = counter++; - const Vertex::Index n_d = counter++; - const Vertex::Index n_e = counter++; - const Vertex::Index n_f = counter++; - const Vertex::Index n_g = counter++; - const Vertex::Index n_h = counter++; - const Graph::size_type kNodeCount = counter++; - - Graph graph(kNodeCount); - SetOpForNodes(&graph); - - graph[n_a].out_edges.insert(make_pair(n_e, EdgeProperties())); - graph[n_a].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties())); - graph[n_c].out_edges.insert(make_pair(n_d, EdgeProperties())); - graph[n_d].out_edges.insert(make_pair(n_e, EdgeProperties())); - graph[n_d].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_b, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_c, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_f].out_edges.insert(make_pair(n_g, EdgeProperties())); - graph[n_g].out_edges.insert(make_pair(n_h, EdgeProperties())); - graph[n_h].out_edges.insert(make_pair(n_g, EdgeProperties())); - - CycleBreaker breaker; - - set broken_edges; - breaker.BreakCycles(graph, &broken_edges); - - // The 
following cycles must be cut: - // A->E->B - // C->D->E - // G->H - - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_e)) || - base::ContainsKey(broken_edges, make_pair(n_e, n_b)) || - base::ContainsKey(broken_edges, make_pair(n_b, n_a))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_c, n_d)) || - base::ContainsKey(broken_edges, make_pair(n_d, n_e)) || - base::ContainsKey(broken_edges, make_pair(n_e, n_c))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_g, n_h)) || - base::ContainsKey(broken_edges, make_pair(n_h, n_g))); - EXPECT_EQ(3U, broken_edges.size()); -} - -namespace { -pair EdgeWithWeight(Vertex::Index dest, - uint64_t weight) { - EdgeProperties props; - props.extents.resize(1); - props.extents[0].set_num_blocks(weight); - return make_pair(dest, props); -} -} // namespace - -// This creates a bunch of cycles like this: -// -// root <------. -// (t)-> / | \ | -// V V V | -// N N N | -// \ | / | -// VVV | -// N | -// / | \ | -// V V V | -// N N N | -// ... | -// (s)-> \ | / | -// VVV | -// N | -// \_________/ -// -// such that the original cutting algo would cut edges (s). We changed -// the algorithm to cut cycles (t) instead, since they are closer to the -// root, and that can massively speed up cycle cutting. -TEST(CycleBreakerTest, AggressiveCutTest) { - size_t counter = 0; - - const int kNodesPerGroup = 4; - const int kGroups = 33; - - Graph graph(kGroups * kNodesPerGroup + 1); // + 1 for the root node - SetOpForNodes(&graph); - - const Vertex::Index n_root = counter++; - - Vertex::Index last_hub = n_root; - for (int i = 0; i < kGroups; i++) { - uint64_t weight = 5; - if (i == 0) - weight = 2; - else if (i == (kGroups - 1)) - weight = 1; - - const Vertex::Index next_hub = counter++; - - for (int j = 0; j < (kNodesPerGroup - 1); j++) { - const Vertex::Index node = counter++; - graph[last_hub].out_edges.insert(EdgeWithWeight(node, weight)); - graph[node].out_edges.insert(EdgeWithWeight(next_hub, weight)); - } - last_hub = next_hub; - } - - graph[last_hub].out_edges.insert(EdgeWithWeight(n_root, 5)); - - EXPECT_EQ(counter, graph.size()); - - CycleBreaker breaker; - - set broken_edges; - LOG(INFO) << "If this hangs for more than 1 second, the test has failed."; - breaker.BreakCycles(graph, &broken_edges); - - set expected_cuts; - - for (Vertex::EdgeMap::const_iterator it = graph[n_root].out_edges.begin(), - e = graph[n_root].out_edges.end(); - it != e; - ++it) { - expected_cuts.insert(make_pair(n_root, it->first)); - } - - EXPECT_TRUE(broken_edges == expected_cuts); -} - -TEST(CycleBreakerTest, WeightTest) { - size_t counter = 0; - const Vertex::Index n_a = counter++; - const Vertex::Index n_b = counter++; - const Vertex::Index n_c = counter++; - const Vertex::Index n_d = counter++; - const Vertex::Index n_e = counter++; - const Vertex::Index n_f = counter++; - const Vertex::Index n_g = counter++; - const Vertex::Index n_h = counter++; - const Vertex::Index n_i = counter++; - const Vertex::Index n_j = counter++; - const Graph::size_type kNodeCount = counter++; - - Graph graph(kNodeCount); - SetOpForNodes(&graph); - - graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 4)); - graph[n_a].out_edges.insert(EdgeWithWeight(n_f, 3)); - graph[n_a].out_edges.insert(EdgeWithWeight(n_h, 2)); - graph[n_b].out_edges.insert(EdgeWithWeight(n_a, 3)); - graph[n_b].out_edges.insert(EdgeWithWeight(n_c, 4)); - graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 5)); - graph[n_c].out_edges.insert(EdgeWithWeight(n_d, 3)); - graph[n_d].out_edges.insert(EdgeWithWeight(n_a, 
6)); - graph[n_d].out_edges.insert(EdgeWithWeight(n_e, 3)); - graph[n_e].out_edges.insert(EdgeWithWeight(n_d, 4)); - graph[n_e].out_edges.insert(EdgeWithWeight(n_g, 5)); - graph[n_f].out_edges.insert(EdgeWithWeight(n_g, 2)); - graph[n_g].out_edges.insert(EdgeWithWeight(n_f, 3)); - graph[n_g].out_edges.insert(EdgeWithWeight(n_d, 5)); - graph[n_h].out_edges.insert(EdgeWithWeight(n_i, 8)); - graph[n_i].out_edges.insert(EdgeWithWeight(n_e, 4)); - graph[n_i].out_edges.insert(EdgeWithWeight(n_h, 9)); - graph[n_i].out_edges.insert(EdgeWithWeight(n_j, 6)); - - CycleBreaker breaker; - - set broken_edges; - breaker.BreakCycles(graph, &broken_edges); - - // These are required to be broken: - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_a))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_c))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_d, n_e))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_f, n_g))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_h, n_i))); -} - -TEST(CycleBreakerTest, UnblockGraphTest) { - size_t counter = 0; - const Vertex::Index n_a = counter++; - const Vertex::Index n_b = counter++; - const Vertex::Index n_c = counter++; - const Vertex::Index n_d = counter++; - const Graph::size_type kNodeCount = counter++; - - Graph graph(kNodeCount); - SetOpForNodes(&graph); - - graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 1)); - graph[n_a].out_edges.insert(EdgeWithWeight(n_c, 1)); - graph[n_b].out_edges.insert(EdgeWithWeight(n_c, 2)); - graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 2)); - graph[n_b].out_edges.insert(EdgeWithWeight(n_d, 2)); - graph[n_d].out_edges.insert(EdgeWithWeight(n_a, 2)); - - CycleBreaker breaker; - - set broken_edges; - breaker.BreakCycles(graph, &broken_edges); - - // These are required to be broken: - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_b))); - EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_c))); -} - -TEST(CycleBreakerTest, SkipOpsTest) { - size_t counter = 0; - const Vertex::Index n_a = counter++; - const Vertex::Index n_b = counter++; - const Vertex::Index n_c = counter++; - const Graph::size_type kNodeCount = counter++; - - Graph graph(kNodeCount); - SetOpForNodes(&graph); - graph[n_a].aop.op.set_type(InstallOperation::REPLACE_BZ); - graph[n_c].aop.op.set_type(InstallOperation::REPLACE); - - graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 1)); - graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 1)); - - CycleBreaker breaker; - - set broken_edges; - breaker.BreakCycles(graph, &broken_edges); - - EXPECT_EQ(2U, breaker.skipped_ops()); -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc index d484d32b..595a41ec 100644 --- a/payload_generator/delta_diff_generator.cc +++ b/payload_generator/delta_diff_generator.cc @@ -37,7 +37,6 @@ #include "update_engine/payload_generator/blob_file_writer.h" #include "update_engine/payload_generator/delta_diff_utils.h" #include "update_engine/payload_generator/full_update_generator.h" -#include "update_engine/payload_generator/inplace_generator.h" #include "update_engine/payload_generator/payload_file.h" using std::string; @@ -93,13 +92,8 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, unique_ptr strategy; if (!old_part.path.empty()) { // Delta update. 
- if (config.version.minor == kInPlaceMinorPayloadVersion) { - LOG(INFO) << "Using generator InplaceGenerator()."; - strategy.reset(new InplaceGenerator()); - } else { - LOG(INFO) << "Using generator ABGenerator()."; - strategy.reset(new ABGenerator()); - } + LOG(INFO) << "Using generator ABGenerator()."; + strategy.reset(new ABGenerator()); } else { LOG(INFO) << "Using generator FullUpdateGenerator()."; strategy.reset(new FullUpdateGenerator()); @@ -110,11 +104,6 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, TEST_AND_RETURN_FALSE(strategy->GenerateOperations( config, old_part, new_part, &blob_file, &aops)); - // Filter the no-operations. OperationsGenerators should not output this - // kind of operations normally, but this is an extra step to fix that if - // happened. - diff_utils::FilterNoopOperations(&aops); - TEST_AND_RETURN_FALSE(payload.AddPartition(old_part, new_part, aops)); } } diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc index 1bad4d75..db69d740 100644 --- a/payload_generator/delta_diff_utils.cc +++ b/payload_generator/delta_diff_utils.cc @@ -83,103 +83,6 @@ const uint64_t kMaxPuffdiffDestinationSize = 150 * 1024 * 1024; // bytes const int kBrotliCompressionQuality = 11; -// Process a range of blocks from |range_start| to |range_end| in the extent at -// position |*idx_p| of |extents|. If |do_remove| is true, this range will be -// removed, which may cause the extent to be trimmed, split or removed entirely. -// The value of |*idx_p| is updated to point to the next extent to be processed. -// Returns true iff the next extent to process is a new or updated one. -bool ProcessExtentBlockRange(vector* extents, - size_t* idx_p, - const bool do_remove, - uint64_t range_start, - uint64_t range_end) { - size_t idx = *idx_p; - uint64_t start_block = (*extents)[idx].start_block(); - uint64_t num_blocks = (*extents)[idx].num_blocks(); - uint64_t range_size = range_end - range_start; - - if (do_remove) { - if (range_size == num_blocks) { - // Remove the entire extent. - extents->erase(extents->begin() + idx); - } else if (range_end == num_blocks) { - // Trim the end of the extent. - (*extents)[idx].set_num_blocks(num_blocks - range_size); - idx++; - } else if (range_start == 0) { - // Trim the head of the extent. - (*extents)[idx].set_start_block(start_block + range_size); - (*extents)[idx].set_num_blocks(num_blocks - range_size); - } else { - // Trim the middle, splitting the remainder into two parts. - (*extents)[idx].set_num_blocks(range_start); - Extent e; - e.set_start_block(start_block + range_end); - e.set_num_blocks(num_blocks - range_end); - idx++; - extents->insert(extents->begin() + idx, e); - } - } else if (range_end == num_blocks) { - // Done with this extent. - idx++; - } else { - return false; - } - - *idx_p = idx; - return true; -} - -// Remove identical corresponding block ranges in |src_extents| and -// |dst_extents|. Used for preventing moving of blocks onto themselves during -// MOVE operations. The value of |total_bytes| indicates the actual length of -// content; this may be slightly less than the total size of blocks, in which -// case the last block is only partly occupied with data. Returns the total -// number of bytes removed. 
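// Illustrative sketch (not from the update_engine sources): a worked example
// of the byte accounting described in the comment above, assuming a 4096-byte
// block size. Only the last-block adjustment is shown; the extent trimming is
// handled by the function that follows.
#include <cstdint>

namespace {
constexpr uint64_t kIllustrativeBlockSize = 4096;  // assumed block size
}  // namespace

// |identical_blocks|: corresponding src/dst blocks removed; |total_bytes|:
// actual file length; |last_block_removed|: whether the file's final (and
// possibly partial) block was among those removed.
uint64_t RemovedContentBytes(uint64_t identical_blocks,
                             uint64_t total_bytes,
                             bool last_block_removed) {
  uint64_t removed = identical_blocks * kIllustrativeBlockSize;
  uint64_t used_in_last_block = total_bytes % kIllustrativeBlockSize;
  if (last_block_removed && used_in_last_block != 0)
    removed -= kIllustrativeBlockSize - used_in_last_block;  // unused tail
  return removed;
}
// Example: a 10000-byte file spans three blocks (4096 + 4096 + 1808 used
// bytes). Removing all three identical blocks removes 10000 content bytes,
// not 3 * 4096 = 12288: RemovedContentBytes(3, 10000, true) == 10000.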
-size_t RemoveIdenticalBlockRanges(vector* src_extents, - vector* dst_extents, - const size_t total_bytes) { - size_t src_idx = 0; - size_t dst_idx = 0; - uint64_t src_offset = 0, dst_offset = 0; - size_t removed_bytes = 0, nonfull_block_bytes; - bool do_remove = false; - while (src_idx < src_extents->size() && dst_idx < dst_extents->size()) { - do_remove = ((*src_extents)[src_idx].start_block() + src_offset == - (*dst_extents)[dst_idx].start_block() + dst_offset); - - uint64_t src_num_blocks = (*src_extents)[src_idx].num_blocks(); - uint64_t dst_num_blocks = (*dst_extents)[dst_idx].num_blocks(); - uint64_t min_num_blocks = - std::min(src_num_blocks - src_offset, dst_num_blocks - dst_offset); - uint64_t prev_src_offset = src_offset; - uint64_t prev_dst_offset = dst_offset; - src_offset += min_num_blocks; - dst_offset += min_num_blocks; - - bool new_src = ProcessExtentBlockRange( - src_extents, &src_idx, do_remove, prev_src_offset, src_offset); - bool new_dst = ProcessExtentBlockRange( - dst_extents, &dst_idx, do_remove, prev_dst_offset, dst_offset); - if (new_src) { - src_offset = 0; - } - if (new_dst) { - dst_offset = 0; - } - - if (do_remove) - removed_bytes += min_num_blocks * kBlockSize; - } - - // If we removed the last block and this block is only partly used by file - // content, deduct the unused portion from the total removed byte count. - if (do_remove && (nonfull_block_bytes = total_bytes % kBlockSize)) - removed_bytes -= kBlockSize - nonfull_block_bytes; - - return removed_bytes; -} - // Storing a diff operation has more overhead over replace operation in the // manifest, we need to store an additional src_sha256_hash which is 32 bytes // and not compressible, and also src_extents which could use anywhere from a @@ -318,13 +221,11 @@ void FileDeltaProcessor::Run() { return; } - if (!version_.InplaceUpdate()) { - if (!ABGenerator::FragmentOperations( - version_, &file_aops_, new_part_, blob_file_)) { - LOG(ERROR) << "Failed to fragment operations for " << name_; - failed_ = true; - return; - } + if (!ABGenerator::FragmentOperations( + version_, &file_aops_, new_part_, blob_file_)) { + LOG(ERROR) << "Failed to fragment operations for " << name_; + failed_ = true; + return; } LOG(INFO) << "Encoded file " << name_ << " (" << new_extents_blocks_ @@ -447,12 +348,8 @@ bool DeltaReadPartition(vector* aops, // from the same source blocks. At that time, this code can die. -adlr FilesystemInterface::File old_file = GetOldFile(old_files_map, new_file.name); - vector old_file_extents; - if (version.InplaceUpdate()) - old_file_extents = - FilterExtentRanges(old_file.extents, old_visited_blocks); - else - old_file_extents = FilterExtentRanges(old_file.extents, old_zero_blocks); + auto old_file_extents = + FilterExtentRanges(old_file.extents, old_zero_blocks); old_visited_blocks.AddExtents(old_file_extents); file_delta_processors.emplace_back(old_part.path, @@ -541,21 +438,6 @@ bool DeltaMovedAndZeroBlocks(vector* aops, &old_block_ids, &new_block_ids)); - // If the update is inplace, we map all the blocks that didn't move, - // regardless of the contents since they are already copied and no operation - // is required. 
- if (version.InplaceUpdate()) { - uint64_t num_blocks = std::min(old_num_blocks, new_num_blocks); - for (uint64_t block = 0; block < num_blocks; block++) { - if (old_block_ids[block] == new_block_ids[block] && - !old_visited_blocks->ContainsBlock(block) && - !new_visited_blocks->ContainsBlock(block)) { - old_visited_blocks->AddBlock(block); - new_visited_blocks->AddBlock(block); - } - } - } - // A mapping from the block_id to the list of block numbers with that block id // in the old partition. This is used to lookup where in the old partition // is a block from the new partition. @@ -602,10 +484,6 @@ bool DeltaMovedAndZeroBlocks(vector* aops, AppendBlockToExtents(&old_identical_blocks, old_blocks_map_it->second.back()); AppendBlockToExtents(&new_identical_blocks, block); - // We can't reuse source blocks in minor version 1 because the cycle - // breaking algorithm used in the in-place update doesn't support that. - if (version.InplaceUpdate()) - old_blocks_map_it->second.pop_back(); } if (chunk_blocks == -1) @@ -657,9 +535,7 @@ bool DeltaMovedAndZeroBlocks(vector* aops, aops->emplace_back(); AnnotatedOperation* aop = &aops->back(); aop->name = ""; - aop->op.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY) - ? InstallOperation::SOURCE_COPY - : InstallOperation::MOVE); + aop->op.set_type(InstallOperation::SOURCE_COPY); uint64_t chunk_num_blocks = std::min(static_cast(extent.num_blocks()) - op_block_offset, @@ -732,13 +608,8 @@ bool DeltaReadFile(vector* aops, // Check if the operation writes nothing. if (operation.dst_extents_size() == 0) { - if (operation.type() == InstallOperation::MOVE) { - LOG(INFO) << "Empty MOVE operation (" << name << "), skipping"; - continue; - } else { - LOG(ERROR) << "Empty non-MOVE operation"; - return false; - } + LOG(ERROR) << "Empty non-MOVE operation"; + return false; } // Now, insert into the list of operations. @@ -828,8 +699,7 @@ bool ReadExtentsToDiff(const string& old_part, // Disable bsdiff, and puffdiff when the data is too big. bool bsdiff_allowed = - version.OperationAllowed(InstallOperation::SOURCE_BSDIFF) || - version.OperationAllowed(InstallOperation::BSDIFF); + version.OperationAllowed(InstallOperation::SOURCE_BSDIFF); if (bsdiff_allowed && blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) { LOG(INFO) << "bsdiff blacklisted, data too big: " @@ -878,9 +748,7 @@ bool ReadExtentsToDiff(const string& old_part, kBlockSize)); if (old_data == new_data) { // No change in data. - operation.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY) - ? 
InstallOperation::SOURCE_COPY - : InstallOperation::MOVE); + operation.set_type(InstallOperation::SOURCE_COPY); data_blob = brillo::Blob(); } else if (IsDiffOperationBetter( operation, data_blob.size(), 0, src_extents.size())) { @@ -892,7 +760,7 @@ bool ReadExtentsToDiff(const string& old_part, ScopedPathUnlinker unlinker(patch.value()); std::unique_ptr bsdiff_patch_writer; - InstallOperation_Type operation_type = InstallOperation::BSDIFF; + InstallOperation_Type operation_type = InstallOperation::SOURCE_BSDIFF; if (version.OperationAllowed(InstallOperation::BROTLI_BSDIFF)) { bsdiff_patch_writer = bsdiff::CreateBSDF2PatchWriter(patch.value(), @@ -901,9 +769,6 @@ bool ReadExtentsToDiff(const string& old_part, operation_type = InstallOperation::BROTLI_BSDIFF; } else { bsdiff_patch_writer = bsdiff::CreateBsdiffPatchWriter(patch.value()); - if (version.OperationAllowed(InstallOperation::SOURCE_BSDIFF)) { - operation_type = InstallOperation::SOURCE_BSDIFF; - } } brillo::Blob bsdiff_delta; @@ -976,23 +841,14 @@ bool ReadExtentsToDiff(const string& old_part, } } - // Remove identical src/dst block ranges in MOVE operations. - if (operation.type() == InstallOperation::MOVE) { - auto removed_bytes = - RemoveIdenticalBlockRanges(&src_extents, &dst_extents, new_data.size()); - operation.set_src_length(old_data.size() - removed_bytes); - operation.set_dst_length(new_data.size() - removed_bytes); - } - // WARNING: We always set legacy |src_length| and |dst_length| fields for // BSDIFF. For SOURCE_BSDIFF we only set them for minor version 3 and // lower. This is needed because we used to use these two parameters in the // SOURCE_BSDIFF for minor version 3 and lower, but we do not need them // anymore in higher minor versions. This means if we stop adding these // parameters for those minor versions, the delta payloads will be invalid. - if (operation.type() == InstallOperation::BSDIFF || - (operation.type() == InstallOperation::SOURCE_BSDIFF && - version.minor <= kOpSrcHashMinorPayloadVersion)) { + if (operation.type() == InstallOperation::SOURCE_BSDIFF && + version.minor <= kOpSrcHashMinorPayloadVersion) { operation.set_src_length(old_data.size()); operation.set_dst_length(new_data.size()); } @@ -1021,22 +877,6 @@ bool IsNoSourceOperation(InstallOperation_Type op_type) { op_type == InstallOperation::DISCARD); } -// Returns true if |op| is a no-op operation that doesn't do any useful work -// (e.g., a move operation that copies blocks onto themselves). -bool IsNoopOperation(const InstallOperation& op) { - return (op.type() == InstallOperation::MOVE && - ExpandExtents(op.src_extents()) == ExpandExtents(op.dst_extents())); -} - -void FilterNoopOperations(vector* ops) { - ops->erase(std::remove_if(ops->begin(), - ops->end(), - [](const AnnotatedOperation& aop) { - return IsNoopOperation(aop.op); - }), - ops->end()); -} - bool InitializePartitionInfo(const PartitionConfig& part, PartitionInfo* info) { info->set_size(part.size); HashCalculator hasher; diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h index 2306572c..a062327d 100644 --- a/payload_generator/delta_diff_utils.h +++ b/payload_generator/delta_diff_utils.h @@ -127,14 +127,6 @@ bool IsAReplaceOperation(InstallOperation_Type op_type); // Returns true if an operation with type |op_type| has no |src_extents|. bool IsNoSourceOperation(InstallOperation_Type op_type); -// Returns true if |op| is a no-op operation that doesn't do any useful work -// (e.g., a move operation that copies blocks onto themselves). 
-bool IsNoopOperation(const InstallOperation& op); - -// Filters all the operations that are no-op, maintaining the relative order -// of the rest of the operations. -void FilterNoopOperations(std::vector* ops); - bool InitializePartitionInfo(const PartitionConfig& partition, PartitionInfo* info); diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc index f730cc93..e25c867f 100644 --- a/payload_generator/delta_diff_utils_unittest.cc +++ b/payload_generator/delta_diff_utils_unittest.cc @@ -194,164 +194,6 @@ TEST_F(DeltaDiffUtilsTest, SkipVerityExtentsTest) { } } -TEST_F(DeltaDiffUtilsTest, MoveSmallTest) { - brillo::Blob data_blob(block_size_); - test_utils::FillWithData(&data_blob); - - // The old file is on a different block than the new one. - vector old_extents = {ExtentForRange(11, 1)}; - vector new_extents = {ExtentForRange(1, 1)}; - - EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob)); - EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob)); - - brillo::Blob data; - InstallOperation op; - EXPECT_TRUE(diff_utils::ReadExtentsToDiff( - old_part_.path, - new_part_.path, - old_extents, - new_extents, - {}, // old_deflates - {}, // new_deflates - PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion), - &data, - &op)); - EXPECT_TRUE(data.empty()); - - EXPECT_TRUE(op.has_type()); - EXPECT_EQ(InstallOperation::MOVE, op.type()); - EXPECT_FALSE(op.has_data_offset()); - EXPECT_FALSE(op.has_data_length()); - EXPECT_EQ(1, op.src_extents_size()); - EXPECT_EQ(kBlockSize, op.src_length()); - EXPECT_EQ(1, op.dst_extents_size()); - EXPECT_EQ(kBlockSize, op.dst_length()); - EXPECT_EQ(utils::BlocksInExtents(op.src_extents()), - utils::BlocksInExtents(op.dst_extents())); - EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents())); -} - -TEST_F(DeltaDiffUtilsTest, MoveWithSameBlock) { - // Setup the old/new files so that it has immobile chunks; we make sure to - // utilize all sub-cases of such chunks: blocks 21--22 induce a split (src) - // and complete removal (dst), whereas blocks 24--25 induce trimming of the - // tail (src) and head (dst) of extents. The final block (29) is used for - // ensuring we properly account for the number of bytes removed in cases where - // the last block is partly filled. The detailed configuration: - // - // Old: [ 20 21 22 23 24 25 ] [ 28 29 ] - // New: [ 18 ] [ 21 22 ] [ 20 ] [ 24 25 26 ] [ 29 ] - // Same: ^^ ^^ ^^ ^^ ^^ - vector old_extents = {ExtentForRange(20, 6), ExtentForRange(28, 2)}; - vector new_extents = {ExtentForRange(18, 1), - ExtentForRange(21, 2), - ExtentForRange(20, 1), - ExtentForRange(24, 3), - ExtentForRange(29, 1)}; - - uint64_t num_blocks = utils::BlocksInExtents(old_extents); - EXPECT_EQ(num_blocks, utils::BlocksInExtents(new_extents)); - - // The size of the data should match the total number of blocks. Each block - // has a different content. 
- brillo::Blob file_data; - for (uint64_t i = 0; i < num_blocks; ++i) { - file_data.resize(file_data.size() + kBlockSize, 'a' + i); - } - - EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, file_data)); - EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, file_data)); - - brillo::Blob data; - InstallOperation op; - EXPECT_TRUE(diff_utils::ReadExtentsToDiff( - old_part_.path, - new_part_.path, - old_extents, - new_extents, - {}, // old_deflates - {}, // new_deflates - PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion), - &data, - &op)); - - EXPECT_TRUE(data.empty()); - - EXPECT_TRUE(op.has_type()); - EXPECT_EQ(InstallOperation::MOVE, op.type()); - EXPECT_FALSE(op.has_data_offset()); - EXPECT_FALSE(op.has_data_length()); - - // The expected old and new extents that actually moved. See comment above. - old_extents = { - ExtentForRange(20, 1), ExtentForRange(23, 1), ExtentForRange(28, 1)}; - new_extents = { - ExtentForRange(18, 1), ExtentForRange(20, 1), ExtentForRange(26, 1)}; - num_blocks = utils::BlocksInExtents(old_extents); - - EXPECT_EQ(num_blocks * kBlockSize, op.src_length()); - EXPECT_EQ(num_blocks * kBlockSize, op.dst_length()); - - EXPECT_EQ(old_extents.size(), static_cast(op.src_extents_size())); - for (int i = 0; i < op.src_extents_size(); i++) { - EXPECT_EQ(old_extents[i].start_block(), op.src_extents(i).start_block()) - << "i == " << i; - EXPECT_EQ(old_extents[i].num_blocks(), op.src_extents(i).num_blocks()) - << "i == " << i; - } - - EXPECT_EQ(new_extents.size(), static_cast(op.dst_extents_size())); - for (int i = 0; i < op.dst_extents_size(); i++) { - EXPECT_EQ(new_extents[i].start_block(), op.dst_extents(i).start_block()) - << "i == " << i; - EXPECT_EQ(new_extents[i].num_blocks(), op.dst_extents(i).num_blocks()) - << "i == " << i; - } -} - -TEST_F(DeltaDiffUtilsTest, BsdiffSmallTest) { - // Test a BSDIFF operation from block 1 to block 2. - brillo::Blob data_blob(kBlockSize); - test_utils::FillWithData(&data_blob); - - // The old file is on a different block than the new one. - vector old_extents = {ExtentForRange(1, 1)}; - vector new_extents = {ExtentForRange(2, 1)}; - - EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob)); - // Modify one byte in the new file. - data_blob[0]++; - EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob)); - - brillo::Blob data; - InstallOperation op; - EXPECT_TRUE(diff_utils::ReadExtentsToDiff( - old_part_.path, - new_part_.path, - old_extents, - new_extents, - {}, // old_deflates - {}, // new_deflates - PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion), - &data, - &op)); - - EXPECT_FALSE(data.empty()); - - EXPECT_TRUE(op.has_type()); - EXPECT_EQ(InstallOperation::BSDIFF, op.type()); - EXPECT_FALSE(op.has_data_offset()); - EXPECT_FALSE(op.has_data_length()); - EXPECT_EQ(1, op.src_extents_size()); - EXPECT_EQ(kBlockSize, op.src_length()); - EXPECT_EQ(1, op.dst_extents_size()); - EXPECT_EQ(kBlockSize, op.dst_length()); - EXPECT_EQ(utils::BlocksInExtents(op.src_extents()), - utils::BlocksInExtents(op.dst_extents())); - EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents())); -} - TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) { // The old file is on a different block than the new one. 
vector old_extents = {ExtentForRange(1, 1)}; @@ -384,7 +226,7 @@ TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) { {}, // old_deflates {}, // new_deflates PayloadVersion(kChromeOSMajorPayloadVersion, - kInPlaceMinorPayloadVersion), + kSourceMinorPayloadVersion), &data, &op)); EXPECT_FALSE(data.empty()); @@ -500,49 +342,6 @@ TEST_F(DeltaDiffUtilsTest, PreferReplaceTest) { EXPECT_EQ(InstallOperation::REPLACE_BZ, op.type()); } -TEST_F(DeltaDiffUtilsTest, IsNoopOperationTest) { - InstallOperation op; - op.set_type(InstallOperation::REPLACE_BZ); - EXPECT_FALSE(diff_utils::IsNoopOperation(op)); - op.set_type(InstallOperation::MOVE); - EXPECT_TRUE(diff_utils::IsNoopOperation(op)); - *(op.add_src_extents()) = ExtentForRange(3, 2); - *(op.add_dst_extents()) = ExtentForRange(3, 2); - EXPECT_TRUE(diff_utils::IsNoopOperation(op)); - *(op.add_src_extents()) = ExtentForRange(7, 5); - *(op.add_dst_extents()) = ExtentForRange(7, 5); - EXPECT_TRUE(diff_utils::IsNoopOperation(op)); - *(op.add_src_extents()) = ExtentForRange(20, 2); - *(op.add_dst_extents()) = ExtentForRange(20, 1); - *(op.add_dst_extents()) = ExtentForRange(21, 1); - EXPECT_TRUE(diff_utils::IsNoopOperation(op)); - *(op.add_src_extents()) = ExtentForRange(24, 1); - *(op.add_dst_extents()) = ExtentForRange(25, 1); - EXPECT_FALSE(diff_utils::IsNoopOperation(op)); -} - -TEST_F(DeltaDiffUtilsTest, FilterNoopOperations) { - AnnotatedOperation aop1; - aop1.op.set_type(InstallOperation::REPLACE_BZ); - *(aop1.op.add_dst_extents()) = ExtentForRange(3, 2); - aop1.name = "aop1"; - - AnnotatedOperation aop2 = aop1; - aop2.name = "aop2"; - - AnnotatedOperation noop; - noop.op.set_type(InstallOperation::MOVE); - *(noop.op.add_src_extents()) = ExtentForRange(3, 2); - *(noop.op.add_dst_extents()) = ExtentForRange(3, 2); - noop.name = "noop"; - - vector ops = {noop, aop1, noop, noop, aop2, noop}; - diff_utils::FilterNoopOperations(&ops); - EXPECT_EQ(2u, ops.size()); - EXPECT_EQ("aop1", ops[0].name); - EXPECT_EQ("aop2", ops[1].name); -} - // Test the simple case where all the blocks are different and no new blocks are // zeroed. TEST_F(DeltaDiffUtilsTest, NoZeroedOrUniqueBlocksDetected) { @@ -550,7 +349,7 @@ TEST_F(DeltaDiffUtilsTest, NoZeroedOrUniqueBlocksDetected) { InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42); EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks - kInPlaceMinorPayloadVersion)); + kSourceMinorPayloadVersion)); EXPECT_EQ(0U, old_visited_blocks_.blocks()); EXPECT_EQ(0U, new_visited_blocks_.blocks()); @@ -558,29 +357,6 @@ TEST_F(DeltaDiffUtilsTest, NoZeroedOrUniqueBlocksDetected) { EXPECT_TRUE(aops_.empty()); } -// Test that when the partitions have identical blocks in the same positions no -// MOVE operation is performed and all the blocks are handled. -TEST_F(DeltaDiffUtilsTest, IdenticalPartitionsDontMove) { - InitializePartitionWithUniqueBlocks(old_part_, block_size_, 42); - InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42); - - // Mark some of the blocks as already visited. - vector already_visited = {ExtentForRange(5, 10), - ExtentForRange(25, 10)}; - old_visited_blocks_.AddExtents(already_visited); - new_visited_blocks_.AddExtents(already_visited); - - // Most of the blocks rest in the same place, but there's no need for MOVE - // operations on those blocks. 
- EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks - kInPlaceMinorPayloadVersion)); - - EXPECT_EQ(kDefaultBlockCount, old_visited_blocks_.blocks()); - EXPECT_EQ(kDefaultBlockCount, new_visited_blocks_.blocks()); - EXPECT_EQ(0, blob_size_); - EXPECT_TRUE(aops_.empty()); -} - // Test that when the partitions have identical blocks in the same positions // MOVE operation is performed and all the blocks are handled. TEST_F(DeltaDiffUtilsTest, IdenticalBlocksAreCopiedFromSource) { @@ -701,16 +477,14 @@ TEST_F(DeltaDiffUtilsTest, ZeroBlocksUseReplaceBz) { EXPECT_TRUE(WriteExtents(old_part_.path, old_zeros, block_size_, zeros_data)); EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(5, // chunk_blocks - kInPlaceMinorPayloadVersion)); + kSourceMinorPayloadVersion)); - // Zeroed blocks from old_visited_blocks_ were copied over, so me actually - // use them regardless of the trivial MOVE operation not being emitted. + // Zeroed blocks from |old_visited_blocks_| were copied over. EXPECT_EQ(old_zeros, old_visited_blocks_.GetExtentsForBlockCount( old_visited_blocks_.blocks())); - // All the new zeroed blocks should be used, part with REPLACE_BZ and part - // trivial MOVE operations (not included). + // All the new zeroed blocks should be used with REPLACE_BZ. EXPECT_EQ(new_zeros, new_visited_blocks_.GetExtentsForBlockCount( new_visited_blocks_.blocks())); @@ -721,7 +495,8 @@ TEST_F(DeltaDiffUtilsTest, ZeroBlocksUseReplaceBz) { // This range should be split. ExtentForRange(30, 5), ExtentForRange(35, 5), - ExtentForRange(40, 3), + ExtentForRange(40, 5), + ExtentForRange(45, 5), }; EXPECT_EQ(expected_op_extents.size(), aops_.size()); diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index ddb9a355..16f360f8 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -642,7 +642,6 @@ int Main(int argc, char** argv) { // Autodetect minor_version by looking at the update_engine.conf in the old // image. if (payload_config.is_delta) { - payload_config.version.minor = kInPlaceMinorPayloadVersion; brillo::KeyValueStore store; uint32_t minor_version; bool minor_version_found = false; @@ -656,9 +655,10 @@ int Main(int argc, char** argv) { break; } } - LOG_IF(WARNING, !minor_version_found) - << "Failed to detect minor version defaulting to minor_version=" - << payload_config.version.minor; + if (!minor_version_found) { + LOG(FATAL) << "Failed to detect the minor version."; + return 1; + } } else { payload_config.version.minor = kFullPayloadMinorVersion; LOG(INFO) << "Using non-delta minor_version=" @@ -669,6 +669,13 @@ int Main(int argc, char** argv) { LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version; } + if (payload_config.version.minor != kFullPayloadMinorVersion && + (payload_config.version.minor < kMinSupportedMinorPayloadVersion || + payload_config.version.minor > kMaxSupportedMinorPayloadVersion)) { + LOG(FATAL) << "Unsupported minor version " << payload_config.version.minor; + return 1; + } + payload_config.max_timestamp = FLAGS_max_timestamp; if (payload_config.version.minor >= kVerityMinorPayloadVersion) diff --git a/payload_generator/graph_types.cc b/payload_generator/graph_types.cc deleted file mode 100644 index c03766d0..00000000 --- a/payload_generator/graph_types.cc +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/graph_types.h" - -namespace chromeos_update_engine { - -const Vertex::Index Vertex::kInvalidIndex = static_cast(-1); - -} // namespace chromeos_update_engine diff --git a/payload_generator/graph_types.h b/payload_generator/graph_types.h deleted file mode 100644 index f96b0f38..00000000 --- a/payload_generator/graph_types.h +++ /dev/null @@ -1,87 +0,0 @@ -// -// Copyright (C) 2009 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_ -#define UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_ - -#include -#include -#include -#include -#include - -#include - -#include "update_engine/payload_generator/annotated_operation.h" -#include "update_engine/payload_generator/extent_utils.h" -#include "update_engine/update_metadata.pb.h" - -// A few classes that help in generating delta images use these types -// for the graph work. - -namespace chromeos_update_engine { - -struct EdgeProperties { - // Read-before extents. I.e., blocks in |extents| must be read by the - // node pointed to before the pointing node runs (presumably b/c it - // overwrites these blocks). - std::vector extents; - - // Write before extents. I.e., blocks in |write_extents| must be written - // by the node pointed to before the pointing node runs (presumably - // b/c it reads the data written by the other node). - std::vector write_extents; - - bool operator==(const EdgeProperties& that) const { - return extents == that.extents && write_extents == that.write_extents; - } -}; - -struct Vertex { - Vertex() : valid(true), index(-1), lowlink(-1) {} - bool valid; - - typedef std::map::size_type, EdgeProperties> EdgeMap; - EdgeMap out_edges; - - // We sometimes wish to consider a subgraph of a graph. A subgraph would have - // a subset of the vertices from the graph and a subset of the edges. - // When considering this vertex within a subgraph, subgraph_edges stores - // the out-edges. 
- typedef std::set::size_type> SubgraphEdgeMap; - SubgraphEdgeMap subgraph_edges; - - // For Tarjan's algorithm: - std::vector::size_type index; - std::vector::size_type lowlink; - - // Other Vertex properties: - AnnotatedOperation aop; - - typedef std::vector::size_type Index; - static const Vertex::Index kInvalidIndex; -}; - -typedef std::vector Graph; - -typedef std::pair Edge; - -const uint64_t kTempBlockStart = 1ULL << 60; -static_assert(kTempBlockStart != 0, "kTempBlockStart invalid"); - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_ diff --git a/payload_generator/graph_utils.cc b/payload_generator/graph_utils.cc deleted file mode 100644 index 7f5cf8fb..00000000 --- a/payload_generator/graph_utils.cc +++ /dev/null @@ -1,142 +0,0 @@ -// -// Copyright (C) 2009 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/graph_utils.h" - -#include -#include -#include - -#include -#include - -#include "update_engine/payload_consumer/payload_constants.h" -#include "update_engine/payload_generator/annotated_operation.h" -#include "update_engine/payload_generator/extent_utils.h" - -using std::make_pair; -using std::pair; -using std::string; -using std::vector; - -namespace chromeos_update_engine { -namespace graph_utils { - -uint64_t EdgeWeight(const Graph& graph, const Edge& edge) { - uint64_t weight = 0; - const vector& extents = - graph[edge.first].out_edges.find(edge.second)->second.extents; - for (vector::const_iterator it = extents.begin(); it != extents.end(); - ++it) { - if (it->start_block() != kSparseHole) - weight += it->num_blocks(); - } - return weight; -} - -void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block) { - Vertex::EdgeMap::iterator edge_it = src->out_edges.find(dst); - if (edge_it == src->out_edges.end()) { - // Must create new edge - pair result = - src->out_edges.insert(make_pair(dst, EdgeProperties())); - CHECK(result.second); - edge_it = result.first; - } - AppendBlockToExtents(&edge_it->second.extents, block); -} - -void AddReadBeforeDepExtents(Vertex* src, - Vertex::Index dst, - const vector& extents) { - // TODO(adlr): Be more efficient than adding each block individually. - for (vector::const_iterator it = extents.begin(), e = extents.end(); - it != e; - ++it) { - const Extent& extent = *it; - for (uint64_t block = extent.start_block(), - block_end = extent.start_block() + extent.num_blocks(); - block != block_end; - ++block) { - AddReadBeforeDep(src, dst, block); - } - } -} - -void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map) { - // Specially crafted for-loop for the map-iterate-delete dance. 
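// Illustrative sketch (not from the update_engine sources): the "dance" below
// is the classic pre-C++11 idiom edge_map->erase(it++), which advances the
// iterator before the erased node is destroyed so iteration stays valid.
// Since C++11 the same loop is usually written with the iterator returned by
// erase(); a minimal standalone equivalent:
#include <map>
#include <string>

void EraseEmptyValues(std::map<int, std::string>* m) {
  for (auto it = m->begin(); it != m->end();) {
    if (it->second.empty())
      it = m->erase(it);  // erase() returns the next valid iterator
    else
      ++it;
  }
}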
- for (Vertex::EdgeMap::iterator it = edge_map->begin(); - it != edge_map->end();) { - if (!it->second.write_extents.empty()) - it->second.write_extents.clear(); - if (it->second.extents.empty()) { - // Erase *it, as it contains no blocks - edge_map->erase(it++); - } else { - ++it; - } - } -} - -// For each node N in graph, drop all edges N->|index|. -void DropIncomingEdgesTo(Graph* graph, Vertex::Index index) { - // This would be much more efficient if we had doubly-linked - // edges in the graph. - for (Graph::iterator it = graph->begin(), e = graph->end(); it != e; ++it) { - it->out_edges.erase(index); - } -} - -namespace { -template -void DumpExtents(const T& field, int prepend_space_count) { - string header(prepend_space_count, ' '); - for (const auto& extent : field) { - LOG(INFO) << header << "(" << extent.start_block() << ", " - << extent.num_blocks() << ")"; - } -} - -void DumpOutEdges(const Vertex::EdgeMap& out_edges) { - for (Vertex::EdgeMap::const_iterator it = out_edges.begin(), - e = out_edges.end(); - it != e; - ++it) { - LOG(INFO) << " " << it->first << " read-before:"; - DumpExtents(it->second.extents, 6); - LOG(INFO) << " write-before:"; - DumpExtents(it->second.write_extents, 6); - } -} -} // namespace - -void DumpGraph(const Graph& graph) { - LOG(INFO) << "Graph length: " << graph.size(); - for (Graph::size_type i = 0, e = graph.size(); i != e; ++i) { - LOG(INFO) << i << (graph[i].valid ? "" : "-INV") << ": " - << graph[i].aop.name << ": " - << InstallOperationTypeName(graph[i].aop.op.type()); - LOG(INFO) << " src_extents:"; - DumpExtents(graph[i].aop.op.src_extents(), 4); - LOG(INFO) << " dst_extents:"; - DumpExtents(graph[i].aop.op.dst_extents(), 4); - LOG(INFO) << " out edges:"; - DumpOutEdges(graph[i].out_edges); - } -} - -} // namespace graph_utils -} // namespace chromeos_update_engine diff --git a/payload_generator/graph_utils.h b/payload_generator/graph_utils.h deleted file mode 100644 index 7024215d..00000000 --- a/payload_generator/graph_utils.h +++ /dev/null @@ -1,54 +0,0 @@ -// -// Copyright (C) 2009 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_ -#define UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_ - -#include - -#include - -#include "update_engine/payload_generator/graph_types.h" -#include "update_engine/update_metadata.pb.h" - -// A few utility functions for graphs - -namespace chromeos_update_engine { - -namespace graph_utils { - -// Returns the number of blocks represented by all extents in the edge. -uint64_t EdgeWeight(const Graph& graph, const Edge& edge); - -// These add a read-before dependency from graph[src] -> graph[dst]. If the dep -// already exists, the block/s is/are added to the existing edge. 
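// Illustrative usage note (mirrors the DepsTest unit test, which this patch
// also deletes): an edge src -> dst carrying read-before extents means dst
// must run before src, because src overwrites blocks that dst still needs to
// read. Successive calls for adjacent blocks coalesce into a single extent:
//
//   graph_utils::AddReadBeforeDep(&graph[src], dst, /*block=*/3);
//   graph_utils::AddReadBeforeDep(&graph[src], dst, /*block=*/4);
//   // graph[src].out_edges[dst].extents is now a single extent
//   // {start_block = 3, num_blocks = 2}, and
//   // graph_utils::EdgeWeight(graph, {src, dst}) == 2.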
-void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block); -void AddReadBeforeDepExtents(Vertex* src, - Vertex::Index dst, - const std::vector& extents); - -void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map); - -// For each node N in graph, drop all edges N->|index|. -void DropIncomingEdgesTo(Graph* graph, Vertex::Index index); - -void DumpGraph(const Graph& graph); - -} // namespace graph_utils - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_ diff --git a/payload_generator/graph_utils_unittest.cc b/payload_generator/graph_utils_unittest.cc deleted file mode 100644 index 07e76646..00000000 --- a/payload_generator/graph_utils_unittest.cc +++ /dev/null @@ -1,94 +0,0 @@ -// -// Copyright (C) 2009 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/graph_utils.h" - -#include -#include - -#include - -#include "update_engine/payload_consumer/payload_constants.h" -#include "update_engine/payload_generator/extent_ranges.h" -#include "update_engine/payload_generator/extent_utils.h" - -using std::make_pair; -using std::vector; - -namespace chromeos_update_engine { - -class GraphUtilsTest : public ::testing::Test {}; - -TEST(GraphUtilsTest, SimpleTest) { - Graph graph(2); - - graph[0].out_edges.insert(make_pair(1, EdgeProperties())); - - vector& extents = graph[0].out_edges[1].extents; - - EXPECT_EQ(0U, extents.size()); - AppendBlockToExtents(&extents, 0); - EXPECT_EQ(1U, extents.size()); - AppendBlockToExtents(&extents, 1); - AppendBlockToExtents(&extents, 2); - EXPECT_EQ(1U, extents.size()); - AppendBlockToExtents(&extents, 4); - - EXPECT_EQ(2U, extents.size()); - EXPECT_EQ(0U, extents[0].start_block()); - EXPECT_EQ(3U, extents[0].num_blocks()); - EXPECT_EQ(4U, extents[1].start_block()); - EXPECT_EQ(1U, extents[1].num_blocks()); - - EXPECT_EQ(4U, graph_utils::EdgeWeight(graph, make_pair(0, 1))); -} - -TEST(GraphUtilsTest, DepsTest) { - Graph graph(3); - - graph_utils::AddReadBeforeDep(&graph[0], 1, 3); - EXPECT_EQ(1U, graph[0].out_edges.size()); - { - Extent& extent = graph[0].out_edges[1].extents[0]; - EXPECT_EQ(3U, extent.start_block()); - EXPECT_EQ(1U, extent.num_blocks()); - } - graph_utils::AddReadBeforeDep(&graph[0], 1, 4); - EXPECT_EQ(1U, graph[0].out_edges.size()); - { - Extent& extent = graph[0].out_edges[1].extents[0]; - EXPECT_EQ(3U, extent.start_block()); - EXPECT_EQ(2U, extent.num_blocks()); - } - graph_utils::AddReadBeforeDepExtents( - &graph[2], 1, vector(1, ExtentForRange(5, 2))); - EXPECT_EQ(1U, graph[2].out_edges.size()); - { - Extent& extent = graph[2].out_edges[1].extents[0]; - EXPECT_EQ(5U, extent.start_block()); - EXPECT_EQ(2U, extent.num_blocks()); - } - // Change most recent edge from read-before to write-before - graph[2].out_edges[1].write_extents.swap(graph[2].out_edges[1].extents); - graph_utils::DropWriteBeforeDeps(&graph[2].out_edges); - EXPECT_EQ(0U, graph[2].out_edges.size()); - - EXPECT_EQ(1U, 
graph[0].out_edges.size()); - graph_utils::DropIncomingEdgesTo(&graph, 1); - EXPECT_EQ(0U, graph[0].out_edges.size()); -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc deleted file mode 100644 index ee19b620..00000000 --- a/payload_generator/inplace_generator.cc +++ /dev/null @@ -1,798 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/inplace_generator.h" - -#include -#include -#include -#include -#include -#include - -#include - -#include "update_engine/common/utils.h" -#include "update_engine/payload_consumer/payload_constants.h" -#include "update_engine/payload_generator/cycle_breaker.h" -#include "update_engine/payload_generator/delta_diff_generator.h" -#include "update_engine/payload_generator/delta_diff_utils.h" -#include "update_engine/payload_generator/extent_ranges.h" -#include "update_engine/payload_generator/graph_types.h" -#include "update_engine/payload_generator/graph_utils.h" -#include "update_engine/payload_generator/topological_sort.h" -#include "update_engine/update_metadata.pb.h" - -using std::make_pair; -using std::map; -using std::pair; -using std::set; -using std::string; -using std::vector; - -namespace chromeos_update_engine { - -using Block = InplaceGenerator::Block; - -namespace { - -// The only PayloadVersion supported by this implementation. -const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion, - kInPlaceMinorPayloadVersion}; - -// This class allocates non-existent temp blocks, starting from -// kTempBlockStart. Other code is responsible for converting these -// temp blocks into real blocks, as the client can't read or write to -// these blocks. -class DummyExtentAllocator { - public: - vector Allocate(const uint64_t block_count) { - vector ret(1); - ret[0].set_start_block(next_block_); - ret[0].set_num_blocks(block_count); - next_block_ += block_count; - return ret; - } - - private: - uint64_t next_block_{kTempBlockStart}; -}; - -// Takes a vector of blocks and returns an equivalent vector of Extent -// objects. -vector CompressExtents(const vector& blocks) { - vector new_extents; - for (uint64_t block : blocks) { - AppendBlockToExtents(&new_extents, block); - } - return new_extents; -} - -// Helper class to compare two operations by start block of the first Extent in -// their destination extents given the index of the operations in the graph. -class IndexedInstallOperationsDstComparator { - public: - explicit IndexedInstallOperationsDstComparator(Graph* graph) - : graph_(graph) {} - - // Compares the operations in the vertex a and b of graph_. 
- bool operator()(size_t a, size_t b) const { - return diff_utils::CompareAopsByDestination((*graph_)[a].aop, - (*graph_)[b].aop); - } - - private: - const Graph* const graph_; -}; - -} // namespace - -void InplaceGenerator::CheckGraph(const Graph& graph) { - for (const Vertex& v : graph) { - CHECK(v.aop.op.has_type()); - } -} - -void InplaceGenerator::SubstituteBlocks(Vertex* vertex, - const vector& remove_extents, - const vector& replace_extents) { - // First, expand out the blocks that op reads from - vector read_blocks = ExpandExtents(vertex->aop.op.src_extents()); - { - // Expand remove_extents and replace_extents - vector remove_extents_expanded = ExpandExtents(remove_extents); - vector replace_extents_expanded = ExpandExtents(replace_extents); - CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size()); - map conversion; - for (vector::size_type i = 0; i < replace_extents_expanded.size(); - i++) { - conversion[remove_extents_expanded[i]] = replace_extents_expanded[i]; - } - ApplyMap(&read_blocks, conversion); - for (auto& edge_prop_pair : vertex->out_edges) { - vector write_before_deps_expanded = - ExpandExtents(edge_prop_pair.second.write_extents); - ApplyMap(&write_before_deps_expanded, conversion); - edge_prop_pair.second.write_extents = - CompressExtents(write_before_deps_expanded); - } - } - // Convert read_blocks back to extents - vertex->aop.op.clear_src_extents(); - vector new_extents = CompressExtents(read_blocks); - StoreExtents(new_extents, vertex->aop.op.mutable_src_extents()); -} - -bool InplaceGenerator::CutEdges(Graph* graph, - const set& edges, - vector* out_cuts) { - DummyExtentAllocator scratch_allocator; - vector cuts; - cuts.reserve(edges.size()); - - uint64_t scratch_blocks_used = 0; - for (const Edge& edge : edges) { - cuts.resize(cuts.size() + 1); - vector old_extents = - (*graph)[edge.first].out_edges[edge.second].extents; - // Choose some scratch space - scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge); - cuts.back().tmp_extents = - scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge)); - // create vertex to copy original->scratch - cuts.back().new_vertex = graph->size(); - graph->emplace_back(); - cuts.back().old_src = edge.first; - cuts.back().old_dst = edge.second; - - EdgeProperties& cut_edge_properties = - (*graph)[edge.first].out_edges.find(edge.second)->second; - - // This should never happen, as we should only be cutting edges between - // real file nodes, and write-before relationships are created from - // a real file node to a temp copy node: - CHECK(cut_edge_properties.write_extents.empty()) - << "Can't cut edge that has write-before relationship."; - - // make node depend on the copy operation - (*graph)[edge.first].out_edges.insert( - make_pair(graph->size() - 1, cut_edge_properties)); - - // Set src/dst extents and other proto variables for copy operation - graph->back().aop.op.set_type(InstallOperation::MOVE); - StoreExtents(cut_edge_properties.extents, - graph->back().aop.op.mutable_src_extents()); - StoreExtents(cuts.back().tmp_extents, - graph->back().aop.op.mutable_dst_extents()); - graph->back().aop.op.set_src_length(graph_utils::EdgeWeight(*graph, edge) * - kBlockSize); - graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length()); - - // make the dest node read from the scratch space - SubstituteBlocks(&((*graph)[edge.second]), - (*graph)[edge.first].out_edges[edge.second].extents, - cuts.back().tmp_extents); - - // delete the old edge - CHECK_EQ(static_cast(1), - 
(*graph)[edge.first].out_edges.erase(edge.second)); - - // Add an edge from dst to copy operation - EdgeProperties write_before_edge_properties; - write_before_edge_properties.write_extents = cuts.back().tmp_extents; - (*graph)[edge.second].out_edges.insert( - make_pair(graph->size() - 1, write_before_edge_properties)); - } - out_cuts->swap(cuts); - return true; -} - -// Creates all the edges for the graph. Writers of a block point to -// readers of the same block. This is because for an edge A->B, B -// must complete before A executes. -void InplaceGenerator::CreateEdges(Graph* graph, const vector& blocks) { - for (vector::size_type i = 0; i < blocks.size(); i++) { - // Blocks with both a reader and writer get an edge - if (blocks[i].reader == Vertex::kInvalidIndex || - blocks[i].writer == Vertex::kInvalidIndex) - continue; - // Don't have a node depend on itself - if (blocks[i].reader == blocks[i].writer) - continue; - // See if there's already an edge we can add onto - Vertex::EdgeMap::iterator edge_it = - (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader); - if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) { - // No existing edge. Create one - (*graph)[blocks[i].writer].out_edges.insert( - make_pair(blocks[i].reader, EdgeProperties())); - edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader); - CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end()); - } - AppendBlockToExtents(&edge_it->second.extents, i); - } -} - -namespace { - -class SortCutsByTopoOrderLess { - public: - explicit SortCutsByTopoOrderLess( - const vector::size_type>& table) - : table_(table) {} - bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) { - return table_[a.old_dst] < table_[b.old_dst]; - } - - private: - const vector::size_type>& table_; -}; - -} // namespace - -void InplaceGenerator::GenerateReverseTopoOrderMap( - const vector& op_indexes, - vector::size_type>* reverse_op_indexes) { - vector::size_type> table(op_indexes.size()); - for (vector::size_type i = 0, e = op_indexes.size(); i != e; - ++i) { - Vertex::Index node = op_indexes[i]; - if (table.size() < (node + 1)) { - table.resize(node + 1); - } - table[node] = i; - } - reverse_op_indexes->swap(table); -} - -void InplaceGenerator::SortCutsByTopoOrder( - const vector& op_indexes, vector* cuts) { - // first, make a reverse lookup table. - vector::size_type> table; - GenerateReverseTopoOrderMap(op_indexes, &table); - SortCutsByTopoOrderLess less(table); - sort(cuts->begin(), cuts->end(), less); -} - -void InplaceGenerator::MoveAndSortFullOpsToBack( - Graph* graph, vector* op_indexes) { - vector ret; - vector full_ops; - ret.reserve(op_indexes->size()); - for (auto op_index : *op_indexes) { - InstallOperation_Type type = (*graph)[op_index].aop.op.type(); - if (type == InstallOperation::REPLACE || - type == InstallOperation::REPLACE_BZ) { - full_ops.push_back(op_index); - } else { - ret.push_back(op_index); - } - } - LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of " - << (full_ops.size() + ret.size()) << " total ops."; - // Sort full ops according to their dst_extents. 
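
The CreateEdges comment above is worth pinning down with a toy case: an out-edge always runs from the writer of a block to its reader, because the reader must consume the old contents before the writer clobbers them. A self-contained sketch with a trimmed-down graph (plain maps instead of Vertex/EdgeProperties; the names are illustrative):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

constexpr size_t kInvalidIndex = static_cast<size_t>(-1);

// Which operation reads and which writes a given disk block.
struct Block {
  size_t reader = kInvalidIndex;
  size_t writer = kInvalidIndex;
};

// graph[v] maps dependency targets to the shared blocks: an edge v -> u means
// "u must complete before v executes".
using Graph = std::vector<std::map<size_t, std::vector<uint64_t>>>;

void CreateEdges(Graph* graph, const std::vector<Block>& blocks) {
  for (uint64_t i = 0; i < blocks.size(); ++i) {
    const Block& b = blocks[i];
    if (b.reader == kInvalidIndex || b.writer == kInvalidIndex)
      continue;  // Only blocks with both a reader and a writer need an edge.
    if (b.reader == b.writer)
      continue;  // No self-dependency.
    (*graph)[b.writer][b.reader].push_back(i);  // Writer waits for reader.
  }
}

int main() {
  Graph graph(2);
  std::vector<Block> blocks(4);
  blocks[2].reader = 0;  // Operation 0 reads block 2.
  blocks[2].writer = 1;  // Operation 1 overwrites block 2.
  CreateEdges(&graph, blocks);
  // One edge 1 -> 0 carrying block 2: op 1 must wait for op 0.
  std::cout << "op 1 has " << graph[1].size() << " dependency, on block "
            << graph[1][0].front() << '\n';
}
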
- sort(full_ops.begin(), - full_ops.end(), - IndexedInstallOperationsDstComparator(graph)); - ret.insert(ret.end(), full_ops.begin(), full_ops.end()); - op_indexes->swap(ret); -} - -namespace { - -template -bool TempBlocksExistInExtents(const T& extents) { - for (const auto& extent : extents) { - uint64_t start = extent.start_block(); - uint64_t num = extent.num_blocks(); - if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) { - LOG(ERROR) << "temp block!"; - LOG(ERROR) << "start: " << start << ", num: " << num; - LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart; - LOG(ERROR) << "returning true"; - return true; - } - // check for wrap-around, which would be a bug: - CHECK(start <= (start + num)); - } - return false; -} - -// Converts the cuts, which must all have the same |old_dst| member, -// to full. It does this by converting the |old_dst| to REPLACE or -// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking -// all temp nodes invalid. -bool ConvertCutsToFull( - Graph* graph, - const string& new_part, - BlobFileWriter* blob_file, - vector* op_indexes, - vector::size_type>* reverse_op_indexes, - const vector& cuts) { - CHECK(!cuts.empty()); - set deleted_nodes; - for (const CutEdgeVertexes& cut : cuts) { - TEST_AND_RETURN_FALSE( - InplaceGenerator::ConvertCutToFullOp(graph, cut, new_part, blob_file)); - deleted_nodes.insert(cut.new_vertex); - } - deleted_nodes.insert(cuts[0].old_dst); - - vector new_op_indexes; - new_op_indexes.reserve(op_indexes->size()); - for (Vertex::Index vertex_index : *op_indexes) { - if (base::ContainsKey(deleted_nodes, vertex_index)) - continue; - new_op_indexes.push_back(vertex_index); - } - new_op_indexes.push_back(cuts[0].old_dst); - op_indexes->swap(new_op_indexes); - InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes, - reverse_op_indexes); - return true; -} - -// Tries to assign temp blocks for a collection of cuts, all of which share -// the same old_dst member. If temp blocks can't be found, old_dst will be -// converted to a REPLACE or REPLACE_BZ operation. Returns true on success, -// which can happen even if blocks are converted to full. Returns false -// on exceptional error cases. 
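
The routine declared just below scans operations that run later in the install order for spare blocks, and bails out to ConvertCutsToFull when not enough scratch turns up. The decision itself reduces to simple accounting; here is a deliberately simplified sketch with plain counters standing in for ExtentRanges and the cut bookkeeping (all names are illustrative, not the real API):

#include <cstdint>
#include <iostream>
#include <vector>

struct Cut {
  uint64_t tmp_blocks;  // Temp blocks this cut currently occupies.
};

// Returns true if the candidate scratch runs can cover every cut's temp
// blocks; false means the caller should instead convert the destination node
// to a full REPLACE/REPLACE_BZ operation, which needs no scratch at all.
bool TryAssignScratch(const std::vector<Cut>& cuts,
                      const std::vector<uint64_t>& candidate_scratch_runs) {
  uint64_t needed = 0;
  for (const Cut& c : cuts)
    needed += c.tmp_blocks;

  uint64_t found = 0;
  for (uint64_t run : candidate_scratch_runs) {
    found += run;
    if (found >= needed)
      return true;  // Enough scratch located; stop scanning.
  }
  return false;
}

int main() {
  std::vector<Cut> cuts = {{4}, {3}};  // 7 temp blocks required in total.
  std::cout << TryAssignScratch(cuts, {2, 3, 5}) << '\n';  // 1: 10 >= 7.
  std::cout << TryAssignScratch(cuts, {2, 3}) << '\n';     // 0: fall back to full.
}
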
-bool AssignBlockForAdjoiningCuts( - Graph* graph, - const string& new_part, - BlobFileWriter* blob_file, - vector* op_indexes, - vector::size_type>* reverse_op_indexes, - const vector& cuts) { - CHECK(!cuts.empty()); - const Vertex::Index old_dst = cuts[0].old_dst; - // Calculate # of blocks needed - uint64_t blocks_needed = 0; - vector cuts_blocks_needed(cuts.size()); - for (vector::size_type i = 0; i < cuts.size(); ++i) { - uint64_t cut_blocks_needed = 0; - for (const Extent& extent : cuts[i].tmp_extents) { - cut_blocks_needed += extent.num_blocks(); - } - blocks_needed += cut_blocks_needed; - cuts_blocks_needed[i] = cut_blocks_needed; - } - - // Find enough blocks - ExtentRanges scratch_ranges; - // Each block that's supplying temp blocks and the corresponding blocks: - typedef vector> SupplierVector; - SupplierVector block_suppliers; - uint64_t scratch_blocks_found = 0; - for (vector::size_type i = (*reverse_op_indexes)[old_dst] + 1, - e = op_indexes->size(); - i < e; - ++i) { - Vertex::Index test_node = (*op_indexes)[i]; - if (!(*graph)[test_node].valid) - continue; - // See if this node has sufficient blocks - ExtentRanges ranges; - ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents()); - ranges.SubtractExtent( - ExtentForRange(kTempBlockStart, kSparseHole - kTempBlockStart)); - ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents()); - // For now, for simplicity, subtract out all blocks in read-before - // dependencies. - for (Vertex::EdgeMap::const_iterator - edge_i = (*graph)[test_node].out_edges.begin(), - edge_e = (*graph)[test_node].out_edges.end(); - edge_i != edge_e; - ++edge_i) { - ranges.SubtractExtents(edge_i->second.extents); - } - - // Prevent using the block 0 as scratch space due to crbug.com/480751. - if (ranges.ContainsBlock(0)) { - LOG(INFO) << "Removing block 0 from the selected scratch range in vertex " - << i; - ranges.SubtractBlock(0); - } - - if (ranges.blocks() == 0) - continue; - - if (ranges.blocks() + scratch_blocks_found > blocks_needed) { - // trim down ranges - vector new_ranges = - ranges.GetExtentsForBlockCount(blocks_needed - scratch_blocks_found); - ranges = ExtentRanges(); - ranges.AddExtents(new_ranges); - } - scratch_ranges.AddRanges(ranges); - block_suppliers.push_back(make_pair(test_node, ranges)); - scratch_blocks_found += ranges.blocks(); - if (scratch_ranges.blocks() >= blocks_needed) - break; - } - if (scratch_ranges.blocks() < blocks_needed) { - LOG(INFO) << "Unable to find sufficient scratch"; - TEST_AND_RETURN_FALSE(ConvertCutsToFull( - graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts)); - return true; - } - // Use the scratch we found - TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found); - - // Make all the suppliers depend on this node - for (const auto& index_range_pair : block_suppliers) { - graph_utils::AddReadBeforeDepExtents( - &(*graph)[index_range_pair.first], - old_dst, - index_range_pair.second.GetExtentsForBlockCount( - index_range_pair.second.blocks())); - } - - // Replace temp blocks in each cut - for (vector::size_type i = 0; i < cuts.size(); ++i) { - const CutEdgeVertexes& cut = cuts[i]; - vector real_extents = - scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]); - scratch_ranges.SubtractExtents(real_extents); - - // Fix the old dest node w/ the real blocks - InplaceGenerator::SubstituteBlocks( - &(*graph)[old_dst], cut.tmp_extents, real_extents); - - // Fix the new node w/ the real blocks. 
Since the new node is just a - // copy operation, we can replace all the dest extents w/ the real - // blocks. - InstallOperation* op = &(*graph)[cut.new_vertex].aop.op; - op->clear_dst_extents(); - StoreExtents(real_extents, op->mutable_dst_extents()); - } - return true; -} - -} // namespace - -bool InplaceGenerator::AssignTempBlocks( - Graph* graph, - const string& new_part, - BlobFileWriter* blob_file, - vector* op_indexes, - vector::size_type>* reverse_op_indexes, - const vector& cuts) { - CHECK(!cuts.empty()); - - // group of cuts w/ the same old_dst: - vector cuts_group; - - for (vector::size_type i = cuts.size() - 1, e = 0; true; - --i) { - LOG(INFO) << "Fixing temp blocks in cut " << i - << ": old dst: " << cuts[i].old_dst - << " new vertex: " << cuts[i].new_vertex - << " path: " << (*graph)[cuts[i].old_dst].aop.name; - - if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) { - cuts_group.push_back(cuts[i]); - } else { - CHECK(!cuts_group.empty()); - TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph, - new_part, - blob_file, - op_indexes, - reverse_op_indexes, - cuts_group)); - cuts_group.clear(); - cuts_group.push_back(cuts[i]); - } - - if (i == e) { - // break out of for() loop - break; - } - } - CHECK(!cuts_group.empty()); - TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts( - graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts_group)); - return true; -} - -bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) { - size_t idx = 0; - for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e; - ++it, ++idx) { - if (!it->valid) - continue; - const InstallOperation& op = it->aop.op; - if (TempBlocksExistInExtents(op.dst_extents()) || - TempBlocksExistInExtents(op.src_extents())) { - LOG(INFO) << "bad extents in node " << idx; - LOG(INFO) << "so yeah"; - return false; - } - - // Check out-edges: - for (const auto& edge_prop_pair : it->out_edges) { - if (TempBlocksExistInExtents(edge_prop_pair.second.extents) || - TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) { - LOG(INFO) << "bad out edge in node " << idx; - LOG(INFO) << "so yeah"; - return false; - } - } - } - return true; -} - -bool InplaceGenerator::ConvertCutToFullOp(Graph* graph, - const CutEdgeVertexes& cut, - const string& new_part, - BlobFileWriter* blob_file) { - // Drop all incoming edges, keep all outgoing edges - - // Keep all outgoing edges - if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ && - (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) { - Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges; - graph_utils::DropWriteBeforeDeps(&out_edges); - - // Replace the operation with a REPLACE or REPLACE_BZ to generate the same - // |new_extents| list of blocks and update the graph. - vector new_aop; - vector new_extents; - ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(), &new_extents); - TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile( - &new_aop, - "", // old_part - new_part, - vector(), // old_extents - new_extents, - {}, // old_deflates - {}, // new_deflates - (*graph)[cut.old_dst].aop.name, - -1, // chunk_blocks, forces to have a single operation. - kInPlacePayloadVersion, - blob_file)); - TEST_AND_RETURN_FALSE(new_aop.size() == 1); - TEST_AND_RETURN_FALSE(AddInstallOpToGraph( - graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name)); - - (*graph)[cut.old_dst].out_edges = out_edges; - - // Right now we don't have doubly-linked edges, so we have to scan - // the whole graph. 
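
As the comment above says, edges live only on their source vertex, so stripping every edge that points at a node means walking the whole graph, which is what the DropIncomingEdgesTo call below does. A minimal sketch of that scan over a simplified adjacency map (the real helper operates on Vertex::EdgeMap, presumably with the same shape of loop):

#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

// A vertex's out-edges, keyed by the index of the dependency target. The
// mapped value stands in for EdgeProperties.
using EdgeMap = std::map<size_t, int>;
using Graph = std::vector<EdgeMap>;

// Remove every edge that points at |dst|. Since edges are stored only on
// their source vertex, every vertex has to be visited.
void DropIncomingEdgesTo(Graph* graph, size_t dst) {
  for (EdgeMap& out_edges : *graph)
    out_edges.erase(dst);
}

int main() {
  Graph graph(3);
  graph[0][1] = 0;  // 0 -> 1
  graph[2][1] = 0;  // 2 -> 1
  graph[2][0] = 0;  // 2 -> 0
  DropIncomingEdgesTo(&graph, 1);
  std::cout << graph[0].size() << ' ' << graph[2].size() << '\n';  // 0 1
}
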
- graph_utils::DropIncomingEdgesTo(graph, cut.old_dst); - } - - // Delete temp node - (*graph)[cut.old_src].out_edges.erase(cut.new_vertex); - CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) == - (*graph)[cut.old_dst].out_edges.end()); - (*graph)[cut.new_vertex].valid = false; - LOG(INFO) << "marked node invalid: " << cut.new_vertex; - return true; -} - -bool InplaceGenerator::ConvertGraphToDag(Graph* graph, - const string& new_part, - BlobFileWriter* blob_file, - vector* final_order, - Vertex::Index scratch_vertex) { - CycleBreaker cycle_breaker; - LOG(INFO) << "Finding cycles..."; - set cut_edges; - cycle_breaker.BreakCycles(*graph, &cut_edges); - LOG(INFO) << "done finding cycles"; - CheckGraph(*graph); - - // Calculate number of scratch blocks needed - - LOG(INFO) << "Cutting cycles..."; - vector cuts; - TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts)); - LOG(INFO) << "done cutting cycles"; - LOG(INFO) << "There are " << cuts.size() << " cuts."; - CheckGraph(*graph); - - LOG(INFO) << "Creating initial topological order..."; - TopologicalSort(*graph, final_order); - LOG(INFO) << "done with initial topo order"; - CheckGraph(*graph); - - LOG(INFO) << "Moving full ops to the back"; - MoveAndSortFullOpsToBack(graph, final_order); - LOG(INFO) << "done moving full ops to back"; - - vector::size_type> inverse_final_order; - GenerateReverseTopoOrderMap(*final_order, &inverse_final_order); - - SortCutsByTopoOrder(*final_order, &cuts); - - if (!cuts.empty()) - TEST_AND_RETURN_FALSE(AssignTempBlocks( - graph, new_part, blob_file, final_order, &inverse_final_order, cuts)); - LOG(INFO) << "Making sure all temp blocks have been allocated"; - - // Remove the scratch node, if any - if (scratch_vertex != Vertex::kInvalidIndex) { - final_order->erase(final_order->begin() + - inverse_final_order[scratch_vertex]); - (*graph)[scratch_vertex].valid = false; - GenerateReverseTopoOrderMap(*final_order, &inverse_final_order); - } - - graph_utils::DumpGraph(*graph); - CHECK(NoTempBlocksRemain(*graph)); - LOG(INFO) << "done making sure all temp blocks are allocated"; - return true; -} - -void InplaceGenerator::CreateScratchNode(uint64_t start_block, - uint64_t num_blocks, - Vertex* vertex) { - vertex->aop.name = ""; - vertex->aop.op.set_type(InstallOperation::REPLACE_BZ); - vertex->aop.op.set_data_offset(0); - vertex->aop.op.set_data_length(0); - Extent* extent = vertex->aop.op.add_dst_extents(); - extent->set_start_block(start_block); - extent->set_num_blocks(num_blocks); -} - -bool InplaceGenerator::AddInstallOpToBlocksVector( - const InstallOperation& operation, - const Graph& graph, - Vertex::Index vertex, - vector* blocks) { - // See if this is already present. - TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0); - - enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT }; - for (int field = READER; field < BLOCK_FIELD_COUNT; field++) { - const char* past_participle = (field == READER) ? "read" : "written"; - const google::protobuf::RepeatedPtrField& extents = - (field == READER) ? operation.src_extents() : operation.dst_extents(); - Vertex::Index Block::*access_type = - (field == READER) ? 
&Block::reader : &Block::writer; - - for (const Extent& extent : extents) { - for (uint64_t block = extent.start_block(); - block < (extent.start_block() + extent.num_blocks()); - block++) { - if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) { - LOG(FATAL) << "Block " << block << " is already " << past_participle - << " by " << (*blocks)[block].*access_type << "(" - << graph[(*blocks)[block].*access_type].aop.name - << ") and also " << vertex << "(" << graph[vertex].aop.name - << ")"; - } - (*blocks)[block].*access_type = vertex; - } - } - } - return true; -} - -bool InplaceGenerator::AddInstallOpToGraph(Graph* graph, - Vertex::Index existing_vertex, - vector* blocks, - const InstallOperation& operation, - const string& op_name) { - Vertex::Index vertex = existing_vertex; - if (vertex == Vertex::kInvalidIndex) { - graph->emplace_back(); - vertex = graph->size() - 1; - } - (*graph)[vertex].aop.op = operation; - CHECK((*graph)[vertex].aop.op.has_type()); - (*graph)[vertex].aop.name = op_name; - - if (blocks) - TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector( - (*graph)[vertex].aop.op, *graph, vertex, blocks)); - return true; -} - -void InplaceGenerator::ApplyMap(vector* collection, - const map& the_map) { - for (uint64_t& elem : *collection) { - const auto& map_it = the_map.find(elem); - if (map_it != the_map.end()) - elem = map_it->second; - } -} - -bool InplaceGenerator::ResolveReadAfterWriteDependencies( - const PartitionConfig& old_part, - const PartitionConfig& new_part, - uint64_t partition_size, - size_t block_size, - BlobFileWriter* blob_file, - vector* aops) { - // Convert the operations to the graph. - Graph graph; - CheckGraph(graph); - vector blocks(std::max(old_part.size, new_part.size) / block_size); - for (const auto& aop : *aops) { - AddInstallOpToGraph( - &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name); - } - CheckGraph(graph); - - // Final scratch block (if there's space) - Vertex::Index scratch_vertex = Vertex::kInvalidIndex; - if (blocks.size() < (partition_size / block_size)) { - scratch_vertex = graph.size(); - graph.emplace_back(); - size_t scratch_blocks = (partition_size / block_size) - blocks.size(); - LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks."; - CreateScratchNode(blocks.size(), scratch_blocks, &graph.back()); - } - CheckGraph(graph); - - LOG(INFO) << "Creating edges..."; - CreateEdges(&graph, blocks); - LOG(INFO) << "Done creating edges"; - CheckGraph(graph); - - vector final_order; - TEST_AND_RETURN_FALSE(ConvertGraphToDag( - &graph, new_part.path, blob_file, &final_order, scratch_vertex)); - - // Copy operations over to the |aops| vector in the final_order generated by - // the topological sort. - aops->clear(); - aops->reserve(final_order.size()); - for (const Vertex::Index vertex_index : final_order) { - const Vertex& vertex = graph[vertex_index]; - aops->push_back(vertex.aop); - } - return true; -} - -bool InplaceGenerator::GenerateOperations(const PayloadGenerationConfig& config, - const PartitionConfig& old_part, - const PartitionConfig& new_part, - BlobFileWriter* blob_file, - vector* aops) { - TEST_AND_RETURN_FALSE(old_part.name == new_part.name); - TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major); - TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor); - - ssize_t hard_chunk_blocks = - (config.hard_chunk_size == -1 - ? 
-1 - : config.hard_chunk_size / config.block_size); - size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size; - uint64_t partition_size = new_part.size; - if (new_part.name == kPartitionNameRoot) - partition_size = config.rootfs_partition_size; - - LOG(INFO) << "Delta compressing " << new_part.name << " partition..."; - TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops, - old_part, - new_part, - hard_chunk_blocks, - soft_chunk_blocks, - config.version, - blob_file)); - LOG(INFO) << "Done reading " << new_part.name; - - TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies( - old_part, new_part, partition_size, config.block_size, blob_file, aops)); - LOG(INFO) << "Done reordering " << new_part.name; - return true; -} - -}; // namespace chromeos_update_engine diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h deleted file mode 100644 index e7298d21..00000000 --- a/payload_generator/inplace_generator.h +++ /dev/null @@ -1,240 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_ -#define UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_ - -#include -#include -#include -#include - -#include "update_engine/payload_generator/blob_file_writer.h" -#include "update_engine/payload_generator/delta_diff_generator.h" -#include "update_engine/payload_generator/graph_types.h" -#include "update_engine/payload_generator/operations_generator.h" - -// InplaceGenerator contains all functionality related to the inplace algorithm -// for generating update payloads. These are the functions used when delta minor -// version is 1. - -namespace chromeos_update_engine { - -// This struct stores all relevant info for an edge that is cut between -// nodes old_src -> old_dst by creating new vertex new_vertex. The new -// relationship is: -// old_src -(read before)-> new_vertex <-(write before)- old_dst -// new_vertex is a MOVE operation that moves some existing blocks into -// temp space. The temp extents are, by necessity, stored in new_vertex -// (as dst extents) and old_dst (as src extents), but they are also broken -// out into tmp_extents, as the nodes themselves may contain many more -// extents. -struct CutEdgeVertexes { - Vertex::Index new_vertex; - Vertex::Index old_src; - Vertex::Index old_dst; - std::vector tmp_extents; -}; - -class InplaceGenerator : public OperationsGenerator { - public: - // Represents a disk block on the install partition. - struct Block { - // During install, each block on the install partition will be written - // and some may be read (in all likelihood, many will be read). - // The reading and writing will be performed by InstallOperations, - // each of which has a corresponding vertex in a graph. - // A Block object tells which vertex will read or write this block - // at install time. 
- // Generally, there will be a vector of Block objects whose length - // is the number of blocks on the install partition. - Block() : reader(Vertex::kInvalidIndex), writer(Vertex::kInvalidIndex) {} - Vertex::Index reader; - Vertex::Index writer; - }; - - InplaceGenerator() = default; - - // Checks all the operations in the graph have a type assigned. - static void CheckGraph(const Graph& graph); - - // Modifies blocks read by 'op' so that any blocks referred to by - // 'remove_extents' are replaced with blocks from 'replace_extents'. - // 'remove_extents' and 'replace_extents' must be the same number of blocks. - // Blocks will be substituted in the order listed in the vectors. - // E.g. if 'op' reads blocks 1, 2, 3, 4, 5, 6, 7, 8, remove_extents - // contains blocks 6, 2, 3, 5, and replace blocks contains - // 12, 13, 14, 15, then op will be changed to read from: - // 1, 13, 14, 4, 15, 12, 7, 8 - static void SubstituteBlocks(Vertex* vertex, - const std::vector& remove_extents, - const std::vector& replace_extents); - - // Cuts 'edges' from 'graph' according to the AU algorithm. This means - // for each edge A->B, remove the dependency that B occur before A. - // Do this by creating a new operation X that copies from the blocks - // specified by the edge's properties to temp space T. Modify B to read - // from T rather than the blocks in the edge. Modify A to depend on X, - // but not on B. Free space is found by looking in 'blocks'. - // Returns true on success. - static bool CutEdges(Graph* graph, - const std::set& edges, - std::vector* out_cuts); - - // Creates all the edges for the graph. Writers of a block point to - // readers of the same block. This is because for an edge A->B, B - // must complete before A executes. - static void CreateEdges(Graph* graph, const std::vector& blocks); - - // Takes |op_indexes|, which is effectively a mapping from order in - // which the op is performed -> graph vertex index, and produces the - // reverse: a mapping from graph vertex index -> op_indexes index. - static void GenerateReverseTopoOrderMap( - const std::vector& op_indexes, - std::vector::size_type>* reverse_op_indexes); - - // Sorts the vector |cuts| by its |cuts[].old_dest| member. Order is - // determined by the order of elements in op_indexes. - static void SortCutsByTopoOrder(const std::vector& op_indexes, - std::vector* cuts); - - // Given a topologically sorted graph |op_indexes| and |graph|, alters - // |op_indexes| to move all the full operations to the end of the vector. - // Full operations should not be depended on, so this is safe. - static void MoveAndSortFullOpsToBack(Graph* graph, - std::vector* op_indexes); - - // Returns true iff there are no extents in the graph that refer to temp - // blocks. Temp blocks are in the range [kTempBlockStart, kSparseHole). - static bool NoTempBlocksRemain(const Graph& graph); - - // Takes a |graph|, which has edges that must be cut, as listed in - // |cuts|. Cuts the edges. Maintains a list in which the operations - // will be performed (in |op_indexes|) and the reverse (in - // |reverse_op_indexes|). Cutting edges requires scratch space, and - // if insufficient scratch is found, the file is reread and will be - // send down (either as REPLACE or REPLACE_BZ). Returns true on - // success. 
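
Two of the declarations in this header, SubstituteBlocks above and ApplyMap further down, boil down to the same block-by-block remapping. A standalone sketch of that remapping, run on the exact numbers from the SubstituteBlocks comment (reads of 1..8 with blocks 6,2,3,5 redirected to 12,13,14,15); the real method additionally rewrites the write-before extents on out-edges and re-compresses the result into extents, which is omitted here:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Replace each block that appears in the map with its substitute; blocks not
// mentioned in the map are left untouched.
void ApplyMap(std::vector<uint64_t>* blocks,
              const std::map<uint64_t, uint64_t>& conversion) {
  for (uint64_t& block : *blocks) {
    auto it = conversion.find(block);
    if (it != conversion.end())
      block = it->second;
  }
}

int main() {
  std::vector<uint64_t> read_blocks = {1, 2, 3, 4, 5, 6, 7, 8};
  // remove_extents {6, 2, 3, 5} paired in order with replace_extents
  // {12, 13, 14, 15}, as in the SubstituteBlocks example above.
  std::map<uint64_t, uint64_t> conversion = {{6, 12}, {2, 13}, {3, 14}, {5, 15}};
  ApplyMap(&read_blocks, conversion);
  for (uint64_t block : read_blocks)
    std::cout << block << ' ';  // 1 13 14 4 15 12 7 8
  std::cout << '\n';
}
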
- static bool AssignTempBlocks( - Graph* graph, - const std::string& new_part, - BlobFileWriter* blob_file, - std::vector* op_indexes, - std::vector::size_type>* reverse_op_indexes, - const std::vector& cuts); - - // Handles allocation of temp blocks to a cut edge by converting the - // dest node to a full op. This removes the need for temp blocks, but - // comes at the cost of a worse compression ratio. - // For example, say we have A->B->A. It would first be cut to form: - // A->B->N<-A, where N copies blocks to temp space. If there are no - // temp blocks, this function can be called to convert it to the form: - // A->B. Now, A is a full operation. - static bool ConvertCutToFullOp(Graph* graph, - const CutEdgeVertexes& cut, - const std::string& new_part, - BlobFileWriter* blob_file); - - // Takes a graph, which is not a DAG, which represents the files just - // read from disk, and converts it into a DAG by breaking all cycles - // and finding temp space to resolve broken edges. - // The final order of the nodes is given in |final_order| - // Some files may need to be reread from disk, thus |fd| and - // |data_file_size| are be passed. - // If |scratch_vertex| is not kInvalidIndex, removes it from - // |final_order| before returning. - // Returns true on success. - static bool ConvertGraphToDag(Graph* graph, - const std::string& new_part, - BlobFileWriter* blob_file, - std::vector* final_order, - Vertex::Index scratch_vertex); - - // Creates a dummy REPLACE_BZ node in the given |vertex|. This can be used - // to provide scratch space. The node writes |num_blocks| blocks starting at - // |start_block|The node should be marked invalid before writing all nodes to - // the output file. - static void CreateScratchNode(uint64_t start_block, - uint64_t num_blocks, - Vertex* vertex); - - // The |blocks| vector contains a reader and writer for each block on the - // filesystem that's being in-place updated. We populate the reader/writer - // fields of |blocks| by calling this function. - // For each block in |operation| that is read or written, find that block - // in |blocks| and set the reader/writer field to the vertex passed. - // |graph| is not strictly necessary, but useful for printing out - // error messages. - static bool AddInstallOpToBlocksVector(const InstallOperation& operation, - const Graph& graph, - Vertex::Index vertex, - std::vector* blocks); - - // Add a vertex (if |existing_vertex| is kInvalidVertex) or update an - // |existing_vertex| with the passed |operation|. - // This method will also register the vertex as the reader or writer of the - // blocks involved in the operation updating the |blocks| vector. The - // |op_name| associated with the Vertex is used for logging purposes. - static bool AddInstallOpToGraph(Graph* graph, - Vertex::Index existing_vertex, - std::vector* blocks, - const InstallOperation& operation, - const std::string& op_name); - - // Apply the transformation stored in |the_map| to the |collection| vector - // replacing the map keys found in |collection| with its associated value in - // |the_map|. - static void ApplyMap(std::vector* collection, - const std::map& the_map); - - // Resolve all read-after-write dependencies in the operation list |aops|. The - // operations in |aops| are such that they generate the desired |new_part| if - // applied reading always from the original image. This function reorders the - // operations and generates new operations when needed to make these - // operations produce the same |new_part| result when applied in-place. 
- // The new operations will create blobs in |data_file_fd| and update - // the file size pointed by |data_file_size| if needed. - // On success, stores the new operations in |aops| in the right order and - // returns true. - static bool ResolveReadAfterWriteDependencies( - const PartitionConfig& old_part, - const PartitionConfig& new_part, - uint64_t partition_size, - size_t block_size, - BlobFileWriter* blob_file, - std::vector* aops); - - // Generate the update payload operations for the given partition using - // only operations that read from the target and/or write to the target, - // hence, applying the payload "in-place" in the target partition. This method - // assumes that the contents of the source image are pre-copied to the target - // partition, up to the size of the source image. Use this method to generate - // a delta update with the minor version kInPlaceMinorPayloadVersion. - // The operations are stored in |aops|. All the offsets in the operations - // reference the data written to |blob_file|. - bool GenerateOperations(const PayloadGenerationConfig& config, - const PartitionConfig& old_part, - const PartitionConfig& new_part, - BlobFileWriter* blob_file, - std::vector* aops) override; - - private: - DISALLOW_COPY_AND_ASSIGN(InplaceGenerator); -}; - -}; // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_ diff --git a/payload_generator/inplace_generator_unittest.cc b/payload_generator/inplace_generator_unittest.cc deleted file mode 100644 index ab3b8671..00000000 --- a/payload_generator/inplace_generator_unittest.cc +++ /dev/null @@ -1,752 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "update_engine/payload_generator/inplace_generator.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "update_engine/common/test_utils.h" -#include "update_engine/common/utils.h" -#include "update_engine/payload_generator/cycle_breaker.h" -#include "update_engine/payload_generator/delta_diff_generator.h" -#include "update_engine/payload_generator/delta_diff_utils.h" -#include "update_engine/payload_generator/extent_ranges.h" -#include "update_engine/payload_generator/graph_types.h" -#include "update_engine/payload_generator/graph_utils.h" - -using std::map; -using std::set; -using std::string; -using std::stringstream; -using std::vector; - -namespace chromeos_update_engine { - -using Block = InplaceGenerator::Block; - -namespace { - -void GenVertex(Vertex* out, - const vector& src_extents, - const vector& dst_extents, - const string& path, - InstallOperation_Type type) { - out->aop.op.set_type(type); - out->aop.name = path; - StoreExtents(src_extents, out->aop.op.mutable_src_extents()); - StoreExtents(dst_extents, out->aop.op.mutable_dst_extents()); -} - -vector VectOfExt(uint64_t start_block, uint64_t num_blocks) { - return vector(1, ExtentForRange(start_block, num_blocks)); -} - -EdgeProperties EdgeWithReadDep(const vector& extents) { - EdgeProperties ret; - ret.extents = extents; - return ret; -} - -EdgeProperties EdgeWithWriteDep(const vector& extents) { - EdgeProperties ret; - ret.write_extents = extents; - return ret; -} - -template -void DumpVect(const vector& vect) { - stringstream ss(stringstream::out); - for (typename vector::const_iterator it = vect.begin(), e = vect.end(); - it != e; - ++it) { - ss << *it << ", "; - } - LOG(INFO) << "{" << ss.str() << "}"; -} - -void AppendExtent(vector* vect, uint64_t start, uint64_t length) { - vect->resize(vect->size() + 1); - vect->back().set_start_block(start); - vect->back().set_num_blocks(length); -} - -void OpAppendExtent(InstallOperation* op, uint64_t start, uint64_t length) { - Extent* extent = op->add_src_extents(); - extent->set_start_block(start); - extent->set_num_blocks(length); -} - -} // namespace - -class InplaceGeneratorTest : public ::testing::Test { - protected: - // Initialize |blob_path_|, |blob_file_size_| and |blob_file_fd_| variables - // with a new blob file. The file is closed and removed automatically when - // the test finishes. - void CreateBlobFile() { - // blob_fd_closer_ takes a pointer to blob_fd_. Make sure we destroy a - // previous instance before overriding blob_fd_. - blob_fd_closer_.reset(); - EXPECT_TRUE(utils::MakeTempFile( - "InplaceGenerator_blob_file.XXXXXX", &blob_path_, &blob_fd_)); - blob_path_unlinker_.reset(new ScopedPathUnlinker(blob_path_)); - blob_fd_closer_.reset(new ScopedFdCloser(&blob_fd_)); - blob_file_size_ = 0; - EXPECT_GE(blob_fd_, 0); - blob_file_.reset(new BlobFileWriter(blob_fd_, &blob_file_size_)); - } - - // Dump the list of operations |aops| in case of test failure. - void DumpAopsOnFailure(const vector& aops) { - if (HasNonfatalFailure()) { - LOG(INFO) << "Result operation list:"; - for (const auto& aop : aops) { - LOG(INFO) << aop; - } - } - } - - // Blob file name, file descriptor and file size used to store operation - // blobs. 
- string blob_path_; - int blob_fd_{-1}; - off_t blob_file_size_{0}; - std::unique_ptr blob_file_; - std::unique_ptr blob_path_unlinker_; - std::unique_ptr blob_fd_closer_; -}; - -TEST_F(InplaceGeneratorTest, BlockDefaultValues) { - // Tests that a Block is initialized with the default values as a - // Vertex::kInvalidIndex. This is required by the delta generators. - Block block; - EXPECT_EQ(Vertex::kInvalidIndex, block.reader); - EXPECT_EQ(Vertex::kInvalidIndex, block.writer); -} - -TEST_F(InplaceGeneratorTest, SubstituteBlocksTest) { - vector remove_blocks; - AppendExtent(&remove_blocks, 3, 3); - AppendExtent(&remove_blocks, 7, 1); - vector replace_blocks; - AppendExtent(&replace_blocks, 10, 2); - AppendExtent(&replace_blocks, 13, 2); - Vertex vertex; - InstallOperation& op = vertex.aop.op; - OpAppendExtent(&op, 4, 3); - OpAppendExtent(&op, kSparseHole, 4); // Sparse hole in file - OpAppendExtent(&op, 3, 1); - OpAppendExtent(&op, 7, 3); - - InplaceGenerator::SubstituteBlocks(&vertex, remove_blocks, replace_blocks); - - EXPECT_EQ(7, op.src_extents_size()); - EXPECT_EQ(11U, op.src_extents(0).start_block()); - EXPECT_EQ(1U, op.src_extents(0).num_blocks()); - EXPECT_EQ(13U, op.src_extents(1).start_block()); - EXPECT_EQ(1U, op.src_extents(1).num_blocks()); - EXPECT_EQ(6U, op.src_extents(2).start_block()); - EXPECT_EQ(1U, op.src_extents(2).num_blocks()); - EXPECT_EQ(kSparseHole, op.src_extents(3).start_block()); - EXPECT_EQ(4U, op.src_extents(3).num_blocks()); - EXPECT_EQ(10U, op.src_extents(4).start_block()); - EXPECT_EQ(1U, op.src_extents(4).num_blocks()); - EXPECT_EQ(14U, op.src_extents(5).start_block()); - EXPECT_EQ(1U, op.src_extents(5).num_blocks()); - EXPECT_EQ(8U, op.src_extents(6).start_block()); - EXPECT_EQ(2U, op.src_extents(6).num_blocks()); -} - -TEST_F(InplaceGeneratorTest, CutEdgesTest) { - Graph graph; - vector blocks(9); - - // Create nodes in graph - { - graph.resize(graph.size() + 1); - graph.back().aop.op.set_type(InstallOperation::MOVE); - // Reads from blocks 3, 5, 7 - vector extents; - AppendBlockToExtents(&extents, 3); - AppendBlockToExtents(&extents, 5); - AppendBlockToExtents(&extents, 7); - StoreExtents(extents, graph.back().aop.op.mutable_src_extents()); - blocks[3].reader = graph.size() - 1; - blocks[5].reader = graph.size() - 1; - blocks[7].reader = graph.size() - 1; - - // Writes to blocks 1, 2, 4 - extents.clear(); - AppendBlockToExtents(&extents, 1); - AppendBlockToExtents(&extents, 2); - AppendBlockToExtents(&extents, 4); - StoreExtents(extents, graph.back().aop.op.mutable_dst_extents()); - blocks[1].writer = graph.size() - 1; - blocks[2].writer = graph.size() - 1; - blocks[4].writer = graph.size() - 1; - } - { - graph.resize(graph.size() + 1); - graph.back().aop.op.set_type(InstallOperation::MOVE); - // Reads from blocks 1, 2, 4 - vector extents; - AppendBlockToExtents(&extents, 1); - AppendBlockToExtents(&extents, 2); - AppendBlockToExtents(&extents, 4); - StoreExtents(extents, graph.back().aop.op.mutable_src_extents()); - blocks[1].reader = graph.size() - 1; - blocks[2].reader = graph.size() - 1; - blocks[4].reader = graph.size() - 1; - - // Writes to blocks 3, 5, 6 - extents.clear(); - AppendBlockToExtents(&extents, 3); - AppendBlockToExtents(&extents, 5); - AppendBlockToExtents(&extents, 6); - StoreExtents(extents, graph.back().aop.op.mutable_dst_extents()); - blocks[3].writer = graph.size() - 1; - blocks[5].writer = graph.size() - 1; - blocks[6].writer = graph.size() - 1; - } - - // Create edges - InplaceGenerator::CreateEdges(&graph, blocks); - - // 
Find cycles - CycleBreaker cycle_breaker; - set cut_edges; - cycle_breaker.BreakCycles(graph, &cut_edges); - - EXPECT_EQ(1U, cut_edges.size()); - EXPECT_TRUE(cut_edges.end() != - cut_edges.find(std::pair(1, 0))); - - vector cuts; - EXPECT_TRUE(InplaceGenerator::CutEdges(&graph, cut_edges, &cuts)); - - EXPECT_EQ(3U, graph.size()); - - // Check new node in graph: - EXPECT_EQ(InstallOperation::MOVE, graph.back().aop.op.type()); - EXPECT_EQ(2, graph.back().aop.op.src_extents_size()); - EXPECT_EQ(1, graph.back().aop.op.dst_extents_size()); - EXPECT_EQ(kTempBlockStart, graph.back().aop.op.dst_extents(0).start_block()); - EXPECT_EQ(2U, graph.back().aop.op.dst_extents(0).num_blocks()); - EXPECT_TRUE(graph.back().out_edges.empty()); - - // Check that old node reads from new blocks - EXPECT_EQ(2, graph[0].aop.op.src_extents_size()); - EXPECT_EQ(kTempBlockStart, graph[0].aop.op.src_extents(0).start_block()); - EXPECT_EQ(2U, graph[0].aop.op.src_extents(0).num_blocks()); - EXPECT_EQ(7U, graph[0].aop.op.src_extents(1).start_block()); - EXPECT_EQ(1U, graph[0].aop.op.src_extents(1).num_blocks()); - - // And that the old dst extents haven't changed - EXPECT_EQ(2, graph[0].aop.op.dst_extents_size()); - EXPECT_EQ(1U, graph[0].aop.op.dst_extents(0).start_block()); - EXPECT_EQ(2U, graph[0].aop.op.dst_extents(0).num_blocks()); - EXPECT_EQ(4U, graph[0].aop.op.dst_extents(1).start_block()); - EXPECT_EQ(1U, graph[0].aop.op.dst_extents(1).num_blocks()); - - // Ensure it only depends on the next node and the new temp node - EXPECT_EQ(2U, graph[0].out_edges.size()); - EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(1)); - EXPECT_TRUE(graph[0].out_edges.end() != - graph[0].out_edges.find(graph.size() - 1)); - - // Check second node has unchanged extents - EXPECT_EQ(2, graph[1].aop.op.src_extents_size()); - EXPECT_EQ(1U, graph[1].aop.op.src_extents(0).start_block()); - EXPECT_EQ(2U, graph[1].aop.op.src_extents(0).num_blocks()); - EXPECT_EQ(4U, graph[1].aop.op.src_extents(1).start_block()); - EXPECT_EQ(1U, graph[1].aop.op.src_extents(1).num_blocks()); - - EXPECT_EQ(2, graph[1].aop.op.dst_extents_size()); - EXPECT_EQ(3U, graph[1].aop.op.dst_extents(0).start_block()); - EXPECT_EQ(1U, graph[1].aop.op.dst_extents(0).num_blocks()); - EXPECT_EQ(5U, graph[1].aop.op.dst_extents(1).start_block()); - EXPECT_EQ(2U, graph[1].aop.op.dst_extents(1).num_blocks()); - - // Ensure it only depends on the next node - EXPECT_EQ(1U, graph[1].out_edges.size()); - EXPECT_TRUE(graph[1].out_edges.end() != graph[1].out_edges.find(2)); -} - -TEST_F(InplaceGeneratorTest, AssignTempBlocksReuseTest) { - Graph graph(9); - - const vector empt; - uint64_t tmp = kTempBlockStart; - const string kFilename = "/foo"; - - vector cuts; - cuts.resize(3); - - // Simple broken loop: - GenVertex( - &graph[0], VectOfExt(0, 1), VectOfExt(1, 1), "", InstallOperation::MOVE); - GenVertex(&graph[1], - VectOfExt(tmp, 1), - VectOfExt(0, 1), - "", - InstallOperation::MOVE); - GenVertex(&graph[2], - VectOfExt(1, 1), - VectOfExt(tmp, 1), - "", - InstallOperation::MOVE); - // Corresponding edges: - graph[0].out_edges[2] = EdgeWithReadDep(VectOfExt(1, 1)); - graph[1].out_edges[2] = EdgeWithWriteDep(VectOfExt(tmp, 1)); - graph[1].out_edges[0] = EdgeWithReadDep(VectOfExt(0, 1)); - // Store the cut: - cuts[0].old_dst = 1; - cuts[0].old_src = 0; - cuts[0].new_vertex = 2; - cuts[0].tmp_extents = VectOfExt(tmp, 1); - tmp++; - - // Slightly more complex pair of loops: - GenVertex( - &graph[3], VectOfExt(4, 2), VectOfExt(2, 2), "", InstallOperation::MOVE); - GenVertex( 
- &graph[4], VectOfExt(6, 1), VectOfExt(7, 1), "", InstallOperation::MOVE); - GenVertex(&graph[5], - VectOfExt(tmp, 3), - VectOfExt(4, 3), - kFilename, - InstallOperation::MOVE); - GenVertex(&graph[6], - VectOfExt(2, 2), - VectOfExt(tmp, 2), - "", - InstallOperation::MOVE); - GenVertex(&graph[7], - VectOfExt(7, 1), - VectOfExt(tmp + 2, 1), - "", - InstallOperation::MOVE); - // Corresponding edges: - graph[3].out_edges[6] = EdgeWithReadDep(VectOfExt(2, 2)); - graph[4].out_edges[7] = EdgeWithReadDep(VectOfExt(7, 1)); - graph[5].out_edges[6] = EdgeWithWriteDep(VectOfExt(tmp, 2)); - graph[5].out_edges[7] = EdgeWithWriteDep(VectOfExt(tmp + 2, 1)); - graph[5].out_edges[3] = EdgeWithReadDep(VectOfExt(4, 2)); - graph[5].out_edges[4] = EdgeWithReadDep(VectOfExt(6, 1)); - // Store the cuts: - cuts[1].old_dst = 5; - cuts[1].old_src = 3; - cuts[1].new_vertex = 6; - cuts[1].tmp_extents = VectOfExt(tmp, 2); - cuts[2].old_dst = 5; - cuts[2].old_src = 4; - cuts[2].new_vertex = 7; - cuts[2].tmp_extents = VectOfExt(tmp + 2, 1); - - // Supplier of temp block: - GenVertex(&graph[8], empt, VectOfExt(8, 1), "", InstallOperation::REPLACE); - - // Specify the final order: - vector op_indexes; - op_indexes.push_back(2); - op_indexes.push_back(0); - op_indexes.push_back(1); - op_indexes.push_back(6); - op_indexes.push_back(3); - op_indexes.push_back(7); - op_indexes.push_back(4); - op_indexes.push_back(5); - op_indexes.push_back(8); - - vector::size_type> reverse_op_indexes; - InplaceGenerator::GenerateReverseTopoOrderMap(op_indexes, - &reverse_op_indexes); - - CreateBlobFile(); - EXPECT_TRUE(InplaceGenerator::AssignTempBlocks(&graph, - "/dev/zero", - blob_file_.get(), - &op_indexes, - &reverse_op_indexes, - cuts)); - EXPECT_FALSE(graph[6].valid); - EXPECT_FALSE(graph[7].valid); - EXPECT_EQ(1, graph[1].aop.op.src_extents_size()); - EXPECT_EQ(2U, graph[1].aop.op.src_extents(0).start_block()); - EXPECT_EQ(1U, graph[1].aop.op.src_extents(0).num_blocks()); - EXPECT_EQ(InstallOperation::REPLACE_BZ, graph[5].aop.op.type()); -} - -TEST_F(InplaceGeneratorTest, MoveAndSortFullOpsToBackTest) { - Graph graph(4); - graph[0].aop.name = "A"; - graph[0].aop.op.set_type(InstallOperation::REPLACE); - graph[1].aop.name = "B"; - graph[1].aop.op.set_type(InstallOperation::BSDIFF); - graph[2].aop.name = "C"; - graph[2].aop.op.set_type(InstallOperation::REPLACE_BZ); - graph[3].aop.name = "D"; - graph[3].aop.op.set_type(InstallOperation::MOVE); - - vector vect(graph.size()); - - for (vector::size_type i = 0; i < vect.size(); ++i) { - vect[i] = i; - } - InplaceGenerator::MoveAndSortFullOpsToBack(&graph, &vect); - EXPECT_EQ(vect.size(), graph.size()); - EXPECT_EQ(graph[vect[0]].aop.name, "B"); - EXPECT_EQ(graph[vect[1]].aop.name, "D"); - EXPECT_EQ(graph[vect[2]].aop.name, "A"); - EXPECT_EQ(graph[vect[3]].aop.name, "C"); -} - -TEST_F(InplaceGeneratorTest, AssignTempBlocksTest) { - Graph graph(9); - const vector empt; // empty - const string kFilename = "/foo"; - - // Some scratch space: - GenVertex(&graph[0], empt, VectOfExt(200, 1), "", InstallOperation::REPLACE); - GenVertex(&graph[1], empt, VectOfExt(210, 10), "", InstallOperation::REPLACE); - GenVertex(&graph[2], empt, VectOfExt(220, 1), "", InstallOperation::REPLACE); - - // A cycle that requires 10 blocks to break: - GenVertex(&graph[3], - VectOfExt(10, 11), - VectOfExt(0, 9), - "", - InstallOperation::BSDIFF); - graph[3].out_edges[4] = EdgeWithReadDep(VectOfExt(0, 9)); - GenVertex(&graph[4], - VectOfExt(0, 9), - VectOfExt(10, 11), - "", - InstallOperation::BSDIFF); - 
graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11)); - - // A cycle that requires 9 blocks to break: - GenVertex(&graph[5], - VectOfExt(40, 11), - VectOfExt(30, 10), - "", - InstallOperation::BSDIFF); - graph[5].out_edges[6] = EdgeWithReadDep(VectOfExt(30, 10)); - GenVertex(&graph[6], - VectOfExt(30, 10), - VectOfExt(40, 11), - "", - InstallOperation::BSDIFF); - graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11)); - - // A cycle that requires 40 blocks to break (which is too many): - GenVertex(&graph[7], - VectOfExt(120, 50), - VectOfExt(60, 40), - "", - InstallOperation::BSDIFF); - graph[7].out_edges[8] = EdgeWithReadDep(VectOfExt(60, 40)); - GenVertex(&graph[8], - VectOfExt(60, 40), - VectOfExt(120, 50), - kFilename, - InstallOperation::BSDIFF); - graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50)); - - graph_utils::DumpGraph(graph); - - vector final_order; - - CreateBlobFile(); - EXPECT_TRUE(InplaceGenerator::ConvertGraphToDag(&graph, - "/dev/zero", - blob_file_.get(), - &final_order, - Vertex::kInvalidIndex)); - - Graph expected_graph(12); - GenVertex(&expected_graph[0], - empt, - VectOfExt(200, 1), - "", - InstallOperation::REPLACE); - GenVertex(&expected_graph[1], - empt, - VectOfExt(210, 10), - "", - InstallOperation::REPLACE); - GenVertex(&expected_graph[2], - empt, - VectOfExt(220, 1), - "", - InstallOperation::REPLACE); - GenVertex(&expected_graph[3], - VectOfExt(10, 11), - VectOfExt(0, 9), - "", - InstallOperation::BSDIFF); - expected_graph[3].out_edges[9] = EdgeWithReadDep(VectOfExt(0, 9)); - GenVertex(&expected_graph[4], - VectOfExt(60, 9), - VectOfExt(10, 11), - "", - InstallOperation::BSDIFF); - expected_graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11)); - expected_graph[4].out_edges[9] = EdgeWithWriteDep(VectOfExt(60, 9)); - GenVertex(&expected_graph[5], - VectOfExt(40, 11), - VectOfExt(30, 10), - "", - InstallOperation::BSDIFF); - expected_graph[5].out_edges[10] = EdgeWithReadDep(VectOfExt(30, 10)); - - GenVertex(&expected_graph[6], - VectOfExt(60, 10), - VectOfExt(40, 11), - "", - InstallOperation::BSDIFF); - expected_graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11)); - expected_graph[6].out_edges[10] = EdgeWithWriteDep(VectOfExt(60, 10)); - - GenVertex(&expected_graph[7], - VectOfExt(120, 50), - VectOfExt(60, 40), - "", - InstallOperation::BSDIFF); - expected_graph[7].out_edges[6] = EdgeWithReadDep(VectOfExt(60, 10)); - - GenVertex(&expected_graph[8], - empt, - VectOfExt(0, 50), - "/foo", - InstallOperation::REPLACE_BZ); - expected_graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50)); - - GenVertex(&expected_graph[9], - VectOfExt(0, 9), - VectOfExt(60, 9), - "", - InstallOperation::MOVE); - - GenVertex(&expected_graph[10], - VectOfExt(30, 10), - VectOfExt(60, 10), - "", - InstallOperation::MOVE); - expected_graph[10].out_edges[4] = EdgeWithReadDep(VectOfExt(60, 9)); - - EXPECT_EQ(12U, graph.size()); - EXPECT_FALSE(graph.back().valid); - for (Graph::size_type i = 0; i < graph.size() - 1; i++) { - EXPECT_TRUE(graph[i].out_edges == expected_graph[i].out_edges); - if (i == 8) { - // special case - } else { - // EXPECT_TRUE(graph[i] == expected_graph[i]) << "i = " << i; - } - } -} - -TEST_F(InplaceGeneratorTest, CreateScratchNodeTest) { - Vertex vertex; - InplaceGenerator::CreateScratchNode(12, 34, &vertex); - EXPECT_EQ(InstallOperation::REPLACE_BZ, vertex.aop.op.type()); - EXPECT_EQ(0U, vertex.aop.op.data_offset()); - EXPECT_EQ(0U, vertex.aop.op.data_length()); - EXPECT_EQ(1, vertex.aop.op.dst_extents_size()); - EXPECT_EQ(12U, 
vertex.aop.op.dst_extents(0).start_block()); - EXPECT_EQ(34U, vertex.aop.op.dst_extents(0).num_blocks()); -} - -TEST_F(InplaceGeneratorTest, ApplyMapTest) { - vector collection = {1, 2, 3, 4, 6}; - vector expected_values = {1, 2, 5, 4, 8}; - map value_map; - value_map[3] = 5; - value_map[6] = 8; - value_map[5] = 10; - - InplaceGenerator::ApplyMap(&collection, value_map); - EXPECT_EQ(expected_values, collection); -} - -// We can't produce MOVE operations with a source or destination in the block 0. -// This test checks that the cycle breaker procedure doesn't produce such -// operations. -TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesAvoidMoveToZero) { - size_t block_size = 4096; - size_t num_blocks = 4; - vector aops; - - // Create a REPLACE_BZ for block 0, and a circular dependency among all other - // blocks. This situation would prefer to issue a MOVE to scratch space and - // the only available block is 0. - aops.emplace_back(); - aops.back().name = base::StringPrintf(""); - aops.back().op.set_type(InstallOperation::REPLACE_BZ); - StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents()); - - for (size_t i = 1; i < num_blocks; i++) { - AnnotatedOperation aop; - aop.name = base::StringPrintf("", i); - aop.op.set_type(InstallOperation::BSDIFF); - StoreExtents({ExtentForRange(1 + i % (num_blocks - 1), 1)}, - aop.op.mutable_src_extents()); - StoreExtents({ExtentForRange(i, 1)}, aop.op.mutable_dst_extents()); - aops.push_back(aop); - } - - PartitionConfig part("part"); - part.path = "/dev/zero"; - part.size = num_blocks * block_size; - - CreateBlobFile(); - - // We ran two tests here. The first one without enough blocks for the scratch - // space, forcing it to create a new full operation and the second case with - // one extra block in the partition that can be used for the move operation. - for (const auto part_blocks : vector{num_blocks, num_blocks + 1}) { - SCOPED_TRACE( - base::StringPrintf("Using partition_blocks=%" PRIu64, part_blocks)); - vector result_aops = aops; - EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies( - part, - part, - part_blocks * block_size, - block_size, - blob_file_.get(), - &result_aops)); - - size_t full_ops = 0; - for (const auto& aop : result_aops) { - if (diff_utils::IsAReplaceOperation(aop.op.type())) - full_ops++; - - if (aop.op.type() != InstallOperation::MOVE) - continue; - for (const Extent& extent : aop.op.src_extents()) { - EXPECT_NE(0U, extent.start_block()) - << "On src extents for aop: " << aop; - } - for (const Extent& extent : aop.op.dst_extents()) { - EXPECT_NE(0U, extent.start_block()) - << "On dst extents for aop: " << aop; - } - } - - // If there's extra space in the partition, it should not use a new full - // operation for it. - EXPECT_EQ(part_blocks == num_blocks ? 2U : 1U, full_ops); - - DumpAopsOnFailure(result_aops); - } -} - -// Test that we can shrink a filesystem and break cycles. -TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesShrinkData) { - size_t block_size = 4096; - size_t old_blocks = 10; - size_t new_blocks = 8; - vector aops; - - // Create a loop using the blocks 1-6 and one other operation writing to the - // block 7 from outside the new partition. The loop in the blocks 1-6 uses - // two-block operations, so it needs two blocks of scratch space. It can't use - // the block 0 as scratch space (see previous test) and it can't use the - // blocks 7 or 8 due the last move operation. 
- - aops.emplace_back(); - aops.back().name = base::StringPrintf(""); - aops.back().op.set_type(InstallOperation::REPLACE_BZ); - StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents()); - - const size_t num_ops = 3; - for (size_t i = 0; i < num_ops; i++) { - AnnotatedOperation aop; - aop.name = base::StringPrintf("", i); - aop.op.set_type(InstallOperation::BSDIFF); - StoreExtents({ExtentForRange(1 + 2 * i, 2)}, aop.op.mutable_src_extents()); - StoreExtents({ExtentForRange(1 + 2 * ((i + 1) % num_ops), 2)}, - aop.op.mutable_dst_extents()); - aops.push_back(aop); - } - - { - AnnotatedOperation aop; - aop.name = ""; - aop.op.set_type(InstallOperation::BSDIFF); - StoreExtents({ExtentForRange(8, 1)}, aop.op.mutable_src_extents()); - StoreExtents({ExtentForRange(7, 1)}, aop.op.mutable_dst_extents()); - aops.push_back(aop); - } - - PartitionConfig old_part("part"); - old_part.path = "/dev/zero"; - old_part.size = old_blocks * block_size; - - PartitionConfig new_part("part"); - new_part.path = "/dev/zero"; - new_part.size = new_blocks * block_size; - - CreateBlobFile(); - - EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies( - old_part, - new_part, - (old_blocks + 2) * block_size, // enough scratch space. - block_size, - blob_file_.get(), - &aops)); - - size_t full_ops = 0; - for (const auto& aop : aops) { - if (diff_utils::IsAReplaceOperation(aop.op.type())) - full_ops++; - } - // There should be only one REPLACE* operation, the one we added for block 0. - EXPECT_EQ(1U, full_ops); - - // There should be only one MOVE operation, the one used to break the loop - // which should write to scratch space past the block 7 (the last block of the - // new partition) which is being written later. - size_t move_ops = 0; - for (const auto& aop : aops) { - if (aop.op.type() == InstallOperation::MOVE) { - move_ops++; - for (const Extent& extent : aop.op.dst_extents()) { - EXPECT_LE(7U, extent.start_block()) - << "On dst extents for aop: " << aop; - } - } - } - EXPECT_EQ(1U, move_ops); - - DumpAopsOnFailure(aops); -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index c364797c..3b791c82 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -222,7 +222,6 @@ bool PayloadVersion::Validate() const { TEST_AND_RETURN_FALSE(major == kChromeOSMajorPayloadVersion || major == kBrilloMajorPayloadVersion); TEST_AND_RETURN_FALSE(minor == kFullPayloadMinorVersion || - minor == kInPlaceMinorPayloadVersion || minor == kSourceMinorPayloadVersion || minor == kOpSrcHashMinorPayloadVersion || minor == kBrotliBsdiffMinorPayloadVersion || @@ -252,14 +251,6 @@ bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const { // them for delta payloads for now. return minor >= kBrotliBsdiffMinorPayloadVersion; - // Delta operations: - case InstallOperation::MOVE: - case InstallOperation::BSDIFF: - // MOVE and BSDIFF were replaced by SOURCE_COPY and SOURCE_BSDIFF and - // should not be used in newer delta versions, since the idempotent checks - // were removed. 
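
With the kInPlaceMinorPayloadVersion check below removed, MOVE and BSDIFF are no longer accepted for any supported minor version; the remaining delta operations are gated purely on the minimum minor version that introduced them. A condensed sketch of the resulting policy, with the minor-version constants written out as assumed numeric values (the authoritative definitions live in payload_constants.h):

#include <cstdint>
#include <iostream>

// Assumed values for the minor-version constants, spelled out only to keep
// this sketch self-contained.
constexpr uint32_t kFullPayloadMinorVersion = 0;
constexpr uint32_t kSourceMinorPayloadVersion = 2;
constexpr uint32_t kPuffdiffMinorPayloadVersion = 5;

enum class Op { kReplace, kSourceCopy, kSourceBsdiff, kPuffdiff, kMove, kBsdiff };

// Post-change policy: in-place operations are never allowed; delta operations
// require at least the minor version that introduced them.
bool OperationAllowed(uint32_t minor, Op op) {
  switch (op) {
    case Op::kReplace:
      return true;  // Full operations are allowed in every payload version.
    case Op::kSourceCopy:
    case Op::kSourceBsdiff:
      return minor >= kSourceMinorPayloadVersion;
    case Op::kPuffdiff:
      return minor >= kPuffdiffMinorPayloadVersion;
    case Op::kMove:
    case Op::kBsdiff:
      return false;  // Only the removed in-place minor version produced these.
  }
  return false;
}

int main() {
  std::cout << OperationAllowed(kFullPayloadMinorVersion, Op::kReplace)  // 1
            << OperationAllowed(3, Op::kSourceBsdiff)                    // 1
            << OperationAllowed(3, Op::kPuffdiff)                        // 0
            << OperationAllowed(4, Op::kMove) << '\n';                   // 0
}
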
- return minor == kInPlaceMinorPayloadVersion; - case InstallOperation::SOURCE_COPY: case InstallOperation::SOURCE_BSDIFF: return minor >= kSourceMinorPayloadVersion; @@ -269,6 +260,10 @@ bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const { case InstallOperation::PUFFDIFF: return minor >= kPuffdiffMinorPayloadVersion; + + case InstallOperation::MOVE: + case InstallOperation::BSDIFF: + NOTREACHED(); } return false; } @@ -277,10 +272,6 @@ bool PayloadVersion::IsDelta() const { return minor != kFullPayloadMinorVersion; } -bool PayloadVersion::InplaceUpdate() const { - return minor == kInPlaceMinorPayloadVersion; -} - bool PayloadGenerationConfig::Validate() const { TEST_AND_RETURN_FALSE(version.Validate()); TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta); @@ -307,9 +298,6 @@ bool PayloadGenerationConfig::Validate() const { for (const PartitionConfig& part : target.partitions) { TEST_AND_RETURN_FALSE(part.ValidateExists()); TEST_AND_RETURN_FALSE(part.size % block_size == 0); - if (version.minor == kInPlaceMinorPayloadVersion && - part.name == kPartitionNameRoot) - TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size); if (version.major == kChromeOSMajorPayloadVersion) TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty()); if (version.minor < kVerityMinorPayloadVersion) diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h index 2153ab07..32f12292 100644 --- a/payload_generator/payload_generation_config.h +++ b/payload_generator/payload_generation_config.h @@ -170,10 +170,6 @@ struct PayloadVersion { // Whether this payload version is a delta payload. bool IsDelta() const; - // Tells whether the update is done in-place, that is, whether the operations - // read and write from the same partition. - bool InplaceUpdate() const; - // The major version of the payload. uint64_t major; diff --git a/payload_generator/tarjan.cc b/payload_generator/tarjan.cc deleted file mode 100644 index 2d4ca316..00000000 --- a/payload_generator/tarjan.cc +++ /dev/null @@ -1,83 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -#include "update_engine/payload_generator/tarjan.h" - -#include -#include - -#include -#include - -using std::min; -using std::vector; - -namespace chromeos_update_engine { - -namespace { -const vector::size_type kInvalidIndex = -1; -} - -void TarjanAlgorithm::Execute(Vertex::Index vertex, - Graph* graph, - vector* out) { - stack_.clear(); - components_.clear(); - index_ = 0; - for (Graph::iterator it = graph->begin(); it != graph->end(); ++it) - it->index = it->lowlink = kInvalidIndex; - required_vertex_ = vertex; - - Tarjan(vertex, graph); - if (!components_.empty()) - out->swap(components_[0]); -} - -void TarjanAlgorithm::Tarjan(Vertex::Index vertex, Graph* graph) { - CHECK_EQ((*graph)[vertex].index, kInvalidIndex); - (*graph)[vertex].index = index_; - (*graph)[vertex].lowlink = index_; - index_++; - stack_.push_back(vertex); - for (Vertex::EdgeMap::iterator it = (*graph)[vertex].out_edges.begin(); - it != (*graph)[vertex].out_edges.end(); - ++it) { - Vertex::Index vertex_next = it->first; - if ((*graph)[vertex_next].index == kInvalidIndex) { - Tarjan(vertex_next, graph); - (*graph)[vertex].lowlink = - min((*graph)[vertex].lowlink, (*graph)[vertex_next].lowlink); - } else if (base::ContainsValue(stack_, vertex_next)) { - (*graph)[vertex].lowlink = - min((*graph)[vertex].lowlink, (*graph)[vertex_next].index); - } - } - if ((*graph)[vertex].lowlink == (*graph)[vertex].index) { - vector component; - Vertex::Index other_vertex; - do { - other_vertex = stack_.back(); - stack_.pop_back(); - component.push_back(other_vertex); - } while (other_vertex != vertex && !stack_.empty()); - - if (base::ContainsValue(component, required_vertex_)) { - components_.resize(components_.size() + 1); - component.swap(components_.back()); - } - } -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/tarjan.h b/payload_generator/tarjan.h deleted file mode 100644 index 39ac4e4e..00000000 --- a/payload_generator/tarjan.h +++ /dev/null @@ -1,53 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_ -#define UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_ - -// This is an implementation of Tarjan's algorithm which finds all -// Strongly Connected Components in a graph. - -// Note: a true Tarjan algorithm would find all strongly connected components -// in the graph. This implementation will only find the strongly connected -// component containing the vertex passed in. - -#include - -#include "update_engine/payload_generator/graph_types.h" - -namespace chromeos_update_engine { - -class TarjanAlgorithm { - public: - TarjanAlgorithm() : index_(0), required_vertex_(0) {} - - // 'out' is set to the result if there is one, otherwise it's untouched. 
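As the comments above note, the removed TarjanAlgorithm only kept the strongly connected component containing the vertex passed in. A compact, self-contained Python sketch of that same idea follows; the dict-of-lists graph and the function name are illustrative assumptions, not update_engine code.

def scc_containing(graph, start):
    """Tarjan's SCC search, keeping only the component that holds `start`.

    `graph` maps a vertex to an iterable of successor vertices (an
    illustrative representation, not the Graph type used above).
    """
    index = {}      # discovery order of each visited vertex
    lowlink = {}    # smallest discovery index reachable from the subtree
    stack = []      # vertices whose component has not been closed yet
    on_stack = set()
    counter = [0]
    found = []

    def strongconnect(v):
        index[v] = lowlink[v] = counter[0]
        counter[0] += 1
        stack.append(v)
        on_stack.add(v)
        for w in graph.get(v, ()):
            if w not in index:
                strongconnect(w)
                lowlink[v] = min(lowlink[v], lowlink[w])
            elif w in on_stack:
                lowlink[v] = min(lowlink[v], index[w])
        if lowlink[v] == index[v]:
            # `v` is the root of a component; pop the component off the stack.
            component = []
            while True:
                w = stack.pop()
                on_stack.discard(w)
                component.append(w)
                if w == v:
                    break
            if start in component:
                found.append(component)

    strongconnect(start)
    return found[0]

# The cycle a -> b -> c -> a is one component; d hangs off it on its own.
g = {'a': ['b'], 'b': ['c'], 'c': ['a', 'd'], 'd': []}
assert sorted(scc_containing(g, 'a')) == ['a', 'b', 'c']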
- void Execute(Vertex::Index vertex, - Graph* graph, - std::vector* out); - - private: - void Tarjan(Vertex::Index vertex, Graph* graph); - - Vertex::Index index_; - Vertex::Index required_vertex_; - std::vector stack_; - std::vector> components_; -}; - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_ diff --git a/payload_generator/tarjan_unittest.cc b/payload_generator/tarjan_unittest.cc deleted file mode 100644 index b271227f..00000000 --- a/payload_generator/tarjan_unittest.cc +++ /dev/null @@ -1,94 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/tarjan.h" - -#include -#include - -#include -#include -#include - -#include "update_engine/payload_generator/graph_types.h" - -using std::make_pair; -using std::string; -using std::vector; - -namespace chromeos_update_engine { - -class TarjanAlgorithmTest : public ::testing::Test {}; - -TEST(TarjanAlgorithmTest, SimpleTest) { - const Vertex::Index n_a = 0; - const Vertex::Index n_b = 1; - const Vertex::Index n_c = 2; - const Vertex::Index n_d = 3; - const Vertex::Index n_e = 4; - const Vertex::Index n_f = 5; - const Vertex::Index n_g = 6; - const Vertex::Index n_h = 7; - const Graph::size_type kNodeCount = 8; - - Graph graph(kNodeCount); - - graph[n_a].out_edges.insert(make_pair(n_e, EdgeProperties())); - graph[n_a].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties())); - graph[n_c].out_edges.insert(make_pair(n_d, EdgeProperties())); - graph[n_d].out_edges.insert(make_pair(n_e, EdgeProperties())); - graph[n_d].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_b, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_c, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_f].out_edges.insert(make_pair(n_g, EdgeProperties())); - graph[n_g].out_edges.insert(make_pair(n_h, EdgeProperties())); - graph[n_h].out_edges.insert(make_pair(n_g, EdgeProperties())); - - TarjanAlgorithm tarjan; - - for (Vertex::Index i = n_a; i <= n_e; i++) { - vector vertex_indexes; - tarjan.Execute(i, &graph, &vertex_indexes); - - EXPECT_EQ(5U, vertex_indexes.size()); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_a)); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_b)); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_c)); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_d)); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_e)); - } - - { - vector vertex_indexes; - tarjan.Execute(n_f, &graph, &vertex_indexes); - - EXPECT_EQ(1U, vertex_indexes.size()); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_f)); - } - - for (Vertex::Index i = n_g; i <= n_h; i++) { - vector vertex_indexes; - tarjan.Execute(i, &graph, &vertex_indexes); - - EXPECT_EQ(2U, vertex_indexes.size()); - EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_g)); - 
EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_h)); - } -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/topological_sort.cc b/payload_generator/topological_sort.cc deleted file mode 100644 index 0abd7089..00000000 --- a/payload_generator/topological_sort.cc +++ /dev/null @@ -1,57 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/topological_sort.h" - -#include -#include - -#include - -using std::set; -using std::vector; - -namespace chromeos_update_engine { - -namespace { -void TopologicalSortVisit(const Graph& graph, - set* visited_nodes, - vector* nodes, - Vertex::Index node) { - if (visited_nodes->find(node) != visited_nodes->end()) - return; - - visited_nodes->insert(node); - // Visit all children. - for (Vertex::EdgeMap::const_iterator it = graph[node].out_edges.begin(); - it != graph[node].out_edges.end(); - ++it) { - TopologicalSortVisit(graph, visited_nodes, nodes, it->first); - } - // Visit this node. - nodes->push_back(node); -} -} // namespace - -void TopologicalSort(const Graph& graph, vector* out) { - set visited_nodes; - - for (Vertex::Index i = 0; i < graph.size(); i++) { - TopologicalSortVisit(graph, &visited_nodes, out, i); - } -} - -} // namespace chromeos_update_engine diff --git a/payload_generator/topological_sort.h b/payload_generator/topological_sort.h deleted file mode 100644 index 461cbe16..00000000 --- a/payload_generator/topological_sort.h +++ /dev/null @@ -1,42 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_ -#define UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_ - -#include - -#include "update_engine/payload_generator/graph_types.h" - -namespace chromeos_update_engine { - -// Performs a topological sort on the directed graph 'graph' and stores -// the nodes, in order visited, in 'out'. -// For example, this graph: -// A ---> C ----. -// \ v -// `--> B --> D -// Might result in this in 'out': -// out[0] = D -// out[1] = B -// out[2] = C -// out[3] = A -// Note: results are undefined if there is a cycle in the graph. 
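The comment above already walks through the expected ordering, and the removed TopologicalSortVisit() is a post-order DFS. A minimal Python sketch of the same idea (graph as a dict of successor lists, an assumed representation) makes the invariant explicit: a vertex is emitted only after everything it points to.

def topological_sort(graph):
    """Post-order DFS: for every edge u -> v, v is emitted before u.

    `graph` maps a vertex to an iterable of successors. As the comment
    above notes, the result is undefined if the graph contains a cycle.
    """
    visited = set()
    order = []

    def visit(node):
        if node in visited:
            return
        visited.add(node)
        for child in graph.get(node, ()):   # visit all children first
            visit(child)
        order.append(node)                  # then emit this node

    for node in graph:
        visit(node)
    return order

# The example from the comment: A -> C -> D and A -> B -> D.
g = {'A': ['C', 'B'], 'B': ['D'], 'C': ['D'], 'D': []}
out = topological_sort(g)
assert out[0] == 'D' and out[-1] == 'A'   # e.g. D, C, B, A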
-void TopologicalSort(const Graph& graph, std::vector* out); - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_ diff --git a/payload_generator/topological_sort_unittest.cc b/payload_generator/topological_sort_unittest.cc deleted file mode 100644 index aa296d83..00000000 --- a/payload_generator/topological_sort_unittest.cc +++ /dev/null @@ -1,96 +0,0 @@ -// -// Copyright (C) 2010 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/payload_generator/topological_sort.h" - -#include -#include - -#include - -#include "update_engine/payload_generator/graph_types.h" - -using std::make_pair; -using std::vector; - -namespace chromeos_update_engine { - -class TopologicalSortTest : public ::testing::Test {}; - -namespace { -// Returns true if the value is found in vect. If found, the index is stored -// in out_index if out_index is not null. -template -bool IndexOf(const vector& vect, - const T& value, - typename vector::size_type* out_index) { - for (typename vector::size_type i = 0; i < vect.size(); i++) { - if (vect[i] == value) { - if (out_index) { - *out_index = i; - } - return true; - } - } - return false; -} -} // namespace - -TEST(TopologicalSortTest, SimpleTest) { - int counter = 0; - const Vertex::Index n_a = counter++; - const Vertex::Index n_b = counter++; - const Vertex::Index n_c = counter++; - const Vertex::Index n_d = counter++; - const Vertex::Index n_e = counter++; - const Vertex::Index n_f = counter++; - const Vertex::Index n_g = counter++; - const Vertex::Index n_h = counter++; - const Vertex::Index n_i = counter++; - const Vertex::Index n_j = counter++; - const Graph::size_type kNodeCount = counter++; - - Graph graph(kNodeCount); - - graph[n_i].out_edges.insert(make_pair(n_j, EdgeProperties())); - graph[n_i].out_edges.insert(make_pair(n_c, EdgeProperties())); - graph[n_i].out_edges.insert(make_pair(n_e, EdgeProperties())); - graph[n_i].out_edges.insert(make_pair(n_h, EdgeProperties())); - graph[n_c].out_edges.insert(make_pair(n_b, EdgeProperties())); - graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_d, EdgeProperties())); - graph[n_e].out_edges.insert(make_pair(n_g, EdgeProperties())); - graph[n_g].out_edges.insert(make_pair(n_d, EdgeProperties())); - graph[n_g].out_edges.insert(make_pair(n_f, EdgeProperties())); - graph[n_d].out_edges.insert(make_pair(n_a, EdgeProperties())); - - vector sorted; - TopologicalSort(graph, &sorted); - - for (Vertex::Index i = 0; i < graph.size(); i++) { - vector::size_type src_index = 0; - EXPECT_TRUE(IndexOf(sorted, i, &src_index)); - for (Vertex::EdgeMap::const_iterator it = graph[i].out_edges.begin(); - it != graph[i].out_edges.end(); - ++it) { - vector::size_type dst_index = 0; - EXPECT_TRUE(IndexOf(sorted, it->first, &dst_index)); - EXPECT_LT(dst_index, src_index); - } - } -} - -} // namespace chromeos_update_engine diff --git a/scripts/payload_info.py 
b/scripts/payload_info.py index 09a7cf78..d10cb241 100755 --- a/scripts/payload_info.py +++ b/scripts/payload_info.py @@ -187,10 +187,6 @@ def _GetStats(self, manifest): num_write_seeks += 1 last_ext = curr_ext - if manifest.minor_version == 1: - # Rootfs and kernel are written during the filesystem copy in version 1. - written_blocks += manifest.old_rootfs_info.size / manifest.block_size - written_blocks += manifest.old_kernel_info.size / manifest.block_size # Old and new rootfs and kernel are read once during verification read_blocks += manifest.old_rootfs_info.size / manifest.block_size read_blocks += manifest.old_kernel_info.size / manifest.block_size diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py index 21d8e875..3f644448 100644 --- a/scripts/update_payload/applier.py +++ b/scripts/update_payload/applier.py @@ -306,30 +306,6 @@ def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' % (op_name, data_start, data_length)) - def _ApplyMoveOperation(self, op, op_name, part_file): - """Applies a MOVE operation. - - Note that this operation must read the whole block data from the input and - only then dump it, due to our in-place update semantics; otherwise, it - might clobber data midway through. - - Args: - op: the operation object - op_name: name string for error reporting - part_file: the partition file object - - Raises: - PayloadError if something goes wrong. - """ - block_size = self.block_size - - # Gather input raw data from src extents. - in_data = _ReadExtents(part_file, op.src_extents, block_size) - - # Dump extracted data to dst extents. - _WriteExtents(part_file, in_data, op.dst_extents, block_size, - '%s.dst_extents' % op_name) - def _ApplyZeroOperation(self, op, op_name, part_file): """Applies a ZERO operation. @@ -439,8 +415,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, # Diff from source partition. old_file_name = '/dev/fd/%d' % old_part_file.fileno() - if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF, - common.OpType.BROTLI_BSDIFF): + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): # Invoke bspatch on partition file with extents args. bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name, patch_file_name, in_extents_arg, out_extents_arg] @@ -477,8 +452,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, with tempfile.NamedTemporaryFile(delete=False) as out_file: out_file_name = out_file.name - if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF, - common.OpType.BROTLI_BSDIFF): + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): # Invoke bspatch. bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name, patch_file_name] @@ -520,10 +494,6 @@ def _ApplyOperations(self, operations, base_name, old_part_file, new_part_file, part_size): """Applies a sequence of update operations to a partition. - This assumes an in-place update semantics for MOVE and BSDIFF, namely all - reads are performed first, then the data is processed and written back to - the same file. 
- Args: operations: the sequence of operations base_name: the name of the operation sequence @@ -541,13 +511,8 @@ def _ApplyOperations(self, operations, base_name, old_part_file, if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, common.OpType.REPLACE_XZ): self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size) - elif op.type == common.OpType.MOVE: - self._ApplyMoveOperation(op, op_name, new_part_file) elif op.type == common.OpType.ZERO: self._ApplyZeroOperation(op, op_name, new_part_file) - elif op.type == common.OpType.BSDIFF: - self._ApplyDiffOperation(op, op_name, data, new_part_file, - new_part_file) elif op.type == common.OpType.SOURCE_COPY: self._ApplySourceCopyOperation(op, op_name, old_part_file, new_part_file) @@ -583,15 +548,8 @@ def _ApplyToPartition(self, operations, part_name, base_name, _VerifySha256(old_part_file, old_part_info.hash, 'old ' + part_name, length=old_part_info.size) new_part_file_mode = 'r+b' - if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION: - # Copy the src partition to the dst one; make sure we don't truncate it. - shutil.copyfile(old_part_file_name, new_part_file_name) - elif self.minor_version >= common.SOURCE_MINOR_PAYLOAD_VERSION: - # In minor version >= 2, we don't want to copy the partitions, so - # instead just make the new partition file. - open(new_part_file_name, 'w').close() - else: - raise PayloadError("Unknown minor version: %d" % self.minor_version) + open(new_part_file_name, 'w').close() + else: # We need to create/truncate the dst partition file. new_part_file_mode = 'w+b' diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py index e4fec2d9..674d9f4e 100644 --- a/scripts/update_payload/checker.py +++ b/scripts/update_payload/checker.py @@ -66,7 +66,6 @@ # Supported minor version map to payload types allowed to be using them. _SUPPORTED_MINOR_VERSIONS = { 0: (_TYPE_FULL,), - 1: (_TYPE_DELTA,), 2: (_TYPE_DELTA,), 3: (_TYPE_DELTA,), 4: (_TYPE_DELTA,), @@ -74,8 +73,6 @@ 6: (_TYPE_DELTA,), } -_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024 - # # Helper functions. # @@ -806,89 +803,6 @@ def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name): 'space (%d * %d).' % (op_name, data_length, total_dst_blocks, self.block_size)) - def _CheckMoveOperation(self, op, data_offset, total_src_blocks, - total_dst_blocks, op_name): - """Specific checks for MOVE operations. - - Args: - op: The operation object from the manifest. - data_offset: The offset of a data blob for the operation. - total_src_blocks: Total number of blocks in src_extents. - total_dst_blocks: Total number of blocks in dst_extents. - op_name: Operation name for error reporting. - - Raises: - error.PayloadError if any check fails. - """ - # Check: No data_{offset,length}. - if data_offset is not None: - raise error.PayloadError('%s: contains data_{offset,length}.' % op_name) - - # Check: total_src_blocks == total_dst_blocks. - if total_src_blocks != total_dst_blocks: - raise error.PayloadError( - '%s: total src blocks (%d) != total dst blocks (%d).' % - (op_name, total_src_blocks, total_dst_blocks)) - - # Check: For all i, i-th src block index != i-th dst block index. - i = 0 - src_extent_iter = iter(op.src_extents) - dst_extent_iter = iter(op.dst_extents) - src_extent = dst_extent = None - src_idx = src_num = dst_idx = dst_num = 0 - while i < total_src_blocks: - # Get the next source extent, if needed. 
- if not src_extent: - try: - src_extent = src_extent_iter.next() - except StopIteration: - raise error.PayloadError('%s: ran out of src extents (%d/%d).' % - (op_name, i, total_src_blocks)) - src_idx = src_extent.start_block - src_num = src_extent.num_blocks - - # Get the next dest extent, if needed. - if not dst_extent: - try: - dst_extent = dst_extent_iter.next() - except StopIteration: - raise error.PayloadError('%s: ran out of dst extents (%d/%d).' % - (op_name, i, total_dst_blocks)) - dst_idx = dst_extent.start_block - dst_num = dst_extent.num_blocks - - # Check: start block is not 0. See crbug/480751; there are still versions - # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll, - # so we need to fail payloads that try to MOVE to/from block 0. - if src_idx == 0 or dst_idx == 0: - raise error.PayloadError( - '%s: MOVE operation cannot have extent with start block 0' % - op_name) - - if self.check_move_same_src_dst_block and src_idx == dst_idx: - raise error.PayloadError( - '%s: src/dst block number %d is the same (%d).' % - (op_name, i, src_idx)) - - advance = min(src_num, dst_num) - i += advance - - src_idx += advance - src_num -= advance - if src_num == 0: - src_extent = None - - dst_idx += advance - dst_num -= advance - if dst_num == 0: - dst_extent = None - - # Make sure we've exhausted all src/dst extents. - if src_extent: - raise error.PayloadError('%s: excess src blocks.' % op_name) - if dst_extent: - raise error.PayloadError('%s: excess dst blocks.' % op_name) - def _CheckZeroOperation(self, op, op_name): """Specific checks for ZERO operations. @@ -908,7 +822,7 @@ def _CheckZeroOperation(self, op, op_name): raise error.PayloadError('%s: contains data_offset.' % op_name) def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): - """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF + """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF operations. Args: @@ -933,8 +847,7 @@ def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): total_dst_blocks * self.block_size)) # Check the existence of src_length and dst_length for legacy bsdiffs. - if (op.type == common.OpType.BSDIFF or - (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)): + if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3: if not op.HasField('src_length') or not op.HasField('dst_length'): raise error.PayloadError('%s: require {src,dst}_length.' 
% op_name) else: @@ -1074,13 +987,8 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, (self.minor_version >= 3 or self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)): self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) - elif op.type == common.OpType.MOVE and self.minor_version == 1: - self._CheckMoveOperation(op, data_offset, total_src_blocks, - total_dst_blocks, op_name) elif op.type == common.OpType.ZERO and self.minor_version >= 4: self._CheckZeroOperation(op, op_name) - elif op.type == common.OpType.BSDIFF and self.minor_version == 1: - self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2: self._CheckSourceCopyOperation(data_offset, total_src_blocks, total_dst_blocks, op_name) @@ -1149,9 +1057,7 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, common.OpType.REPLACE: 0, common.OpType.REPLACE_BZ: 0, common.OpType.REPLACE_XZ: 0, - common.OpType.MOVE: 0, common.OpType.ZERO: 0, - common.OpType.BSDIFF: 0, common.OpType.SOURCE_COPY: 0, common.OpType.SOURCE_BSDIFF: 0, common.OpType.PUFFDIFF: 0, @@ -1162,8 +1068,6 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, common.OpType.REPLACE: 0, common.OpType.REPLACE_BZ: 0, common.OpType.REPLACE_XZ: 0, - # MOVE operations don't have blobs. - common.OpType.BSDIFF: 0, # SOURCE_COPY operations don't have blobs. common.OpType.SOURCE_BSDIFF: 0, common.OpType.PUFFDIFF: 0, @@ -1374,19 +1278,10 @@ def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, if part_sizes is not None and part_sizes.get(part, None): new_fs_usable_size = old_fs_usable_size = part_sizes[part] - # Infer the usable partition size when validating rootfs operations: - # - If rootfs partition size was provided, use that. - # - Otherwise, if this is an older delta (minor version < 2), stick with - # a known constant size. This is necessary because older deltas may - # exceed the filesystem size when moving data blocks around. - # - Otherwise, use the encoded filesystem size. - elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \ - self.minor_version in (None, 1): - new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE - - # TODO(garnold)(chromium:243559) only default to the filesystem size if - # no explicit size provided *and* the partition size is not embedded in - # the payload; see issue for more details. + + # TODO(chromium:243559) only default to the filesystem size if no + # explicit size provided *and* the partition size is not embedded in the + # payload; see issue for more details. total_blob_size += self._CheckOperations( operations, report, '%s_install_operations' % part, self.old_fs_sizes[part], self.new_fs_sizes[part], diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py index 7e52233e..b5f2f3e2 100755 --- a/scripts/update_payload/checker_unittest.py +++ b/scripts/update_payload/checker_unittest.py @@ -44,8 +44,6 @@ def _OpTypeByName(op_name): op_name_to_type = { 'REPLACE': common.OpType.REPLACE, 'REPLACE_BZ': common.OpType.REPLACE_BZ, - 'MOVE': common.OpType.MOVE, - 'BSDIFF': common.OpType.BSDIFF, 'SOURCE_COPY': common.OpType.SOURCE_COPY, 'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF, 'ZERO': common.OpType.ZERO, @@ -429,10 +427,10 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, payload_gen.SetBlockSize(test_utils.KiB(4)) # Add some operations. 
- payload_gen.AddOperation(False, common.OpType.MOVE, + payload_gen.AddOperation(False, common.OpType.SOURCE_COPY, src_extents=[(0, 16), (16, 497)], dst_extents=[(16, 496), (0, 16)]) - payload_gen.AddOperation(True, common.OpType.MOVE, + payload_gen.AddOperation(True, common.OpType.SOURCE_COPY, src_extents=[(0, 8), (8, 8)], dst_extents=[(8, 8), (0, 8)]) @@ -669,132 +667,6 @@ def testCheckReplaceXzOperation(self): PayloadError, payload_checker._CheckReplaceOperation, op, data_length, (data_length + block_size - 1) / block_size, 'foo') - def testCheckMoveOperation_Pass(self): - """Tests _CheckMoveOperation(); pass case.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertIsNone( - payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')) - - def testCheckMoveOperation_FailContainsData(self): - """Tests _CheckMoveOperation(); fails, message contains data.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, 1024, 134, 134, 'foo') - - def testCheckMoveOperation_FailInsufficientSrcBlocks(self): - """Tests _CheckMoveOperation(); fails, not enough actual src blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 127))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailInsufficientDstBlocks(self): - """Tests _CheckMoveOperation(); fails, not enough actual dst blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 5))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailExcessSrcBlocks(self): - """Tests _CheckMoveOperation(); fails, too many actual src blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 5))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 129))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailExcessDstBlocks(self): - """Tests 
_CheckMoveOperation(); fails, too many actual dst blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 7))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailStagnantBlocks(self): - """Tests _CheckMoveOperation(); fails, there are blocks that do not move.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((8, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailZeroStartBlock(self): - """Tests _CheckMoveOperation(); fails, has extent with start block 0.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((0, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((8, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((0, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - def testCheckAnyDiff(self): """Tests _CheckAnyDiffOperation().""" payload_checker = checker.PayloadChecker(self.MockPayload()) @@ -841,7 +713,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, """Parametric testing of _CheckOperation(). Args: - op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF', + op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'. is_last: Whether we're testing the last operation in a sequence. allow_signature: Whether we're testing a signature-capable operation. 
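The DoCheckOperationTest docstring above gets its arguments from the parametric test generation further down (AddParametricTests). A rough sketch of that pattern, assuming a hypothetical add_parametric_tests helper rather than the one defined in this file: expand the Cartesian product of per-argument value tuples, skip combinations a validator rejects, and attach one test method per surviving combination.

import itertools

def add_parametric_tests(cls, base_name, arg_space, validate=None):
    """Attach one test method per combination of argument values.

    `arg_space` maps argument names to the tuple of values to try; the
    individual cases are the Cartesian product of those tuples. `validate`
    may reject combinations that make no sense together. Hypothetical
    sketch only; the real helper in this file differs in details.
    """
    names = sorted(arg_space)
    for values in itertools.product(*(arg_space[n] for n in names)):
        kwargs = dict(zip(names, values))
        if validate and not validate(**kwargs):
            continue
        test_name = 'test%s__%s' % (
            base_name, '__'.join('%s_%s' % (n, kwargs[n]) for n in names))

        def test_method(self, _kwargs=kwargs):
            getattr(self, 'Do%sTest' % base_name)(**_kwargs)

        setattr(cls, test_name, test_method)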
@@ -880,8 +752,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, op.type = op_type total_src_blocks = 0 - if op_type in (common.OpType.MOVE, common.OpType.BSDIFF, - common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF, + if op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF): if fail_src_extents: self.AddToMessage(op.src_extents, @@ -895,8 +766,6 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ): payload_checker.minor_version = 0 - elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF): - payload_checker.minor_version = 2 if fail_bad_minor_version else 1 elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF): payload_checker.minor_version = 1 if fail_bad_minor_version else 2 if op_type == common.OpType.REPLACE_XZ: @@ -907,7 +776,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, elif op_type == common.OpType.PUFFDIFF: payload_checker.minor_version = 4 if fail_bad_minor_version else 5 - if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY): + if op_type != common.OpType.SOURCE_COPY: if not fail_mismatched_data_offset_length: op.data_length = 16 * block_size - 8 if fail_prev_data_offset: @@ -944,8 +813,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, if total_src_blocks: if fail_src_length: op.src_length = total_src_blocks * block_size + 8 - elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF, - common.OpType.SOURCE_BSDIFF) and + elif (op_type == common.OpType.SOURCE_BSDIFF and payload_checker.minor_version <= 3): op.src_length = total_src_blocks * block_size elif fail_src_length: @@ -955,8 +823,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, if total_dst_blocks: if fail_dst_length: op.dst_length = total_dst_blocks * block_size + 8 - elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF, - common.OpType.SOURCE_BSDIFF) and + elif (op_type == common.OpType.SOURCE_BSDIFF and payload_checker.minor_version <= 3): op.dst_length = total_dst_blocks * block_size @@ -1120,7 +987,6 @@ def DoCheckManifestMinorVersionTest(self, minor_version, payload_type): should_succeed = ( (minor_version == 0 and payload_type == checker._TYPE_FULL) or - (minor_version == 1 and payload_type == checker._TYPE_DELTA) or (minor_version == 2 and payload_type == checker._TYPE_DELTA) or (minor_version == 3 and payload_type == checker._TYPE_DELTA) or (minor_version == 4 and payload_type == checker._TYPE_DELTA) or @@ -1244,8 +1110,8 @@ def ValidateCheckOperationTest(op_type_name, is_last, allow_signature, fail_bad_minor_version)): return False - # MOVE and SOURCE_COPY operations don't carry data. - if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and ( + # SOURCE_COPY operation does not carry data. + if (op_type == common.OpType.SOURCE_COPY and ( fail_mismatched_data_offset_length or fail_data_hash or fail_prev_data_offset)): return False @@ -1328,9 +1194,8 @@ def AddAllParametricTests(): # Add all _CheckOperation() test cases. 
AddParametricTests('CheckOperation', {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', - 'MOVE', 'BSDIFF', 'SOURCE_COPY', - 'SOURCE_BSDIFF', 'PUFFDIFF', - 'BROTLI_BSDIFF'), + 'SOURCE_COPY', 'SOURCE_BSDIFF', + 'PUFFDIFF', 'BROTLI_BSDIFF'), 'is_last': (True, False), 'allow_signature': (True, False), 'allow_unhashed': (True, False), @@ -1360,7 +1225,7 @@ def AddAllParametricTests(): # Add all _CheckManifestMinorVersion() test cases. AddParametricTests('CheckManifestMinorVersion', - {'minor_version': (None, 0, 1, 2, 3, 4, 5, 555), + {'minor_version': (None, 0, 2, 3, 4, 5, 555), 'payload_type': (checker._TYPE_FULL, checker._TYPE_DELTA)}) diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py index 9061a754..b7b53dc8 100644 --- a/scripts/update_payload/common.py +++ b/scripts/update_payload/common.py @@ -36,7 +36,6 @@ CHROMEOS_MAJOR_PAYLOAD_VERSION = 1 BRILLO_MAJOR_PAYLOAD_VERSION = 2 -INPLACE_MINOR_PAYLOAD_VERSION = 1 SOURCE_MINOR_PAYLOAD_VERSION = 2 OPSRCHASH_MINOR_PAYLOAD_VERSION = 3 BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4 @@ -55,8 +54,6 @@ class OpType(object): _CLASS = update_metadata_pb2.InstallOperation REPLACE = _CLASS.REPLACE REPLACE_BZ = _CLASS.REPLACE_BZ - MOVE = _CLASS.MOVE - BSDIFF = _CLASS.BSDIFF SOURCE_COPY = _CLASS.SOURCE_COPY SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF ZERO = _CLASS.ZERO @@ -64,13 +61,11 @@ class OpType(object): REPLACE_XZ = _CLASS.REPLACE_XZ PUFFDIFF = _CLASS.PUFFDIFF BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF - ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO, + ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO, DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF) NAMES = { REPLACE: 'REPLACE', REPLACE_BZ: 'REPLACE_BZ', - MOVE: 'MOVE', - BSDIFF: 'BSDIFF', SOURCE_COPY: 'SOURCE_COPY', SOURCE_BSDIFF: 'SOURCE_BSDIFF', ZERO: 'ZERO', diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py index 1e2259d4..f0edad57 100644 --- a/scripts/update_payload/test_utils.py +++ b/scripts/update_payload/test_utils.py @@ -288,11 +288,11 @@ def AddOperationWithData(self, is_kernel, op_type, src_extents=None, Args: is_kernel: whether this is a kernel (True) or rootfs (False) operation - op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF + op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ. src_extents: list of (start, length) pairs indicating src block ranges - src_length: size of the src data in bytes (needed for BSDIFF) + src_length: size of the src data in bytes (needed for diff operations) dst_extents: list of (start, length) pairs indicating dst block ranges - dst_length: size of the dst data in bytes (needed for BSDIFF) + dst_length: size of the dst data in bytes (needed for diff operations) data_blob: a data blob associated with this operation do_hash_data_blob: whether or not to compute and add a data blob hash """ diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py index 7f1648b2..62756423 100644 --- a/scripts/update_payload/update_metadata_pb2.py +++ b/scripts/update_payload/update_metadata_pb2.py @@ -1,19 +1,27 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: update_metadata.proto +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection -from google.protobuf import descriptor_pb2 +from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) +_sym_db = _symbol_database.Default() + DESCRIPTOR = _descriptor.FileDescriptor( name='update_metadata.proto', package='chromeos_update_engine', - serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 
\x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03') + syntax='proto2', + serialized_options=_b('H\003'), + serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd0\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x8f\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n 
\x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03') +) @@ -25,54 +33,47 @@ values=[ _descriptor.EnumValueDescriptor( name='REPLACE', index=0, number=0, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='REPLACE_BZ', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MOVE', index=2, number=2, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='BSDIFF', index=3, number=3, - options=None, + name='SOURCE_COPY', index=2, number=4, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='SOURCE_COPY', index=4, number=4, - options=None, + name='SOURCE_BSDIFF', index=3, number=5, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='SOURCE_BSDIFF', index=5, number=5, - options=None, + name='REPLACE_XZ', index=4, number=8, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='REPLACE_XZ', index=6, number=8, - options=None, + name='ZERO', index=5, number=6, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='ZERO', index=7, number=6, - options=None, + name='DISCARD', index=6, number=7, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='DISCARD', index=8, number=7, - options=None, + name='BROTLI_BSDIFF', index=7, number=10, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='BROTLI_BSDIFF', index=9, number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PUFFDIFF', index=10, number=9, - options=None, + name='PUFFDIFF', index=8, number=9, + serialized_options=None, 
type=None), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=712, - serialized_end=877, + serialized_end=855, ) +_sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) _EXTENT = _descriptor.Descriptor( @@ -88,23 +89,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=49, serialized_end=98, ) @@ -123,23 +127,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=180, serialized_end=222, ) @@ -157,16 +164,19 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_SIGNATURES_SIGNATURE, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=100, serialized_end=222, ) @@ -185,23 +195,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=224, serialized_end=267, ) @@ -217,54 +230,57 @@ _descriptor.FieldDescriptor( name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4, number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5, number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=269, serialized_end=388, ) @@ -283,63 +299,63 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_extents', 
full_name='chromeos_update_engine.InstallOperation.src_extents', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4, number=5, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7, number=8, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8, number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -347,11 +363,14 @@ enum_types=[ _INSTALLOPERATION_TYPE, ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=391, - serialized_end=877, + serialized_end=855, ) @@ -365,126 +384,129 @@ _descriptor.FieldDescriptor( name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2, number=3, type=9, cpp_type=9, 
label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_algorithm', 
full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11, number=12, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12, number=13, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13, number=14, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15, number=16, type=13, cpp_type=3, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=880, - serialized_end=1607, + oneofs=[ + ], + serialized_start=858, + serialized_end=1585, ) @@ -498,35 +520,38 @@ _descriptor.FieldDescriptor( name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', 
extension_ranges=[], - serialized_start=1609, - serialized_end=1685, + oneofs=[ + ], + serialized_start=1587, + serialized_end=1663, ) @@ -543,18 +568,21 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=1687, - serialized_end=1776, + oneofs=[ + ], + serialized_start=1665, + serialized_end=1754, ) @@ -571,124 +599,127 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=4096, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4, number=5, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11, number=12, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12, number=13, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13, number=14, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=1779, - serialized_end=2596, + oneofs=[ + ], + serialized_start=1757, + serialized_end=2574, ) -_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES; +_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES _SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE _INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE _INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT _INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT -_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION; +_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION _PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = 
_SIGNATURES_SIGNATURE _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO @@ -717,68 +748,79 @@ DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST +_sym_db.RegisterFileDescriptor(DESCRIPTOR) -class Extent(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _EXTENT - +Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), dict( + DESCRIPTOR = _EXTENT, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent) + )) +_sym_db.RegisterMessage(Extent) -class Signatures(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - - class Signature(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _SIGNATURES_SIGNATURE +Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), dict( + Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict( + DESCRIPTOR = _SIGNATURES_SIGNATURE, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature) - DESCRIPTOR = _SIGNATURES - + )) + , + DESCRIPTOR = _SIGNATURES, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures) + )) +_sym_db.RegisterMessage(Signatures) +_sym_db.RegisterMessage(Signatures.Signature) -class PartitionInfo(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _PARTITIONINFO - +PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), dict( + DESCRIPTOR = _PARTITIONINFO, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo) + )) +_sym_db.RegisterMessage(PartitionInfo) -class ImageInfo(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _IMAGEINFO - +ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), dict( + DESCRIPTOR = _IMAGEINFO, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo) + )) +_sym_db.RegisterMessage(ImageInfo) -class InstallOperation(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _INSTALLOPERATION - +InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), dict( + DESCRIPTOR = _INSTALLOPERATION, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation) + )) +_sym_db.RegisterMessage(InstallOperation) -class PartitionUpdate(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _PARTITIONUPDATE - +PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), dict( + DESCRIPTOR = _PARTITIONUPDATE, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate) + )) +_sym_db.RegisterMessage(PartitionUpdate) -class DynamicPartitionGroup(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = 
_DYNAMICPARTITIONGROUP - +DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), dict( + DESCRIPTOR = _DYNAMICPARTITIONGROUP, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup) + )) +_sym_db.RegisterMessage(DynamicPartitionGroup) -class DynamicPartitionMetadata(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _DYNAMICPARTITIONMETADATA - +DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), dict( + DESCRIPTOR = _DYNAMICPARTITIONMETADATA, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata) + )) +_sym_db.RegisterMessage(DynamicPartitionMetadata) -class DeltaArchiveManifest(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _DELTAARCHIVEMANIFEST - +DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), dict( + DESCRIPTOR = _DELTAARCHIVEMANIFEST, + __module__ = 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest) + )) +_sym_db.RegisterMessage(DeltaArchiveManifest) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003') +DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/update_metadata.proto b/update_metadata.proto index b0e8154a..3382f849 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -153,10 +153,10 @@ message ImageInfo { message InstallOperation { enum Type { - REPLACE = 0; // Replace destination extents w/ attached data - REPLACE_BZ = 1; // Replace destination extents w/ attached bzipped data - MOVE = 2; // Move source extents to destination extents - BSDIFF = 3; // The data is a bsdiff binary diff + REPLACE = 0; // Replace destination extents w/ attached data. + REPLACE_BZ = 1; // Replace destination extents w/ attached bzipped data. + MOVE = 2 [deprecated = true]; // Move source extents to target extents. + BSDIFF = 3 [deprecated = true]; // The data is a bsdiff binary diff. // On minor version 2 or newer, these operations are supported: SOURCE_COPY = 4; // Copy from source to target partition From c288d5ba2182a0b2742fa93d04db3e024b00e98e Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Thu, 3 Oct 2019 13:47:06 -0700 Subject: [PATCH 117/624] Replace `look` with `grep`. `look` is an external host tool that's not coming from the Android repo. https://android.googlesource.com/platform/build/+/master/Changes.md#path_tools Bug: 142073223 Test: `m -j ota_from_target_files`; Generate incremental package. 
Change-Id: Idf5ee71846ea641387a36887a0ffc3d4a322743d
---
 scripts/brillo_update_payload | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 23d2d7ec..d9c18ff6 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -281,7 +281,7 @@ read_option_uint() {
   local option_key="$2"
   local default_value="${3:-}"
   local value
-  if value=$(look "${option_key}=" "${file_txt}" | tail -n 1); then
+  if value=$(grep "^${option_key}=" "${file_txt}" | tail -n 1); then
     if value=$(echo "${value}" | cut -f 2- -d "=" | grep -E "^[0-9]+$"); then
       echo "${value}"
       return

From 6cf830b8913e741d7d3c7ed047b2cbdbb222034e Mon Sep 17 00:00:00 2001
From: Tianjie Xu
Date: Mon, 30 Sep 2019 11:31:49 -0700
Subject: [PATCH 118/624] Factor out the RSA verification in payload verifier

Right now, the payload is always signed with an RSA key, and the payload
verifier takes the public key as a PEM-encoded string. As we want to
support payload signing with EC keys, we need to figure out the key type
first in the verifier. So, add an overload in the payload verifier that
accepts an EVP_PKEY as the public key.

Bug: 141244025
Test: unittests pass
Change-Id: Ibbdac5a7a3de48347100861aeac0013bff43da6f
---
 payload_consumer/delta_performer.cc          |  8 +-
 payload_consumer/payload_metadata.cc         | 38 ++++------
 payload_consumer/payload_verifier.cc         | 78 ++++++++++++++------
 payload_consumer/payload_verifier.h          | 63 ++++++++++------
 payload_generator/payload_signer.cc          | 57 +++++++++-----
 payload_generator/payload_signer_unittest.cc | 34 ++++-----
 6 files changed, 173 insertions(+), 105 deletions(-)

diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 3ff98ca2..8049af72 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -1794,8 +1794,12 @@ ErrorCode DeltaPerformer::VerifyPayload(
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                       hash_data.size() == kSHA256Size);
-  if (!PayloadVerifier::VerifySignature(
-          signatures_message_data_, public_key, hash_data)) {
+  auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  if (!payload_verifier) {
+    LOG(ERROR) << "Failed to create the payload verifier from " << public_key;
+    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
+  }
+  if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
     // The autoupdate_CatchBadSignatures test checks for this string
     // in log-files. Keep in sync.
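// A minimal usage sketch of the refactored verification API (illustration
// only, not a hunk of this patch): callers now construct a verifier once from
// the PEM-encoded public key and then check one or more signature blobs
// against a payload hash. |pem_public_key|, |signatures_blob| and
// |sha256_hash| are placeholder names, not identifiers from this file.
//
//   auto verifier = PayloadVerifier::CreateInstance(pem_public_key);
//   if (verifier && verifier->VerifySignature(signatures_blob, sha256_hash)) {
//     // At least one signature in the blob matched the padded hash.
//   }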
LOG(ERROR) << "Public key verification failed, thus update failed."; diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index 37397670..c81d3a91 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -201,32 +201,26 @@ ErrorCode PayloadMetadata::ValidateMetadataSignature( return ErrorCode::kDownloadMetadataSignatureVerificationError; } - if (!metadata_signature_blob.empty()) { - brillo::Blob expected_metadata_hash; - if (!PayloadVerifier::GetRawHashFromSignature( - metadata_signature_blob, pem_public_key, &expected_metadata_hash)) { - LOG(ERROR) << "Unable to compute expected hash from metadata signature"; - return ErrorCode::kDownloadMetadataSignatureError; - } - - brillo::Blob padded_metadata_hash = metadata_hash; - if (!PayloadVerifier::PadRSASHA256Hash(&padded_metadata_hash, - expected_metadata_hash.size())) { - LOG(ERROR) << "Failed to pad the SHA256 hash to " - << expected_metadata_hash.size() << " bytes."; - return ErrorCode::kDownloadMetadataSignatureVerificationError; - } + auto payload_verifier = PayloadVerifier::CreateInstance(pem_public_key); + if (!payload_verifier) { + LOG(ERROR) << "Failed to create the payload verifier from " + << pem_public_key; + return ErrorCode::kDownloadMetadataSignatureVerificationError; + } - if (padded_metadata_hash != expected_metadata_hash) { - LOG(ERROR) << "Manifest hash verification failed. Expected hash = "; - utils::HexDumpVector(expected_metadata_hash); - LOG(ERROR) << "Calculated hash = "; - utils::HexDumpVector(padded_metadata_hash); + if (!metadata_signature_blob.empty()) { + brillo::Blob decrypted_signature; + if (!payload_verifier->VerifyRawSignature( + metadata_signature_blob, metadata_hash, &decrypted_signature)) { + LOG(ERROR) << "Manifest hash verification failed. 
Decrypted hash = "; + utils::HexDumpVector(decrypted_signature); + LOG(ERROR) << "Calculated hash before padding = "; + utils::HexDumpVector(metadata_hash); return ErrorCode::kDownloadMetadataSignatureMismatch; } } else { - if (!PayloadVerifier::VerifySignature( - metadata_signature_protobuf, pem_public_key, metadata_hash)) { + if (!payload_verifier->VerifySignature(metadata_signature_protobuf, + metadata_hash)) { LOG(ERROR) << "Manifest hash verification failed."; return ErrorCode::kDownloadMetadataSignatureMismatch; } diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc index 3a3ccbf6..b2c5be47 100644 --- a/payload_consumer/payload_verifier.cc +++ b/payload_consumer/payload_verifier.cc @@ -51,9 +51,30 @@ const uint8_t kSHA256DigestInfoPrefix[] = { } // namespace -bool PayloadVerifier::VerifySignature(const string& signature_proto, - const string& pem_public_key, - const brillo::Blob& sha256_hash_data) { +std::unique_ptr PayloadVerifier::CreateInstance( + const std::string& pem_public_key) { + std::unique_ptr bp( + BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size()), BIO_free); + if (!bp) { + LOG(ERROR) << "Failed to read " << pem_public_key << " into buffer."; + return nullptr; + } + + auto pub_key = std::unique_ptr( + PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr), EVP_PKEY_free); + if (!pub_key) { + LOG(ERROR) << "Failed to parse the public key in " << pem_public_key; + return nullptr; + } + + return std::unique_ptr( + new PayloadVerifier(std::move(pub_key))); +} + +bool PayloadVerifier::VerifySignature( + const string& signature_proto, const brillo::Blob& sha256_hash_data) const { + TEST_AND_RETURN_FALSE(public_key_ != nullptr); + Signatures signatures; LOG(INFO) << "signature blob size = " << signature_proto.size(); TEST_AND_RETURN_FALSE(signatures.ParseFromString(signature_proto)); @@ -69,17 +90,14 @@ bool PayloadVerifier::VerifySignature(const string& signature_proto, const Signatures::Signature& signature = signatures.signatures(i); brillo::Blob sig_data(signature.data().begin(), signature.data().end()); brillo::Blob sig_hash_data; - if (!GetRawHashFromSignature(sig_data, pem_public_key, &sig_hash_data)) - continue; - - brillo::Blob padded_hash_data = sha256_hash_data; - if (PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size()) && - padded_hash_data == sig_hash_data) { + if (VerifyRawSignature(sig_data, sha256_hash_data, &sig_hash_data)) { LOG(INFO) << "Verified correct signature " << i + 1 << " out of " << signatures.signatures_size() << " signatures."; return true; } - tested_hashes.push_back(sig_hash_data); + if (!sig_hash_data.empty()) { + tested_hashes.push_back(sig_hash_data); + } } LOG(ERROR) << "None of the " << signatures.signatures_size() << " signatures is correct. 
Expected hash before padding:"; @@ -91,24 +109,43 @@ bool PayloadVerifier::VerifySignature(const string& signature_proto, return false; } -bool PayloadVerifier::GetRawHashFromSignature(const brillo::Blob& sig_data, - const string& pem_public_key, - brillo::Blob* out_hash_data) { +bool PayloadVerifier::VerifyRawSignature( + const brillo::Blob& sig_data, + const brillo::Blob& sha256_hash_data, + brillo::Blob* decrypted_sig_data) const { + TEST_AND_RETURN_FALSE(public_key_ != nullptr); + + int key_type = EVP_PKEY_id(public_key_.get()); + TEST_AND_RETURN_FALSE(key_type == EVP_PKEY_RSA); + brillo::Blob sig_hash_data; + TEST_AND_RETURN_FALSE( + GetRawHashFromSignature(sig_data, public_key_.get(), &sig_hash_data)); + + if (decrypted_sig_data != nullptr) { + *decrypted_sig_data = sig_hash_data; + } + + brillo::Blob padded_hash_data = sha256_hash_data; + TEST_AND_RETURN_FALSE( + PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size())); + + return padded_hash_data == sig_hash_data; +} + +bool PayloadVerifier::GetRawHashFromSignature( + const brillo::Blob& sig_data, + const EVP_PKEY* public_key, + brillo::Blob* out_hash_data) const { // The code below executes the equivalent of: // - // openssl rsautl -verify -pubin -inkey <(echo |pem_public_key|) + // openssl rsautl -verify -pubin -inkey <(echo pem_public_key) // -in |sig_data| -out |out_hash_data| - - BIO* bp = BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size()); - char dummy_password[] = {' ', 0}; // Ensure no password is read from stdin. - RSA* rsa = PEM_read_bio_RSA_PUBKEY(bp, nullptr, nullptr, dummy_password); - BIO_free(bp); + RSA* rsa = EVP_PKEY_get0_RSA(public_key); TEST_AND_RETURN_FALSE(rsa != nullptr); unsigned int keysize = RSA_size(rsa); if (sig_data.size() > 2 * keysize) { LOG(ERROR) << "Signature size is too big for public key size."; - RSA_free(rsa); return false; } @@ -116,7 +153,6 @@ bool PayloadVerifier::GetRawHashFromSignature(const brillo::Blob& sig_data, brillo::Blob hash_data(keysize); int decrypt_size = RSA_public_decrypt( sig_data.size(), sig_data.data(), hash_data.data(), rsa, RSA_NO_PADDING); - RSA_free(rsa); TEST_AND_RETURN_FALSE(decrypt_size > 0 && decrypt_size <= static_cast(hash_data.size())); hash_data.resize(decrypt_size); diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h index af8e05fd..b5d54572 100644 --- a/payload_consumer/payload_verifier.h +++ b/payload_consumer/payload_verifier.h @@ -17,38 +17,23 @@ #ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_ #define UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_ +#include #include +#include -#include #include +#include #include "update_engine/update_metadata.pb.h" -// This class encapsulates methods used for payload signature verification. -// See payload_generator/payload_signer.h for payload signing. +// This class holds the public key and implements methods used for payload +// signature verification. See payload_generator/payload_signer.h for payload +// signing. namespace chromeos_update_engine { class PayloadVerifier { public: - // Interprets |signature_proto| as a protocol buffer containing the Signatures - // message and decrypts each signature data using the |pem_public_key|. - // |pem_public_key| should be a PEM format RSA public key data. - // Pads the 32 bytes |sha256_hash_data| to 256 or 512 bytes according to the - // PKCS#1 v1.5 standard; and returns whether *any* of the decrypted hashes - // matches the padded hash data. 
In case of any error parsing the signatures - // or the public key, returns false. - static bool VerifySignature(const std::string& signature_proto, - const std::string& pem_public_key, - const brillo::Blob& sha256_hash_data); - - // Decrypts |sig_data| with the given |pem_public_key| and populates - // |out_hash_data| with the decoded raw hash. |pem_public_key| should be a PEM - // format RSA public key data. Returns true if successful, false otherwise. - static bool GetRawHashFromSignature(const brillo::Blob& sig_data, - const std::string& pem_public_key, - brillo::Blob* out_hash_data); - // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048 or // RSA4096 using the PKCS#1 v1.5 scheme. // hash should be a pointer to vector of exactly 256 bits. |rsa_size| must be @@ -57,9 +42,41 @@ class PayloadVerifier { // Returns true on success, false otherwise. static bool PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size); + // Parses the input as a PEM encoded public string. And creates a + // PayloadVerifier with that public key for signature verification. + static std::unique_ptr CreateInstance( + const std::string& pem_public_key); + + // Interprets |signature_proto| as a protocol buffer containing the + // |Signatures| message and decrypts each signature data using the stored + // public key. Pads the 32 bytes |sha256_hash_data| to 256 or 512 bytes + // according to the PKCS#1 v1.5 standard; and returns whether *any* of the + // decrypted hashes matches the padded hash data. In case of any error parsing + // the signatures, returns false. + bool VerifySignature(const std::string& signature_proto, + const brillo::Blob& sha256_hash_data) const; + + // Verifies if |sig_data| is a raw signature of the hash |sha256_hash_data|. + // If PayloadVerifier is using RSA as the public key, further puts the + // decrypted data of |sig_data| into |decrypted_sig_data|. + bool VerifyRawSignature(const brillo::Blob& sig_data, + const brillo::Blob& sha256_hash_data, + brillo::Blob* decrypted_sig_data) const; + private: - // This should never be constructed - DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadVerifier); + explicit PayloadVerifier( + std::unique_ptr&& public_key) + : public_key_(std::move(public_key)) {} + + // Decrypts |sig_data| with the given |public_key| and populates + // |out_hash_data| with the decoded raw hash. Returns true if successful, + // false otherwise. 
+ bool GetRawHashFromSignature(const brillo::Blob& sig_data, + const EVP_PKEY* public_key, + brillo::Blob* out_hash_data) const; + + std::unique_ptr public_key_{nullptr, + nullptr}; }; } // namespace chromeos_update_engine diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 9739052f..3c9ce954 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -18,6 +18,7 @@ #include +#include #include #include @@ -255,14 +256,18 @@ bool PayloadSigner::VerifySignedPayload(const string& payload_path, string public_key; TEST_AND_RETURN_FALSE(utils::ReadFile(public_key_path, &public_key)); TEST_AND_RETURN_FALSE(payload_hash.size() == kSHA256Size); + + auto payload_verifier = PayloadVerifier::CreateInstance(public_key); + TEST_AND_RETURN_FALSE(payload_verifier != nullptr); + TEST_AND_RETURN_FALSE( - PayloadVerifier::VerifySignature(signature, public_key, payload_hash)); + payload_verifier->VerifySignature(signature, payload_hash)); if (metadata_signature_size) { signature.assign(payload.begin() + metadata_size, payload.begin() + metadata_size + metadata_signature_size); TEST_AND_RETURN_FALSE(metadata_hash.size() == kSHA256Size); TEST_AND_RETURN_FALSE( - PayloadVerifier::VerifySignature(signature, public_key, metadata_hash)); + payload_verifier->VerifySignature(signature, metadata_hash)); } return true; } @@ -280,27 +285,39 @@ bool PayloadSigner::SignHash(const brillo::Blob& hash, FILE* fprikey = fopen(private_key_path.c_str(), "rb"); TEST_AND_RETURN_FALSE(fprikey != nullptr); - RSA* rsa = PEM_read_RSAPrivateKey(fprikey, nullptr, nullptr, nullptr); + + std::unique_ptr private_key( + PEM_read_PrivateKey(fprikey, nullptr, nullptr, nullptr), EVP_PKEY_free); fclose(fprikey); - TEST_AND_RETURN_FALSE(rsa != nullptr); - - brillo::Blob padded_hash = hash; - PayloadVerifier::PadRSASHA256Hash(&padded_hash, RSA_size(rsa)); - - brillo::Blob signature(RSA_size(rsa)); - ssize_t signature_size = RSA_private_encrypt(padded_hash.size(), - padded_hash.data(), - signature.data(), - rsa, - RSA_NO_PADDING); - RSA_free(rsa); - if (signature_size < 0) { - LOG(ERROR) << "Signing hash failed: " - << ERR_error_string(ERR_get_error(), nullptr); + TEST_AND_RETURN_FALSE(private_key != nullptr); + + int key_type = EVP_PKEY_id(private_key.get()); + brillo::Blob signature; + if (key_type == EVP_PKEY_RSA) { + RSA* rsa = EVP_PKEY_get0_RSA(private_key.get()); + TEST_AND_RETURN_FALSE(rsa != nullptr); + + brillo::Blob padded_hash = hash; + PayloadVerifier::PadRSASHA256Hash(&padded_hash, RSA_size(rsa)); + + signature.resize(RSA_size(rsa)); + ssize_t signature_size = RSA_private_encrypt(padded_hash.size(), + padded_hash.data(), + signature.data(), + rsa, + RSA_NO_PADDING); + + if (signature_size < 0) { + LOG(ERROR) << "Signing hash failed: " + << ERR_error_string(ERR_get_error(), nullptr); + return false; + } + TEST_AND_RETURN_FALSE(static_cast(signature_size) == + signature.size()); + } else { + LOG(ERROR) << "key_type " << key_type << " isn't supported for signing"; return false; } - TEST_AND_RETURN_FALSE(static_cast(signature_size) == - signature.size()); out_signature->swap(signature); return true; } diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc index eaf87768..457b34cd 100644 --- a/payload_generator/payload_signer_unittest.cc +++ b/payload_generator/payload_signer_unittest.cc @@ -131,19 +131,15 @@ TEST_F(PayloadSignerTest, VerifyAllSignatureTest) { GetBuildArtifactsPath(kUnittestPrivateKeyRSA4096Path)}); 
// Either public key should pass the verification. - string public_key; - EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath), - &public_key)); - EXPECT_TRUE( - PayloadVerifier::VerifySignature(signature, public_key, hash_data_)); - EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path), - &public_key)); - EXPECT_TRUE( - PayloadVerifier::VerifySignature(signature, public_key, hash_data_)); - EXPECT_TRUE(utils::ReadFile( - GetBuildArtifactsPath(kUnittestPublicKeyRSA4096Path), &public_key)); - EXPECT_TRUE( - PayloadVerifier::VerifySignature(signature, public_key, hash_data_)); + for (const auto& path : {kUnittestPublicKeyPath, + kUnittestPublicKey2Path, + kUnittestPublicKeyRSA4096Path}) { + string public_key; + EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(path), &public_key)); + auto payload_verifier = PayloadVerifier::CreateInstance(public_key); + EXPECT_TRUE(payload_verifier != nullptr); + EXPECT_TRUE(payload_verifier->VerifySignature(signature, hash_data_)); + } } TEST_F(PayloadSignerTest, VerifySignatureTest) { @@ -153,13 +149,17 @@ TEST_F(PayloadSignerTest, VerifySignatureTest) { string public_key; EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath), &public_key)); - EXPECT_TRUE( - PayloadVerifier::VerifySignature(signature, public_key, hash_data_)); + auto payload_verifier = PayloadVerifier::CreateInstance(public_key); + EXPECT_TRUE(payload_verifier != nullptr); + EXPECT_TRUE(payload_verifier->VerifySignature(signature, hash_data_)); + // Passing the invalid key should fail the verification. + public_key.clear(); EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path), &public_key)); - EXPECT_TRUE( - PayloadVerifier::VerifySignature(signature, public_key, hash_data_)); + payload_verifier = PayloadVerifier::CreateInstance(public_key); + EXPECT_TRUE(payload_verifier != nullptr); + EXPECT_FALSE(payload_verifier->VerifySignature(signature, hash_data_)); } TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) { From 42132993563a200785df323460a886aae1af27ed Mon Sep 17 00:00:00 2001 From: "Kyeongkab.Nam" Date: Thu, 3 Oct 2019 18:04:02 +0900 Subject: [PATCH 119/624] Use ParcelFileDescriptor over FileDescriptor The java.io.FileDescriptor object has a poor definition of ownership, which can result in obscure use-after-close bugs. Instead, APIs should return or accept ParcelFileDescriptor instances. 
Bug: 130209137 Test: manual Change-Id: Iad17731f34109493fc62d0ba0941998ce3ecb98c --- binder_bindings/android/os/IUpdateEngine.aidl | 3 ++- binder_service_android.cc | 7 +++---- binder_service_android.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl index cde05bed..13050795 100644 --- a/binder_bindings/android/os/IUpdateEngine.aidl +++ b/binder_bindings/android/os/IUpdateEngine.aidl @@ -17,6 +17,7 @@ package android.os; import android.os.IUpdateEngineCallback; +import android.os.ParcelFileDescriptor; /** @hide */ interface IUpdateEngine { @@ -26,7 +27,7 @@ interface IUpdateEngine { in long payload_size, in String[] headerKeyValuePairs); /** @hide */ - void applyPayloadFd(in FileDescriptor fd, + void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs); diff --git a/binder_service_android.cc b/binder_service_android.cc index 1799438e..88bc1f2d 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -16,16 +16,15 @@ #include "update_engine/binder_service_android.h" -#include #include #include #include #include #include -using android::base::unique_fd; using android::binder::Status; using android::os::IUpdateEngineCallback; +using android::os::ParcelFileDescriptor; using std::string; using std::vector; using update_engine::UpdateEngineStatus; @@ -115,7 +114,7 @@ Status BinderUpdateEngineAndroidService::applyPayload( } Status BinderUpdateEngineAndroidService::applyPayloadFd( - const ::android::base::unique_fd& fd, + const ParcelFileDescriptor& pfd, int64_t payload_offset, int64_t payload_size, const vector& header_kv_pairs) { @@ -127,7 +126,7 @@ Status BinderUpdateEngineAndroidService::applyPayloadFd( brillo::ErrorPtr error; if (!service_delegate_->ApplyPayload( - fd.get(), payload_offset, payload_size, str_headers, &error)) { + pfd.get(), payload_offset, payload_size, str_headers, &error)) { return ErrorPtrToStatus(error); } return Status::ok(); diff --git a/binder_service_android.h b/binder_service_android.h index ec4a93ba..0dda93bd 100644 --- a/binder_service_android.h +++ b/binder_service_android.h @@ -54,7 +54,7 @@ class BinderUpdateEngineAndroidService : public android::os::BnUpdateEngine, int64_t payload_size, const std::vector& header_kv_pairs) override; android::binder::Status applyPayloadFd( - const ::android::base::unique_fd& fd, + const ::android::os::ParcelFileDescriptor& pfd, int64_t payload_offset, int64_t payload_size, const std::vector& header_kv_pairs) override; From 1e2573f97f2b2aafcf44e539afab4466d17e1014 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 27 Sep 2019 13:40:53 -0700 Subject: [PATCH 120/624] Update update_metadata_pb2.py Generated with: aprotoc --python_out scripts/update_payload update_metadata.proto Test: cd system/update_engine/scripts && ./run_unittests Change-Id: I68b0659e5de7545fb0143a930859c703ec50ae13 --- scripts/update_payload/update_metadata_pb2.py | 361 ++++++++++-------- 1 file changed, 210 insertions(+), 151 deletions(-) diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py index 7f1648b2..cb8f4c22 100644 --- a/scripts/update_payload/update_metadata_pb2.py +++ b/scripts/update_payload/update_metadata_pb2.py @@ -1,19 +1,27 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: update_metadata.proto +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection -from google.protobuf import descriptor_pb2 +from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) +_sym_db = _symbol_database.Default() + DESCRIPTOR = _descriptor.FileDescriptor( name='update_metadata.proto', package='chromeos_update_engine', - serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 
\x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03') + syntax='proto2', + serialized_options=_b('H\003'), + serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 
\x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03') +) @@ -25,54 +33,55 @@ values=[ _descriptor.EnumValueDescriptor( name='REPLACE', index=0, number=0, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='REPLACE_BZ', index=1, number=1, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='MOVE', index=2, number=2, - options=None, + serialized_options=_b('\010\001'), type=None), _descriptor.EnumValueDescriptor( name='BSDIFF', index=3, number=3, - options=None, + serialized_options=_b('\010\001'), type=None), _descriptor.EnumValueDescriptor( name='SOURCE_COPY', index=4, number=4, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='SOURCE_BSDIFF', index=5, number=5, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='REPLACE_XZ', index=6, number=8, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='ZERO', index=7, number=6, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='DISCARD', index=8, number=7, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BROTLI_BSDIFF', index=9, number=10, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='PUFFDIFF', index=10, number=9, - options=None, + serialized_options=None, type=None), ], containing_type=None, - 
options=None, + serialized_options=None, serialized_start=712, - serialized_end=877, + serialized_end=885, ) +_sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) _EXTENT = _descriptor.Descriptor( @@ -88,23 +97,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=49, serialized_end=98, ) @@ -123,23 +135,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=180, serialized_end=222, ) @@ -157,16 +172,19 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_SIGNATURES_SIGNATURE, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=100, serialized_end=222, ) @@ -185,23 +203,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=224, serialized_end=267, ) @@ -217,54 +238,57 @@ _descriptor.FieldDescriptor( name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), 
+ serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4, number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5, number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=269, serialized_end=388, ) @@ -283,63 +307,63 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3, number=4, type=11, 
cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4, number=5, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7, number=8, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8, number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -347,11 +371,14 @@ enum_types=[ _INSTALLOPERATION_TYPE, ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=391, - serialized_end=877, + serialized_end=885, ) @@ -365,126 +392,129 @@ _descriptor.FieldDescriptor( name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11, number=12, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12, number=13, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13, number=14, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15, number=16, type=13, cpp_type=3, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=880, - serialized_end=1607, + oneofs=[ + ], + serialized_start=888, + serialized_end=1615, ) @@ -498,35 +528,38 @@ _descriptor.FieldDescriptor( name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=1609, - serialized_end=1685, + oneofs=[ + ], + serialized_start=1617, + 
serialized_end=1693, ) @@ -543,18 +576,28 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='snapshot_enabled', full_name='chromeos_update_engine.DynamicPartitionMetadata.snapshot_enabled', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=1687, - serialized_end=1776, + oneofs=[ + ], + serialized_start=1695, + serialized_end=1810, ) @@ -571,124 +614,127 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=4096, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4, number=5, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', 
index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11, number=12, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12, number=13, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13, number=14, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=1779, - serialized_end=2596, + oneofs=[ + ], + serialized_start=1813, + serialized_end=2630, ) -_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES; +_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES _SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE _INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE _INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT 
_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT -_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION; +_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION _PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO @@ -717,68 +763,81 @@ DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST +_sym_db.RegisterFileDescriptor(DESCRIPTOR) -class Extent(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _EXTENT - +Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), { + 'DESCRIPTOR' : _EXTENT, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent) + }) +_sym_db.RegisterMessage(Extent) -class Signatures(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - - class Signature(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _SIGNATURES_SIGNATURE +Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), { + 'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), { + 'DESCRIPTOR' : _SIGNATURES_SIGNATURE, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature) - DESCRIPTOR = _SIGNATURES - + }) + , + 'DESCRIPTOR' : _SIGNATURES, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures) + }) +_sym_db.RegisterMessage(Signatures) +_sym_db.RegisterMessage(Signatures.Signature) -class PartitionInfo(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _PARTITIONINFO - +PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), { + 'DESCRIPTOR' : _PARTITIONINFO, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo) + }) +_sym_db.RegisterMessage(PartitionInfo) -class ImageInfo(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _IMAGEINFO - +ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), { + 'DESCRIPTOR' : _IMAGEINFO, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo) + }) +_sym_db.RegisterMessage(ImageInfo) -class InstallOperation(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _INSTALLOPERATION - +InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), { + 'DESCRIPTOR' : _INSTALLOPERATION, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation) + }) +_sym_db.RegisterMessage(InstallOperation) -class PartitionUpdate(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _PARTITIONUPDATE - +PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), { + 'DESCRIPTOR' : _PARTITIONUPDATE, + '__module__' : 
'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate) + }) +_sym_db.RegisterMessage(PartitionUpdate) -class DynamicPartitionGroup(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _DYNAMICPARTITIONGROUP - +DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), { + 'DESCRIPTOR' : _DYNAMICPARTITIONGROUP, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup) + }) +_sym_db.RegisterMessage(DynamicPartitionGroup) -class DynamicPartitionMetadata(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _DYNAMICPARTITIONMETADATA - +DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), { + 'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata) + }) +_sym_db.RegisterMessage(DynamicPartitionMetadata) -class DeltaArchiveManifest(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _DELTAARCHIVEMANIFEST - +DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), { + 'DESCRIPTOR' : _DELTAARCHIVEMANIFEST, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest) + }) +_sym_db.RegisterMessage(DeltaArchiveManifest) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003') +DESCRIPTOR._options = None +_INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None +_INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None # @@protoc_insertion_point(module_scope) From 7bbe015a1bd1cbee5e2cdb0b297aec15b40cc03e Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Wed, 9 Oct 2019 18:11:15 -0700 Subject: [PATCH 121/624] Add EC key signing support The DER encoded signature size of ECDSA with P-256 NIST CURVE is nondeterministic for different input of sha256 hash. For example, the signature size can be 70, 71, 72 bytes with the maximum possible size of 72 bytes. However, we need the size of the serialized signatures protobuf string to be fixed before signing; because the size is part of the content to be signed. To achieve that, we can add padding to the signature; and update the definition of the signature proto to include the unpadded signature size. message Signatures { message Signature { optional uint32 version = 1; optional bytes data = 2; optional fixed32 unpadded_signature_size = 3; } repeated Signature signatures = 1; } Therefore the payload verifier will read the unpadded signature and use it to verify against the public keys. For RSA signatures, the signature data already has the correct size. So the legacy update_engine on the old devices will still be able to verify these signatures in new proto format. We also need to update the version in signature proto, and the minor version of update_engine. 
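For illustration only, the padding and truncation described above can be
sketched as follows (a minimal sketch, not the code in this change;
der_signature, max_signature_size and sig_message are placeholder names, and
the actual implementation is in ConvertSignaturesToProtobuf() and
PayloadVerifier::VerifySignature() below):

  // Signer side: zero-pad the DER signature up to the maximum possible
  // size for the key (72 bytes for ECDSA P-256) and record the real size.
  std::string padded = der_signature;
  padded.resize(max_signature_size, '\0');
  sig_message->set_data(padded);
  sig_message->set_unpadded_signature_size(der_signature.size());

  // Verifier side: strip the padding before handing the signature to the
  // verification routine (e.g. ECDSA_verify).
  std::string sig = sig_message->data().substr(
      0, sig_message->unpadded_signature_size());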
The EC key in the unittest is generated with the command: openssl ecparam -name prime256v1 -genkey -noout -out prime256v1-key.pem openssl pkey -in prime256v1-key.pem -out unittest_key_EC.pem Bug: 141244025 Test: unit tests pass, sign a package with EC key and and install on sailfish Change-Id: I0a16c9f2f2c7fe9ccc1070c87fbbd6b94bc1f542 --- Android.bp | 6 +- .../delta_performer_integration_test.cc | 211 +++++++++++------- payload_consumer/payload_verifier.cc | 51 ++++- payload_generator/generate_delta_main.cc | 6 +- payload_generator/payload_signer.cc | 109 +++++++-- payload_generator/payload_signer.h | 8 + payload_generator/payload_signer_unittest.cc | 9 +- unittest_key_EC.pem | 5 + update_metadata.proto | 11 +- 9 files changed, 295 insertions(+), 121 deletions(-) create mode 100644 unittest_key_EC.pem diff --git a/Android.bp b/Android.bp index 1be0d630..e5e592c4 100644 --- a/Android.bp +++ b/Android.bp @@ -598,16 +598,19 @@ genrule { name: "ue_unittest_keys", cmd: "openssl rsa -in $(location unittest_key.pem) -pubout -out $(location unittest_key.pub.pem) &&" + "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem) &&" + - "openssl rsa -in $(location unittest_key_RSA4096.pem) -pubout -out $(location unittest_key_RSA4096.pub.pem)", + "openssl rsa -in $(location unittest_key_RSA4096.pem) -pubout -out $(location unittest_key_RSA4096.pub.pem) &&" + + "openssl pkey -in $(location unittest_key_EC.pem) -pubout -out $(location unittest_key_EC.pub.pem)", srcs: [ "unittest_key.pem", "unittest_key2.pem", "unittest_key_RSA4096.pem", + "unittest_key_EC.pem", ], out: [ "unittest_key.pub.pem", "unittest_key2.pub.pem", "unittest_key_RSA4096.pub.pem", + "unittest_key_EC.pub.pem", ], } @@ -659,6 +662,7 @@ cc_test { "unittest_key.pem", "unittest_key2.pem", "unittest_key_RSA4096.pem", + "unittest_key_EC.pem", "update_engine.conf", ], diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 38494f21..28c11b67 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -60,6 +60,8 @@ extern const char* kUnittestPrivateKeyPath; extern const char* kUnittestPublicKeyPath; extern const char* kUnittestPrivateKey2Path; extern const char* kUnittestPublicKey2Path; +extern const char* kUnittestPrivateKeyECPath; +extern const char* kUnittestPublicKeyECPath; static const uint32_t kDefaultKernelSize = 4096; // Something small for a test // clang-format off @@ -107,6 +109,7 @@ enum SignatureTest { kSignatureGeneratedPlaceholder, // Insert placeholder signatures, then real. kSignatureGeneratedPlaceholderMismatch, // Insert a wrong sized placeholder. kSignatureGeneratedShell, // Sign the generated payload through shell cmds. + kSignatureGeneratedShellECKey, // Sign with a EC key through shell cmds. kSignatureGeneratedShellBadKey, // Sign with a bad key through shell cmds. 
kSignatureGeneratedShellRotateCl1, // Rotate key, test client v1 kSignatureGeneratedShellRotateCl2, // Rotate key, test client v2 @@ -164,53 +167,127 @@ static bool WriteByteAtOffset(const string& path, off_t offset) { return true; } -static size_t GetSignatureSize(const string& private_key_path) { - const brillo::Blob data(1, 'x'); - brillo::Blob hash; - EXPECT_TRUE(HashCalculator::RawHashOfData(data, &hash)); - brillo::Blob signature; - EXPECT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature)); - return signature.size(); -} - static bool InsertSignaturePlaceholder(size_t signature_size, const string& payload_path, uint64_t* out_metadata_size) { vector signatures; signatures.push_back(brillo::Blob(signature_size, 0)); - return PayloadSigner::AddSignatureToPayload( - payload_path, signatures, {}, payload_path, out_metadata_size); + return PayloadSigner::AddSignatureToPayload(payload_path, + {signature_size}, + signatures, + {}, + payload_path, + out_metadata_size); } static void SignGeneratedPayload(const string& payload_path, uint64_t* out_metadata_size) { string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); - size_t signature_size = GetSignatureSize(private_key_path); + size_t signature_size; + ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path, + &signature_size)); brillo::Blob hash; ASSERT_TRUE(PayloadSigner::HashPayloadForSigning( payload_path, {signature_size}, &hash, nullptr)); brillo::Blob signature; ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature)); - ASSERT_TRUE(PayloadSigner::AddSignatureToPayload( - payload_path, {signature}, {}, payload_path, out_metadata_size)); + ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path, + {signature_size}, + {signature}, + {}, + payload_path, + out_metadata_size)); EXPECT_TRUE(PayloadSigner::VerifySignedPayload( payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath))); } +static void SignGeneratedShellPayloadWithKeys( + const string& payload_path, + const vector& private_key_paths, + const string& public_key_path, + bool verification_success) { + vector signature_size_strings; + for (const auto& key_path : private_key_paths) { + size_t signature_size; + ASSERT_TRUE( + PayloadSigner::GetMaximumSignatureSize(key_path, &signature_size)); + signature_size_strings.push_back(base::StringPrintf("%zu", signature_size)); + } + string signature_size_string = base::JoinString(signature_size_strings, ":"); + + test_utils::ScopedTempFile hash_file("hash.XXXXXX"); + string delta_generator_path = GetBuildArtifactsPath("delta_generator"); + ASSERT_EQ(0, + System(base::StringPrintf( + "%s -in_file=%s -signature_size=%s -out_hash_file=%s", + delta_generator_path.c_str(), + payload_path.c_str(), + signature_size_string.c_str(), + hash_file.path().c_str()))); + + // Sign the hash with all private keys. + vector sig_files; + vector sig_file_paths; + for (const auto& key_path : private_key_paths) { + brillo::Blob hash, signature; + ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash)); + ASSERT_TRUE(PayloadSigner::SignHash(hash, key_path, &signature)); + + test_utils::ScopedTempFile sig_file("signature.XXXXXX"); + ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature)); + sig_file_paths.push_back(sig_file.path()); + sig_files.push_back(std::move(sig_file)); + } + string sig_files_string = base::JoinString(sig_file_paths, ":"); + + // Add the signature to the payload. 
+ ASSERT_EQ(0, + System(base::StringPrintf("%s --signature_size=%s -in_file=%s " + "-payload_signature_file=%s -out_file=%s", + delta_generator_path.c_str(), + signature_size_string.c_str(), + payload_path.c_str(), + sig_files_string.c_str(), + payload_path.c_str()))); + + int verify_result = System(base::StringPrintf("%s -in_file=%s -public_key=%s", + delta_generator_path.c_str(), + payload_path.c_str(), + public_key_path.c_str())); + + if (verification_success) { + ASSERT_EQ(0, verify_result); + } else { + ASSERT_NE(0, verify_result); + } +} + static void SignGeneratedShellPayload(SignatureTest signature_test, const string& payload_path) { - string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); + vector supported_test = { + kSignatureGeneratedShell, + kSignatureGeneratedShellBadKey, + kSignatureGeneratedShellECKey, + kSignatureGeneratedShellRotateCl1, + kSignatureGeneratedShellRotateCl2, + }; + ASSERT_TRUE(std::find(supported_test.begin(), + supported_test.end(), + signature_test) != supported_test.end()); + + string private_key_path; if (signature_test == kSignatureGeneratedShellBadKey) { ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX", &private_key_path, nullptr)); + } else if (signature_test == kSignatureGeneratedShellECKey) { + private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyECPath); } else { - ASSERT_TRUE(signature_test == kSignatureGeneratedShell || - signature_test == kSignatureGeneratedShellRotateCl1 || - signature_test == kSignatureGeneratedShellRotateCl2); + private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); } ScopedPathUnlinker key_unlinker(private_key_path); key_unlinker.set_should_remove(signature_test == kSignatureGeneratedShellBadKey); + // Generates a new private key that will not match the public key. 
if (signature_test == kSignatureGeneratedShellBadKey) { LOG(INFO) << "Generating a mismatched private key."; @@ -229,64 +306,26 @@ static void SignGeneratedShellPayload(SignatureTest signature_test, fclose(fprikey); RSA_free(rsa); } - size_t signature_size = GetSignatureSize(private_key_path); - test_utils::ScopedTempFile hash_file("hash.XXXXXX"); - string signature_size_string; - if (signature_test == kSignatureGeneratedShellRotateCl1 || - signature_test == kSignatureGeneratedShellRotateCl2) - signature_size_string = - base::StringPrintf("%zu:%zu", signature_size, signature_size); - else - signature_size_string = base::StringPrintf("%zu", signature_size); - string delta_generator_path = GetBuildArtifactsPath("delta_generator"); - ASSERT_EQ(0, - System(base::StringPrintf( - "%s -in_file=%s -signature_size=%s -out_hash_file=%s", - delta_generator_path.c_str(), - payload_path.c_str(), - signature_size_string.c_str(), - hash_file.path().c_str()))); - // Sign the hash - brillo::Blob hash, signature; - ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash)); - ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature)); - - test_utils::ScopedTempFile sig_file("signature.XXXXXX"); - ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature)); - string sig_files = sig_file.path(); - - test_utils::ScopedTempFile sig_file2("signature.XXXXXX"); + vector private_key_paths = {private_key_path}; if (signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) { - ASSERT_TRUE(PayloadSigner::SignHash( - hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature)); - ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature)); - // Append second sig file to first path - sig_files += ":" + sig_file2.path(); + private_key_paths.push_back( + GetBuildArtifactsPath(kUnittestPrivateKey2Path)); } - ASSERT_EQ(0, - System(base::StringPrintf( - "%s -in_file=%s -payload_signature_file=%s -out_file=%s", - delta_generator_path.c_str(), - payload_path.c_str(), - sig_files.c_str(), - payload_path.c_str()))); - int verify_result = System(base::StringPrintf( - "%s -in_file=%s -public_key=%s -public_key_version=%d", - delta_generator_path.c_str(), - payload_path.c_str(), - (signature_test == kSignatureGeneratedShellRotateCl2 - ? GetBuildArtifactsPath(kUnittestPublicKey2Path) - : GetBuildArtifactsPath(kUnittestPublicKeyPath)) - .c_str(), - signature_test == kSignatureGeneratedShellRotateCl2 ? 
2 : 1)); - if (signature_test == kSignatureGeneratedShellBadKey) { - ASSERT_NE(0, verify_result); + std::string public_key; + if (signature_test == kSignatureGeneratedShellRotateCl2) { + public_key = GetBuildArtifactsPath(kUnittestPublicKey2Path); + } else if (signature_test == kSignatureGeneratedShellECKey) { + public_key = GetBuildArtifactsPath(kUnittestPublicKeyECPath); } else { - ASSERT_EQ(0, verify_result); + public_key = GetBuildArtifactsPath(kUnittestPublicKeyPath); } + + bool verification_success = signature_test != kSignatureGeneratedShellBadKey; + SignGeneratedShellPayloadWithKeys( + payload_path, private_key_paths, public_key, verification_success); } static void GenerateDeltaFile(bool full_kernel, @@ -531,8 +570,9 @@ static void GenerateDeltaFile(bool full_kernel, if (signature_test == kSignatureGeneratedPlaceholder || signature_test == kSignatureGeneratedPlaceholderMismatch) { - size_t signature_size = - GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath)); + size_t signature_size; + ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize( + GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size)); LOG(INFO) << "Inserting placeholder signature."; ASSERT_TRUE(InsertSignaturePlaceholder( signature_size, state->delta_path, &state->metadata_size)); @@ -555,6 +595,7 @@ static void GenerateDeltaFile(bool full_kernel, LOG(INFO) << "Signing payload."; SignGeneratedPayload(state->delta_path, &state->metadata_size); } else if (signature_test == kSignatureGeneratedShell || + signature_test == kSignatureGeneratedShellECKey || signature_test == kSignatureGeneratedShellBadKey || signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) { @@ -597,14 +638,15 @@ static void ApplyDeltaFile(bool full_kernel, else EXPECT_EQ(1, sigs_message.signatures_size()); const Signatures::Signature& signature = sigs_message.signatures(0); - EXPECT_EQ(1U, signature.version()); - uint64_t expected_sig_data_length = 0; vector key_paths{GetBuildArtifactsPath(kUnittestPrivateKeyPath)}; - if (signature_test == kSignatureGeneratedShellRotateCl1 || - signature_test == kSignatureGeneratedShellRotateCl2) { + if (signature_test == kSignatureGeneratedShellECKey) { + key_paths = {GetBuildArtifactsPath(kUnittestPrivateKeyECPath)}; + } else if (signature_test == kSignatureGeneratedShellRotateCl1 || + signature_test == kSignatureGeneratedShellRotateCl2) { key_paths.push_back(GetBuildArtifactsPath(kUnittestPrivateKey2Path)); } + uint64_t expected_sig_data_length = 0; EXPECT_TRUE(PayloadSigner::SignatureBlobLength( key_paths, &expected_sig_data_length)); EXPECT_EQ(expected_sig_data_length, manifest.signatures_size()); @@ -717,7 +759,9 @@ static void ApplyDeltaFile(bool full_kernel, ASSERT_TRUE(PayloadSigner::GetMetadataSignature( state->delta.data(), state->metadata_size, - GetBuildArtifactsPath(kUnittestPrivateKeyPath), + (signature_test == kSignatureGeneratedShellECKey) + ? GetBuildArtifactsPath(kUnittestPrivateKeyECPath) + : GetBuildArtifactsPath(kUnittestPrivateKeyPath), &install_plan->payloads[0].metadata_signature)); EXPECT_FALSE(install_plan->payloads[0].metadata_signature.empty()); @@ -728,7 +772,9 @@ static void ApplyDeltaFile(bool full_kernel, install_plan, &install_plan->payloads[0], false /* interactive */); - string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath); + string public_key_path = signature_test == kSignatureGeneratedShellECKey + ? 
GetBuildArtifactsPath(kUnittestPublicKeyECPath) + : GetBuildArtifactsPath(kUnittestPublicKeyPath); EXPECT_TRUE(utils::FileExists(public_key_path.c_str())); (*performer)->set_public_key_path(public_key_path); @@ -1059,6 +1105,17 @@ TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) { kInPlaceMinorPayloadVersion); } +TEST(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignGeneratedShellECKeyTest) { + DoSmallImageTest(false, + false, + false, + -1, + kSignatureGeneratedShellECKey, + false, + kInPlaceMinorPayloadVersion); +} + TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellBadKeyTest) { DoSmallImageTest(false, diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc index b2c5be47..02eeb76e 100644 --- a/payload_consumer/payload_verifier.cc +++ b/payload_consumer/payload_verifier.cc @@ -88,7 +88,19 @@ bool PayloadVerifier::VerifySignature( // Tries every signature in the signature blob. for (int i = 0; i < signatures.signatures_size(); i++) { const Signatures::Signature& signature = signatures.signatures(i); - brillo::Blob sig_data(signature.data().begin(), signature.data().end()); + brillo::Blob sig_data; + if (signature.has_unpadded_signature_size()) { + TEST_AND_RETURN_FALSE(signature.unpadded_signature_size() <= + signature.data().size()); + LOG(INFO) << "Truncating the signature to its unpadded size: " + << signature.unpadded_signature_size() << "."; + sig_data.assign( + signature.data().begin(), + signature.data().begin() + signature.unpadded_signature_size()); + } else { + sig_data.assign(signature.data().begin(), signature.data().end()); + } + brillo::Blob sig_hash_data; if (VerifyRawSignature(sig_data, sha256_hash_data, &sig_hash_data)) { LOG(INFO) << "Verified correct signature " << i + 1 << " out of " @@ -102,7 +114,7 @@ bool PayloadVerifier::VerifySignature( LOG(ERROR) << "None of the " << signatures.signatures_size() << " signatures is correct. 
Expected hash before padding:"; utils::HexDumpVector(sha256_hash_data); - LOG(ERROR) << "But found decrypted hashes:"; + LOG(ERROR) << "But found RSA decrypted hashes:"; for (const auto& sig_hash_data : tested_hashes) { utils::HexDumpVector(sig_hash_data); } @@ -116,20 +128,35 @@ bool PayloadVerifier::VerifyRawSignature( TEST_AND_RETURN_FALSE(public_key_ != nullptr); int key_type = EVP_PKEY_id(public_key_.get()); - TEST_AND_RETURN_FALSE(key_type == EVP_PKEY_RSA); - brillo::Blob sig_hash_data; - TEST_AND_RETURN_FALSE( - GetRawHashFromSignature(sig_data, public_key_.get(), &sig_hash_data)); + if (key_type == EVP_PKEY_RSA) { + brillo::Blob sig_hash_data; + TEST_AND_RETURN_FALSE( + GetRawHashFromSignature(sig_data, public_key_.get(), &sig_hash_data)); + + if (decrypted_sig_data != nullptr) { + *decrypted_sig_data = sig_hash_data; + } + + brillo::Blob padded_hash_data = sha256_hash_data; + TEST_AND_RETURN_FALSE( + PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size())); - if (decrypted_sig_data != nullptr) { - *decrypted_sig_data = sig_hash_data; + return padded_hash_data == sig_hash_data; } - brillo::Blob padded_hash_data = sha256_hash_data; - TEST_AND_RETURN_FALSE( - PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size())); + if (key_type == EVP_PKEY_EC) { + EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(public_key_.get()); + TEST_AND_RETURN_FALSE(ec_key != nullptr); + return ECDSA_verify(0, + sha256_hash_data.data(), + sha256_hash_data.size(), + sig_data.data(), + sig_data.size(), + ec_key) == 1; + } - return padded_hash_data == sig_hash_data; + LOG(ERROR) << "Unsupported key type " << key_type; + return false; } bool PayloadVerifier::GetRawHashFromSignature( diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index bef09bb0..1323534f 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -63,9 +63,6 @@ void ParseSignatureSizes(const string& signature_sizes_flag, bool parsing_successful = base::StringToSizeT(str, &size); LOG_IF(FATAL, !parsing_successful) << "Invalid signature size: " << str; - LOG_IF(FATAL, size != 256 && size != 512) - << "Only signature sizes of 256 or 512 bytes are supported."; - signature_sizes->push_back(size); } } @@ -138,6 +135,7 @@ void SignatureFileFlagToBlobs(const string& signature_file_flag, void SignPayload(const string& in_file, const string& out_file, + const vector& signature_sizes, const string& payload_signature_file, const string& metadata_signature_file, const string& out_metadata_size_file) { @@ -151,6 +149,7 @@ void SignPayload(const string& in_file, SignatureFileFlagToBlobs(metadata_signature_file, &metadata_signatures); uint64_t final_metadata_size; CHECK(PayloadSigner::AddSignatureToPayload(in_file, + signature_sizes, payload_signatures, metadata_signatures, out_file, @@ -461,6 +460,7 @@ int Main(int argc, char** argv) { if (!FLAGS_payload_signature_file.empty()) { SignPayload(FLAGS_in_file, FLAGS_out_file, + signature_sizes, FLAGS_payload_signature_file, FLAGS_metadata_signature_file, FLAGS_out_metadata_size_file); diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 3c9ce954..72780b12 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -47,23 +47,29 @@ using std::vector; namespace chromeos_update_engine { namespace { - -// The payload verifier will check all the signatures included in the payload -// regardless of the version field. 
Old version of the verifier require the -// version field to be included and be 1. -const uint32_t kSignatureMessageLegacyVersion = 1; - // Given raw |signatures|, packs them into a protobuf and serializes it into a // string. Returns true on success, false otherwise. bool ConvertSignaturesToProtobuf(const vector& signatures, + const vector& padded_signature_sizes, string* out_serialized_signature) { + TEST_AND_RETURN_FALSE(signatures.size() == padded_signature_sizes.size()); // Pack it into a protobuf Signatures out_message; - for (const brillo::Blob& signature : signatures) { + for (size_t i = 0; i < signatures.size(); i++) { + const auto& signature = signatures[i]; + const auto& padded_signature_size = padded_signature_sizes[i]; + TEST_AND_RETURN_FALSE(padded_signature_size >= signature.size()); Signatures::Signature* sig_message = out_message.add_signatures(); - // Set all the signatures with the same version number. - sig_message->set_version(kSignatureMessageLegacyVersion); - sig_message->set_data(signature.data(), signature.size()); + // Skip assigning the same version number because we don't need to be + // compatible with old major version 1 client anymore. + + // TODO(Xunchang) don't need to set the unpadded_signature_size field for + // RSA key signed signatures. + sig_message->set_unpadded_signature_size(signature.size()); + brillo::Blob padded_signature = signature; + padded_signature.insert( + padded_signature.end(), padded_signature_size - signature.size(), 0); + sig_message->set_data(padded_signature.data(), padded_signature.size()); } // Serialize protobuf @@ -204,8 +210,35 @@ bool CalculateHashFromPayload(const brillo::Blob& payload, return true; } +std::unique_ptr CreatePrivateKeyFromPath( + const string& private_key_path) { + FILE* fprikey = fopen(private_key_path.c_str(), "rb"); + if (!fprikey) { + PLOG(ERROR) << "Failed to read " << private_key_path; + return {nullptr, nullptr}; + } + + auto private_key = std::unique_ptr( + PEM_read_PrivateKey(fprikey, nullptr, nullptr, nullptr), EVP_PKEY_free); + fclose(fprikey); + return private_key; +} + } // namespace +bool PayloadSigner::GetMaximumSignatureSize(const string& private_key_path, + size_t* signature_size) { + *signature_size = 0; + auto private_key = CreatePrivateKeyFromPath(private_key_path); + if (!private_key) { + LOG(ERROR) << "Failed to create private key from " << private_key_path; + return false; + } + + *signature_size = EVP_PKEY_size(private_key.get()); + return true; +} + void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset, uint64_t signature_blob_length, bool add_dummy_op, @@ -283,13 +316,11 @@ bool PayloadSigner::SignHash(const brillo::Blob& hash, // openssl rsautl -raw -sign -inkey |private_key_path| // -in |padded_hash| -out |out_signature| - FILE* fprikey = fopen(private_key_path.c_str(), "rb"); - TEST_AND_RETURN_FALSE(fprikey != nullptr); - - std::unique_ptr private_key( - PEM_read_PrivateKey(fprikey, nullptr, nullptr, nullptr), EVP_PKEY_free); - fclose(fprikey); - TEST_AND_RETURN_FALSE(private_key != nullptr); + auto private_key = CreatePrivateKeyFromPath(private_key_path); + if (!private_key) { + LOG(ERROR) << "Failed to create private key from " << private_key_path; + return false; + } int key_type = EVP_PKEY_id(private_key.get()); brillo::Blob signature; @@ -314,6 +345,28 @@ bool PayloadSigner::SignHash(const brillo::Blob& hash, } TEST_AND_RETURN_FALSE(static_cast(signature_size) == signature.size()); + } else if (key_type == EVP_PKEY_EC) { + EC_KEY* ec_key = 
EVP_PKEY_get0_EC_KEY(private_key.get()); + TEST_AND_RETURN_FALSE(ec_key != nullptr); + + signature.resize(ECDSA_size(ec_key)); + unsigned int signature_size; + if (ECDSA_sign(0, + hash.data(), + hash.size(), + signature.data(), + &signature_size, + ec_key) != 1) { + LOG(ERROR) << "Signing hash failed: " + << ERR_error_string(ERR_get_error(), nullptr); + return false; + } + + // NIST P-256 + LOG(ERROR) << "signature max size " << signature.size() << " size " + << signature_size; + TEST_AND_RETURN_FALSE(signature.size() >= signature_size); + signature.resize(signature_size); } else { LOG(ERROR) << "key_type " << key_type << " isn't supported for signing"; return false; @@ -326,13 +379,19 @@ bool PayloadSigner::SignHashWithKeys(const brillo::Blob& hash_data, const vector& private_key_paths, string* out_serialized_signature) { vector signatures; + vector padded_signature_sizes; for (const string& path : private_key_paths) { brillo::Blob signature; TEST_AND_RETURN_FALSE(SignHash(hash_data, path, &signature)); signatures.push_back(signature); + + size_t padded_signature_size; + TEST_AND_RETURN_FALSE( + GetMaximumSignatureSize(path, &padded_signature_size)); + padded_signature_sizes.push_back(padded_signature_size); } - TEST_AND_RETURN_FALSE( - ConvertSignaturesToProtobuf(signatures, out_serialized_signature)); + TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf( + signatures, padded_signature_sizes, out_serialized_signature)); return true; } @@ -379,7 +438,8 @@ bool PayloadSigner::HashPayloadForSigning(const string& payload_path, signatures.emplace_back(signature_size, 0); } string signature; - TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(signatures, &signature)); + TEST_AND_RETURN_FALSE( + ConvertSignaturesToProtobuf(signatures, signature_sizes, &signature)); brillo::Blob payload; uint64_t metadata_size, signatures_offset; @@ -403,6 +463,7 @@ bool PayloadSigner::HashPayloadForSigning(const string& payload_path, bool PayloadSigner::AddSignatureToPayload( const string& payload_path, + const vector& padded_signature_sizes, const vector& payload_signatures, const vector& metadata_signatures, const string& signed_payload_path, @@ -411,11 +472,11 @@ bool PayloadSigner::AddSignatureToPayload( // Loads the payload and adds the signature op to it. string payload_signature, metadata_signature; - TEST_AND_RETURN_FALSE( - ConvertSignaturesToProtobuf(payload_signatures, &payload_signature)); + TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf( + payload_signatures, padded_signature_sizes, &payload_signature)); if (!metadata_signatures.empty()) { - TEST_AND_RETURN_FALSE( - ConvertSignaturesToProtobuf(metadata_signatures, &metadata_signature)); + TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf( + metadata_signatures, padded_signature_sizes, &metadata_signature)); } brillo::Blob payload; uint64_t signatures_offset; diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h index 76e583b8..bd1e32f6 100644 --- a/payload_generator/payload_signer.h +++ b/payload_generator/payload_signer.h @@ -105,6 +105,7 @@ class PayloadSigner { // otherwise. 
static bool AddSignatureToPayload( const std::string& payload_path, + const std::vector& padded_signature_sizes, const std::vector& payload_signatures, const std::vector& metadata_signatures, const std::string& signed_payload_path, @@ -122,6 +123,13 @@ class PayloadSigner { static bool ExtractPayloadProperties(const std::string& payload_path, brillo::KeyValueStore* properties); + // This function calculates the maximum size, in bytes, of a signature signed + // by private_key_path. For an RSA key, this returns the number of bytes + // needed to represent the modulus. For an EC key, this returns the maximum + // size of a DER-encoded ECDSA signature. + static bool GetMaximumSignatureSize(const std::string& private_key_path, + size_t* signature_size); + private: // This should never be constructed DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadSigner); diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc index 457b34cd..bf7100b5 100644 --- a/payload_generator/payload_signer_unittest.cc +++ b/payload_generator/payload_signer_unittest.cc @@ -46,6 +46,8 @@ const char* kUnittestPrivateKey2Path = "unittest_key2.pem"; const char* kUnittestPublicKey2Path = "unittest_key2.pub.pem"; const char* kUnittestPrivateKeyRSA4096Path = "unittest_key_RSA4096.pem"; const char* kUnittestPublicKeyRSA4096Path = "unittest_key_RSA4096.pub.pem"; +const char* kUnittestPrivateKeyECPath = "unittest_key_EC.pem"; +const char* kUnittestPublicKeyECPath = "unittest_key_EC.pub.pem"; // Some data and its corresponding hash and signature: const char kDataToSign[] = "This is some data to sign."; @@ -115,7 +117,6 @@ TEST_F(PayloadSignerTest, SignSimpleTextTest) { EXPECT_TRUE(signatures.ParseFromString(signature)); EXPECT_EQ(1, signatures.signatures_size()); const Signatures::Signature& sig = signatures.signatures(0); - EXPECT_EQ(1U, sig.version()); const string& sig_data = sig.data(); ASSERT_EQ(arraysize(kDataSignature), sig_data.size()); for (size_t i = 0; i < arraysize(kDataSignature); i++) { @@ -128,12 +129,14 @@ TEST_F(PayloadSignerTest, VerifyAllSignatureTest) { SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath), GetBuildArtifactsPath(kUnittestPrivateKey2Path), - GetBuildArtifactsPath(kUnittestPrivateKeyRSA4096Path)}); + GetBuildArtifactsPath(kUnittestPrivateKeyRSA4096Path), + GetBuildArtifactsPath(kUnittestPrivateKeyECPath)}); // Either public key should pass the verification. 
for (const auto& path : {kUnittestPublicKeyPath, kUnittestPublicKey2Path, - kUnittestPublicKeyRSA4096Path}) { + kUnittestPublicKeyRSA4096Path, + kUnittestPublicKeyECPath}) { string public_key; EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(path), &public_key)); auto payload_verifier = PayloadVerifier::CreateInstance(public_key); diff --git a/unittest_key_EC.pem b/unittest_key_EC.pem new file mode 100644 index 00000000..9e65a68a --- /dev/null +++ b/unittest_key_EC.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgGaguGj8Yb1KkqKHd +ISblUsjtOCbzAuVpX81i02sm8FWhRANCAARBnuotwKOsuvjH6iwTDhOAi7Q5pLWz +xDkZjg2pcfbfi9FFTvLYETas7B2W6fx9PUezUmHTFTDV2JZuMYYFdZOw +-----END PRIVATE KEY----- diff --git a/update_metadata.proto b/update_metadata.proto index 1657a7e2..9bc0d8a5 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -126,8 +126,17 @@ message Extent { message Signatures { message Signature { - optional uint32 version = 1; + optional uint32 version = 1 [deprecated = true]; optional bytes data = 2; + + // The DER encoded signature size of EC keys is nondeterministic for + // different input of sha256 hash. However, we need the size of the + // serialized signatures protobuf string to be fixed before signing; + // because this size is part of the content to be signed. Therefore, we + // always pad the signature data to the maximum possible signature size of + // a given key. And the payload verifier will truncate the signature to + // its correct size based on the value of |unpadded_signature_size|. + optional fixed32 unpadded_signature_size = 3; } repeated Signature signatures = 1; } From 9cad266457c59e8330c830c369784764b36386c1 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Fri, 11 Oct 2019 17:10:47 -0700 Subject: [PATCH 122/624] Add a command in delta_generator to calculate the maximum signature size The ota generation script used to call openssl functions to get the signature size. This is no longer viable for EC keys. So we add this new functionality here in the delta_generator. Because the signature size will be later used by the delta_generator to sign the payload. Bug: 141244025 Test: call the binary with new option Change-Id: Id743325242faf7a2b2dcec5e218219dba12a8e88 --- payload_generator/generate_delta_main.cc | 28 ++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 1323534f..f035ff16 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -423,6 +423,10 @@ int Main(int argc, char** argv) { DEFINE_bool(disable_fec_computation, false, "Disables the fec data computation on device."); + DEFINE_string( + out_maximum_signature_size_file, + "", + "Path to the output maximum signature size given a private key."); brillo::FlagHelper::Init( argc, @@ -444,6 +448,30 @@ int Main(int argc, char** argv) { // Initialize the Xz compressor. XzCompressInit(); + if (!FLAGS_out_maximum_signature_size_file.empty()) { + LOG_IF(FATAL, FLAGS_private_key.empty()) + << "Private key is not provided when calculating the maximum signature " + "size."; + + size_t maximum_signature_size; + if (!PayloadSigner::GetMaximumSignatureSize(FLAGS_private_key, + &maximum_signature_size)) { + LOG(ERROR) << "Failed to get the maximum signature size of private key: " + << FLAGS_private_key; + return 1; + } + // Write the size string to output file. 
+ string signature_size_string = std::to_string(maximum_signature_size); + if (!utils::WriteFile(FLAGS_out_maximum_signature_size_file.c_str(), + signature_size_string.c_str(), + signature_size_string.size())) { + PLOG(ERROR) << "Failed to write the maximum signature size to " + << FLAGS_out_maximum_signature_size_file << "."; + return 1; + } + return 0; + } + vector signature_sizes; if (!FLAGS_signature_size.empty()) { ParseSignatureSizes(FLAGS_signature_size, &signature_sizes); From 7a78d630e11532d6f854ba7d794362804b33c950 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Tue, 8 Oct 2019 16:32:39 -0700 Subject: [PATCH 123/624] Allow update engine read public keys from otacerts.zip The android build system installs both otacerts.zip and update-payload-key.pub.pem on the device. And the latter is converted from the X509 certificates inside the otacerts.zip during the build time. We can consolidate these two by letting update engine to parse the public keys from otacerts.zip directly. This also allows update engine to use multiple keys to verify the payload. Bug: 116660991 Test: unittests pass Change-Id: I0a499405f2835e1ff8b7916452cb3123046306a7 --- Android.bp | 4 + common/platform_constants.h | 4 + common/platform_constants_android.cc | 4 +- common/platform_constants_chromeos.cc | 1 + otacerts.zip | Bin 0 -> 1083 bytes .../certificate_parser_android.cc | 121 ++++++++++++++++++ payload_consumer/certificate_parser_android.h | 46 +++++++ .../certificate_parser_android_unittest.cc | 61 +++++++++ .../certificate_parser_interface.h | 44 +++++++ payload_consumer/certificate_parser_stub.cc | 31 +++++ payload_consumer/certificate_parser_stub.h | 44 +++++++ payload_consumer/delta_performer.cc | 53 +++++--- payload_consumer/delta_performer.h | 17 +++ .../delta_performer_integration_test.cc | 1 + payload_consumer/delta_performer_unittest.cc | 12 +- payload_consumer/payload_metadata.cc | 15 +-- payload_consumer/payload_metadata.h | 8 +- payload_consumer/payload_verifier.cc | 96 +++++++++----- payload_consumer/payload_verifier.h | 16 ++- update_attempter_android.cc | 16 ++- 20 files changed, 513 insertions(+), 81 deletions(-) create mode 100644 otacerts.zip create mode 100644 payload_consumer/certificate_parser_android.cc create mode 100644 payload_consumer/certificate_parser_android.h create mode 100644 payload_consumer/certificate_parser_android_unittest.cc create mode 100644 payload_consumer/certificate_parser_interface.h create mode 100644 payload_consumer/certificate_parser_stub.cc create mode 100644 payload_consumer/certificate_parser_stub.h diff --git a/Android.bp b/Android.bp index e5e592c4..a691e7e8 100644 --- a/Android.bp +++ b/Android.bp @@ -128,6 +128,7 @@ cc_defaults { "libverity_tree", ], shared_libs: [ + "libziparchive", "libbase", "libcrypto", "libfec", @@ -164,6 +165,7 @@ cc_library_static { "common/utils.cc", "payload_consumer/bzip_extent_writer.cc", "payload_consumer/cached_file_descriptor.cc", + "payload_consumer/certificate_parser_android.cc", "payload_consumer/delta_performer.cc", "payload_consumer/download_action.cc", "payload_consumer/extent_reader.cc", @@ -659,6 +661,7 @@ cc_test { ":ue_unittest_delta_generator", ":ue_unittest_disk_imgs", ":ue_unittest_keys", + "otacerts.zip", "unittest_key.pem", "unittest_key2.pem", "unittest_key_RSA4096.pem", @@ -693,6 +696,7 @@ cc_test { "dynamic_partition_control_android_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", + 
"payload_consumer/certificate_parser_android_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", "payload_consumer/delta_performer_unittest.cc", "payload_consumer/extent_reader_unittest.cc", diff --git a/common/platform_constants.h b/common/platform_constants.h index 6eaa940e..243af69e 100644 --- a/common/platform_constants.h +++ b/common/platform_constants.h @@ -38,6 +38,10 @@ extern const char kOmahaPlatformName[]; // whole payload. extern const char kUpdatePayloadPublicKeyPath[]; +// Path to the location of the zip archive file that contains PEM encoded X509 +// certificates. e.g. 'system/etc/security/otacerts.zip'. +extern const char kUpdateCertificatesPath[]; + // Path to the directory containing all the SSL certificates accepted by // update_engine when sending requests to Omaha and the download server (if // HTTPS is used for that as well). diff --git a/common/platform_constants_android.cc b/common/platform_constants_android.cc index 9d8d30e8..f468c3ba 100644 --- a/common/platform_constants_android.cc +++ b/common/platform_constants_android.cc @@ -25,8 +25,8 @@ const char kOmahaDefaultAUTestURL[] = "https://clients2.google.com/service/update2/brillo"; const char kOmahaUpdaterID[] = "Brillo"; const char kOmahaPlatformName[] = "Brillo"; -const char kUpdatePayloadPublicKeyPath[] = - "/etc/update_engine/update-payload-key.pub.pem"; +const char kUpdatePayloadPublicKeyPath[] = ""; +const char kUpdateCertificatesPath[] = "/system/etc/security/otacerts.zip"; const char kCACertificatesPath[] = "/system/etc/security/cacerts_google"; // No deadline file API support on Android. const char kOmahaResponseDeadlineFile[] = ""; diff --git a/common/platform_constants_chromeos.cc b/common/platform_constants_chromeos.cc index f1ac4909..fe94a45a 100644 --- a/common/platform_constants_chromeos.cc +++ b/common/platform_constants_chromeos.cc @@ -27,6 +27,7 @@ const char kOmahaUpdaterID[] = "ChromeOSUpdateEngine"; const char kOmahaPlatformName[] = "Chrome OS"; const char kUpdatePayloadPublicKeyPath[] = "/usr/share/update_engine/update-payload-key.pub.pem"; +const char kUpdateCertificatesPath[] = ""; const char kCACertificatesPath[] = "/usr/share/chromeos-ca-certificates"; const char kOmahaResponseDeadlineFile[] = "/tmp/update-check-response-deadline"; // This directory is wiped during powerwash. 
diff --git a/otacerts.zip b/otacerts.zip new file mode 100644 index 0000000000000000000000000000000000000000..00a5a513d42c2b8ff72980583ea6841e3e5b7a6b GIT binary patch literal 1083 zcmWIWW@Zs#U|`^2n9?QVA64jlrXtT*b$kg#6dVmOh)gLfmMA&oaJi6%mq>$4$1qsmq`C{S0eJF^dOd z|IB9s0n!e4GB>Db^p){{VG`51^I-18hHJW^UdLq z+J5TrbRmHs|7N}Uo&Dy%_k;V2pSVAn_WcaMoV7w+{zvYwYkx9Vd^j5T!%RfH^%>(I zu?up0-@lKkoPXtekzVeT>lfdiiwk}8aPm^G3mSf5+}GRqw?5|o-Tvpo=j#RCf2Y@< zY4>GYyV6a(b4%#;Ygcz%pYAqA?fk~(-FIzndzGv+W4*@O-MM~SRn^rR?Rk!NwPu#@ zAD#SAwdK-NwYit8qy3+E>zzyE{%U2haBY$Hrsb=%Pf7in)SA{&F@5gYKaSEr!`gI1 z3dpQXSyJK|9`RUePSGl12L%hg z|DTN?&i~uSn^|?dLgf07&Nt}j1d%N+ zOz-kudfk_o*(mk1nrXB9|>}5tdUIkUoazgmpbR9-&_?hk1DUcJ|$sag^hU{vtF=Z%+aMBg4fNM z91a$Gr6=D1R6Nbj;kiM8?_#n2a~mcv*lfP;PW<}2o*xe>-B7=@X4~a22NvI%!2f(2 zN4LAqZCTDKi{$wudu-O+<(P9fw2P@y{^8y!GU0sp+ppD4I21Z%_3J|2S7&)yUdv1o zXI#Pm%%;aUxnPmwzcq)Kge7kdc(U!a@UP_`ia%%S{i~J!#~R?x$Rx*%D>F*~GdTkT kBM>iX1hKGZZB|IuM$6U#-mGjOJ&ZsY1*A^{iwFh=0Hho8Gynhq literal 0 HcmV?d00001 diff --git a/payload_consumer/certificate_parser_android.cc b/payload_consumer/certificate_parser_android.cc new file mode 100644 index 00000000..4a20547a --- /dev/null +++ b/payload_consumer/certificate_parser_android.cc @@ -0,0 +1,121 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/payload_consumer/certificate_parser_android.h" + +#include +#include + +#include +#include +#include +#include + +#include "update_engine/payload_consumer/certificate_parser_interface.h" + +namespace { +bool IterateZipEntriesAndSearchForKeys( + const ZipArchiveHandle& handle, std::vector>* result) { + void* cookie; + int32_t iter_status = StartIteration(handle, &cookie, "", "x509.pem"); + if (iter_status != 0) { + LOG(ERROR) << "Failed to iterate over entries in the certificate zipfile: " + << ErrorCodeString(iter_status); + return false; + } + std::unique_ptr guard(cookie, EndIteration); + + std::vector> pem_keys; + std::string_view name; + ZipEntry entry; + while ((iter_status = Next(cookie, &entry, &name)) == 0) { + std::vector pem_content(entry.uncompressed_length); + if (int32_t extract_status = ExtractToMemory( + handle, &entry, pem_content.data(), pem_content.size()); + extract_status != 0) { + LOG(ERROR) << "Failed to extract " << name << ": " + << ErrorCodeString(extract_status); + return false; + } + pem_keys.push_back(pem_content); + } + + if (iter_status != -1) { + LOG(ERROR) << "Error while iterating over zip entries: " + << ErrorCodeString(iter_status); + return false; + } + + *result = std::move(pem_keys); + return true; +} + +} // namespace + +namespace chromeos_update_engine { +bool CertificateParserAndroid::ReadPublicKeysFromCertificates( + const std::string& path, + std::vector>* + out_public_keys) { + out_public_keys->clear(); + + ZipArchiveHandle handle; + if (int32_t open_status = OpenArchive(path.c_str(), &handle); + open_status != 0) { + LOG(ERROR) << "Failed to open " << path << ": " + << ErrorCodeString(open_status); + return false; + } + + std::vector> pem_certs; + if (!IterateZipEntriesAndSearchForKeys(handle, &pem_certs)) { + CloseArchive(handle); + return false; + } + CloseArchive(handle); + + // Convert the certificates into public keys. Stop and return false if we + // encounter an error. + std::vector> result; + for (const auto& cert : pem_certs) { + std::unique_ptr input( + BIO_new_mem_buf(cert.data(), cert.size()), BIO_free); + + std::unique_ptr x509( + PEM_read_bio_X509(input.get(), nullptr, nullptr, nullptr), X509_free); + if (!x509) { + LOG(ERROR) << "Failed to read x509 certificate"; + return false; + } + + std::unique_ptr public_key( + X509_get_pubkey(x509.get()), EVP_PKEY_free); + if (!public_key) { + LOG(ERROR) << "Failed to extract the public key from x509 certificate"; + return false; + } + result.push_back(std::move(public_key)); + } + + *out_public_keys = std::move(result); + return true; +} + +std::unique_ptr CreateCertificateParser() { + return std::make_unique(); +} + +} // namespace chromeos_update_engine diff --git a/payload_consumer/certificate_parser_android.h b/payload_consumer/certificate_parser_android.h new file mode 100644 index 00000000..ccb92936 --- /dev/null +++ b/payload_consumer/certificate_parser_android.h @@ -0,0 +1,46 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_ +#define UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_ + +#include +#include +#include + +#include + +#include "payload_consumer/certificate_parser_interface.h" + +namespace chromeos_update_engine { +// This class parses the certificates from a zip file. Because the Android +// build system stores the certs in otacerts.zip. +class CertificateParserAndroid : public CertificateParserInterface { + public: + CertificateParserAndroid() = default; + + bool ReadPublicKeysFromCertificates( + const std::string& path, + std::vector>* + out_public_keys) override; + + private: + DISALLOW_COPY_AND_ASSIGN(CertificateParserAndroid); +}; + +} // namespace chromeos_update_engine + +#endif diff --git a/payload_consumer/certificate_parser_android_unittest.cc b/payload_consumer/certificate_parser_android_unittest.cc new file mode 100644 index 00000000..e300414b --- /dev/null +++ b/payload_consumer/certificate_parser_android_unittest.cc @@ -0,0 +1,61 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_consumer/certificate_parser_interface.h" + +#include + +#include +#include + +#include "update_engine/common/hash_calculator.h" +#include "update_engine/common/test_utils.h" +#include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/payload_verifier.h" +#include "update_engine/payload_generator/payload_signer.h" + +namespace chromeos_update_engine { + +extern const char* kUnittestPrivateKeyPath; +const char* kUnittestOtacertsPath = "otacerts.zip"; + +TEST(CertificateParserAndroidTest, ParseZipArchive) { + std::string ota_cert = + test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath); + ASSERT_TRUE(utils::FileExists(ota_cert.c_str())); + + std::vector> keys; + auto parser = CreateCertificateParser(); + ASSERT_TRUE(parser->ReadPublicKeysFromCertificates(ota_cert, &keys)); + ASSERT_EQ(1u, keys.size()); +} + +TEST(CertificateParserAndroidTest, VerifySignature) { + brillo::Blob hash_blob; + ASSERT_TRUE(HashCalculator::RawHashOfData({'x'}, &hash_blob)); + brillo::Blob sig_blob; + ASSERT_TRUE(PayloadSigner::SignHash( + hash_blob, + test_utils::GetBuildArtifactsPath(kUnittestPrivateKeyPath), + &sig_blob)); + + auto verifier = PayloadVerifier::CreateInstanceFromZipPath( + test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath)); + ASSERT_TRUE(verifier != nullptr); + ASSERT_TRUE(verifier->VerifyRawSignature(sig_blob, hash_blob, nullptr)); +} + +} // namespace chromeos_update_engine diff --git a/payload_consumer/certificate_parser_interface.h b/payload_consumer/certificate_parser_interface.h new file mode 100644 index 00000000..dad23d21 --- /dev/null +++ b/payload_consumer/certificate_parser_interface.h @@ -0,0 +1,44 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_ +#define UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_ + +#include +#include +#include + +#include + +namespace chromeos_update_engine { + +// This class parses the PEM encoded X509 certificates from |path|; and +// passes the parsed public keys to the caller. +class CertificateParserInterface { + public: + virtual ~CertificateParserInterface() = default; + + virtual bool ReadPublicKeysFromCertificates( + const std::string& path, + std::vector>* + out_public_keys) = 0; +}; + +std::unique_ptr CreateCertificateParser(); + +} // namespace chromeos_update_engine + +#endif diff --git a/payload_consumer/certificate_parser_stub.cc b/payload_consumer/certificate_parser_stub.cc new file mode 100644 index 00000000..95fd6e89 --- /dev/null +++ b/payload_consumer/certificate_parser_stub.cc @@ -0,0 +1,31 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +namespace chromeos_update_engine { +bool CertificateParserStub::ReadPublicKeysFromCertificates( + const std::string& path, + std::vector>* + out_public_keys) { + return true; +} + +std::unique_ptr CreateCertificateParser() { + return std::make_unique(); +} + +} // namespace chromeos_update_engine diff --git a/payload_consumer/certificate_parser_stub.h b/payload_consumer/certificate_parser_stub.h new file mode 100644 index 00000000..f4f8825f --- /dev/null +++ b/payload_consumer/certificate_parser_stub.h @@ -0,0 +1,44 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_ +#define UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_ + +#include +#include +#include + +#include + +#include "payload_consumer/certificate_parser_interface.h" + +namespace chromeos_update_engine { +class CertificateParserStub : public CertificateParserInterface { + public: + CertificateParserStub() = default; + + bool ReadPublicKeysFromCertificates( + const std::string& path, + std::vector>* + out_public_keys) override; + + private: + DISALLOW_COPY_AND_ASSIGN(CertificateParserStub); +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_ diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 8049af72..4b80ae6b 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -46,6 +46,7 @@ #include "update_engine/common/terminator.h" #include "update_engine/payload_consumer/bzip_extent_writer.h" #include "update_engine/payload_consumer/cached_file_descriptor.h" +#include "update_engine/payload_consumer/certificate_parser_interface.h" #include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/extent_reader.h" #include "update_engine/payload_consumer/extent_writer.h" @@ -526,9 +527,10 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata( << "Trusting metadata size in payload = " << metadata_size_; } - string public_key; - if (!GetPublicKey(&public_key)) { - LOG(ERROR) << "Failed to get public key."; + // Perform the verification unconditionally. + auto [payload_verifier, perform_verification] = CreatePayloadVerifier(); + if (!payload_verifier) { + LOG(ERROR) << "Failed to create payload verifier."; *error = ErrorCode::kDownloadMetadataSignatureVerificationError; return MetadataParseResult::kError; } @@ -536,7 +538,7 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata( // We have the full metadata in |payload|. Verify its integrity // and authenticity based on the information we have in Omaha response. *error = payload_metadata_.ValidateMetadataSignature( - payload, payload_->metadata_signature, public_key); + payload, payload_->metadata_signature, *payload_verifier); if (*error != ErrorCode::kSuccess) { if (install_plan_->hash_checks_mandatory) { // The autoupdate_CatchBadSignatures test checks for this string @@ -1596,10 +1598,32 @@ bool DeltaPerformer::GetPublicKey(string* out_public_key) { return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa, out_public_key); } - + LOG(INFO) << "No public keys found for verification."; return true; } +std::pair, bool> +DeltaPerformer::CreatePayloadVerifier() { + if (utils::FileExists(update_certificates_path_.c_str())) { + LOG(INFO) << "Verifying using certificates: " << update_certificates_path_; + return { + PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_), + true}; + } + + string public_key; + if (!GetPublicKey(&public_key)) { + LOG(ERROR) << "Failed to read public key"; + return {nullptr, true}; + } + + // Skips the verification if the public key is empty. + if (public_key.empty()) { + return {nullptr, false}; + } + return {PayloadVerifier::CreateInstance(public_key), true}; +} + ErrorCode DeltaPerformer::ValidateManifest() { // Perform assorted checks to sanity check the manifest, make sure it // matches data from other sources, and that it is a supported version. 
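The pair returned by CreatePayloadVerifier() carries two independent facts: whether signature verification should be attempted at all, and whether a verifier could actually be built. A condensed sketch of how the VerifyPayload() hunk below consumes it (not the literal code; strings and error codes are the ones visible in the diff):

auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
if (!perform_verification) {
  // No certificate zip and an intentionally empty public key: skip the check.
  LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
  return ErrorCode::kSuccess;
}
if (!payload_verifier) {
  // Verification is required but no usable key material could be loaded.
  return ErrorCode::kDownloadPayloadPubKeyVerificationError;
}
// ...otherwise VerifySignature(signatures_message_data_, hash_data) runs as
// before.
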
@@ -1760,12 +1784,6 @@ ErrorCode DeltaPerformer::ValidateOperationHash( ErrorCode DeltaPerformer::VerifyPayload( const brillo::Blob& update_check_response_hash, const uint64_t update_check_response_size) { - string public_key; - if (!GetPublicKey(&public_key)) { - LOG(ERROR) << "Failed to get public key."; - return ErrorCode::kDownloadPayloadPubKeyVerificationError; - } - // Verifies the download size. if (update_check_response_size != metadata_size_ + metadata_signature_size_ + buffer_offset_) { @@ -1783,20 +1801,19 @@ ErrorCode DeltaPerformer::VerifyPayload( ErrorCode::kPayloadHashMismatchError, payload_hash_calculator_.raw_hash() == update_check_response_hash); - // Verifies the signed payload hash. - if (public_key.empty()) { - LOG(WARNING) << "Not verifying signed delta payload -- missing public key."; - return ErrorCode::kSuccess; - } TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError, !signatures_message_data_.empty()); brillo::Blob hash_data = signed_hash_calculator_.raw_hash(); TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError, hash_data.size() == kSHA256Size); - auto payload_verifier = PayloadVerifier::CreateInstance(public_key); + auto [payload_verifier, perform_verification] = CreatePayloadVerifier(); + if (!perform_verification) { + LOG(WARNING) << "Not verifying signed delta payload -- missing public key."; + return ErrorCode::kSuccess; + } if (!payload_verifier) { - LOG(ERROR) << "Failed to create the payload verifier from " << public_key; + LOG(ERROR) << "Failed to create the payload verifier."; return ErrorCode::kDownloadPayloadPubKeyVerificationError; } if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) { diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 25c348c9..4c64dfa9 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -20,7 +20,9 @@ #include #include +#include #include +#include #include #include @@ -34,6 +36,7 @@ #include "update_engine/payload_consumer/file_writer.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/payload_metadata.h" +#include "update_engine/payload_consumer/payload_verifier.h" #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -156,6 +159,11 @@ class DeltaPerformer : public FileWriter { public_key_path_ = public_key_path; } + void set_update_certificates_path( + const std::string& update_certificates_path) { + update_certificates_path_ = update_certificates_path; + } + // Return true if header parsing is finished and no errors occurred. bool IsHeaderParsed() const; @@ -273,6 +281,12 @@ class DeltaPerformer : public FileWriter { // |out_public_key|. Returns false on failures. bool GetPublicKey(std::string* out_public_key); + // Creates a PayloadVerifier from the zip file containing certificates. If the + // path to the zip file doesn't exist, falls back to use the public key. + // Returns a tuple with the created PayloadVerifier and if we should perform + // the verification. + std::pair, bool> CreatePayloadVerifier(); + // After install_plan_ is filled with partition names and sizes, initialize // metadata of partitions and map necessary devices before opening devices. bool PreparePartitionsForUpdate(); @@ -383,6 +397,9 @@ class DeltaPerformer : public FileWriter { // override with test keys. std::string public_key_path_{constants::kUpdatePayloadPublicKeyPath}; + // The path to the zip file with X509 certificates. 
+ std::string update_certificates_path_{constants::kUpdateCertificatesPath}; + // The number of bytes received so far, used for progress tracking. size_t total_bytes_received_{0}; diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 28c11b67..a2ad77b9 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -777,6 +777,7 @@ static void ApplyDeltaFile(bool full_kernel, : GetBuildArtifactsPath(kUnittestPublicKeyPath); EXPECT_TRUE(utils::FileExists(public_key_path.c_str())); (*performer)->set_public_key_path(public_key_path); + (*performer)->set_update_certificates_path(""); EXPECT_EQ(static_cast(state->image_size), HashCalculator::RawHashOfFile( diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index b7a38cc1..e9022bab 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -159,6 +159,11 @@ class DeltaPerformerTest : public ::testing::Test { install_plan_.target_slot = 1; EXPECT_CALL(mock_delegate_, ShouldCancel(_)) .WillRepeatedly(testing::Return(false)); + performer_.set_update_certificates_path(""); + // Set the public key corresponding to the unittest private key. + string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath); + EXPECT_TRUE(utils::FileExists(public_key_path.c_str())); + performer_.set_public_key_path(public_key_path); } // Test helper placed where it can easily be friended from DeltaPerformer. @@ -388,12 +393,6 @@ class DeltaPerformerTest : public ::testing::Test { expected_error = ErrorCode::kSuccess; } - // Use the public key corresponding to the private key used above to - // sign the metadata. - string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath); - EXPECT_TRUE(utils::FileExists(public_key_path.c_str())); - performer_.set_public_key_path(public_key_path); - // Init actual_error with an invalid value so that we make sure // ParsePayloadMetadata properly populates it in all cases. 
actual_error = ErrorCode::kUmaReportedMax; @@ -920,7 +919,6 @@ TEST_F(DeltaPerformerTest, BrilloParsePayloadMetadataTest) { brillo::Blob payload_data = GeneratePayload( {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion); install_plan_.hash_checks_mandatory = true; - performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath)); ErrorCode error; EXPECT_EQ(MetadataParseResult::kSuccess, performer_.ParsePayloadMetadata(payload_data, &error)); diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index c81d3a91..09526460 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -159,7 +159,7 @@ bool PayloadMetadata::GetManifest(const brillo::Blob& payload, ErrorCode PayloadMetadata::ValidateMetadataSignature( const brillo::Blob& payload, const string& metadata_signature, - const string& pem_public_key) const { + const PayloadVerifier& payload_verifier) const { if (payload.size() < metadata_size_ + metadata_signature_size_) return ErrorCode::kDownloadMetadataSignatureError; @@ -201,16 +201,9 @@ ErrorCode PayloadMetadata::ValidateMetadataSignature( return ErrorCode::kDownloadMetadataSignatureVerificationError; } - auto payload_verifier = PayloadVerifier::CreateInstance(pem_public_key); - if (!payload_verifier) { - LOG(ERROR) << "Failed to create the payload verifier from " - << pem_public_key; - return ErrorCode::kDownloadMetadataSignatureVerificationError; - } - if (!metadata_signature_blob.empty()) { brillo::Blob decrypted_signature; - if (!payload_verifier->VerifyRawSignature( + if (!payload_verifier.VerifyRawSignature( metadata_signature_blob, metadata_hash, &decrypted_signature)) { LOG(ERROR) << "Manifest hash verification failed. Decrypted hash = "; utils::HexDumpVector(decrypted_signature); @@ -219,8 +212,8 @@ ErrorCode PayloadMetadata::ValidateMetadataSignature( return ErrorCode::kDownloadMetadataSignatureMismatch; } } else { - if (!payload_verifier->VerifySignature(metadata_signature_protobuf, - metadata_hash)) { + if (!payload_verifier.VerifySignature(metadata_signature_protobuf, + metadata_hash)) { LOG(ERROR) << "Manifest hash verification failed."; return ErrorCode::kDownloadMetadataSignatureMismatch; } diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h index 1b4c5c89..75ef8f90 100644 --- a/payload_consumer/payload_metadata.h +++ b/payload_consumer/payload_metadata.h @@ -27,6 +27,7 @@ #include "update_engine/common/error_code.h" #include "update_engine/common/platform_constants.h" +#include "update_engine/payload_consumer/payload_verifier.h" #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -65,9 +66,10 @@ class PayloadMetadata { // metadata is parsed so that a man-in-the-middle attack on the SSL connection // to the payload server doesn't exploit any vulnerability in the code that // parses the protocol buffer. - ErrorCode ValidateMetadataSignature(const brillo::Blob& payload, - const std::string& metadata_signature, - const std::string& pem_public_key) const; + ErrorCode ValidateMetadataSignature( + const brillo::Blob& payload, + const std::string& metadata_signature, + const PayloadVerifier& payload_verifier) const; // Returns the major payload version. If the version was not yet parsed, // returns zero. 
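Before the payload_verifier.cc changes below, it is worth restating the padding convention documented in update_metadata.proto earlier in this series: the signer pads each signature up to the key's maximum signature size so the serialized Signatures message has a fixed length, and records the real length in unpadded_signature_size. A hedged sketch of the trim a verifier performs before checking the bytes (the helper name is hypothetical, not the shipped code):

#include <brillo/secure_blob.h>

#include "update_engine/update_metadata.pb.h"

using chromeos_update_engine::Signatures;

// Returns the signature bytes with the fixed-size padding removed.
brillo::Blob UnpadSignature(const Signatures::Signature& sig) {
  brillo::Blob sig_data(sig.data().begin(), sig.data().end());
  if (sig.has_unpadded_signature_size() &&
      sig.unpadded_signature_size() <= sig_data.size())
    sig_data.resize(sig.unpadded_signature_size());
  return sig_data;  // handed to VerifyRawSignature() against the payload hash
}
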
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc index 02eeb76e..24e337ec 100644 --- a/payload_consumer/payload_verifier.cc +++ b/payload_consumer/payload_verifier.cc @@ -25,6 +25,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/hash_calculator.h" #include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/certificate_parser_interface.h" #include "update_engine/update_metadata.pb.h" using std::string; @@ -63,17 +64,39 @@ std::unique_ptr PayloadVerifier::CreateInstance( auto pub_key = std::unique_ptr( PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr), EVP_PKEY_free); if (!pub_key) { - LOG(ERROR) << "Failed to parse the public key in " << pem_public_key; + LOG(ERROR) << "Failed to parse the public key in: " << pem_public_key; + return nullptr; + } + + std::vector> keys; + keys.emplace_back(std::move(pub_key)); + return std::unique_ptr(new PayloadVerifier(std::move(keys))); +} + +std::unique_ptr PayloadVerifier::CreateInstanceFromZipPath( + const std::string& certificate_zip_path) { + auto parser = CreateCertificateParser(); + if (!parser) { + LOG(ERROR) << "Failed to create certificate parser from " + << certificate_zip_path; + return nullptr; + } + + std::vector> public_keys; + if (!parser->ReadPublicKeysFromCertificates(certificate_zip_path, + &public_keys) || + public_keys.empty()) { + LOG(ERROR) << "Failed to parse public keys in: " << certificate_zip_path; return nullptr; } return std::unique_ptr( - new PayloadVerifier(std::move(pub_key))); + new PayloadVerifier(std::move(public_keys))); } bool PayloadVerifier::VerifySignature( const string& signature_proto, const brillo::Blob& sha256_hash_data) const { - TEST_AND_RETURN_FALSE(public_key_ != nullptr); + TEST_AND_RETURN_FALSE(!public_keys_.empty()); Signatures signatures; LOG(INFO) << "signature blob size = " << signature_proto.size(); @@ -125,37 +148,50 @@ bool PayloadVerifier::VerifyRawSignature( const brillo::Blob& sig_data, const brillo::Blob& sha256_hash_data, brillo::Blob* decrypted_sig_data) const { - TEST_AND_RETURN_FALSE(public_key_ != nullptr); - - int key_type = EVP_PKEY_id(public_key_.get()); - if (key_type == EVP_PKEY_RSA) { - brillo::Blob sig_hash_data; - TEST_AND_RETURN_FALSE( - GetRawHashFromSignature(sig_data, public_key_.get(), &sig_hash_data)); - - if (decrypted_sig_data != nullptr) { - *decrypted_sig_data = sig_hash_data; + TEST_AND_RETURN_FALSE(!public_keys_.empty()); + + for (const auto& public_key : public_keys_) { + int key_type = EVP_PKEY_id(public_key.get()); + if (key_type == EVP_PKEY_RSA) { + brillo::Blob sig_hash_data; + if (!GetRawHashFromSignature( + sig_data, public_key.get(), &sig_hash_data)) { + LOG(WARNING) + << "Failed to get the raw hash with RSA key. 
Trying other keys."; + continue; + } + + if (decrypted_sig_data != nullptr) { + *decrypted_sig_data = sig_hash_data; + } + + brillo::Blob padded_hash_data = sha256_hash_data; + TEST_AND_RETURN_FALSE( + PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size())); + + if (padded_hash_data == sig_hash_data) { + return true; + } } - brillo::Blob padded_hash_data = sha256_hash_data; - TEST_AND_RETURN_FALSE( - PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size())); - - return padded_hash_data == sig_hash_data; - } + if (key_type == EVP_PKEY_EC) { + EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(public_key.get()); + TEST_AND_RETURN_FALSE(ec_key != nullptr); + if (ECDSA_verify(0, + sha256_hash_data.data(), + sha256_hash_data.size(), + sig_data.data(), + sig_data.size(), + ec_key) == 1) { + return true; + } + } - if (key_type == EVP_PKEY_EC) { - EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(public_key_.get()); - TEST_AND_RETURN_FALSE(ec_key != nullptr); - return ECDSA_verify(0, - sha256_hash_data.data(), - sha256_hash_data.size(), - sig_data.data(), - sig_data.size(), - ec_key) == 1; + LOG(ERROR) << "Unsupported key type " << key_type; + return false; } - - LOG(ERROR) << "Unsupported key type " << key_type; + LOG(INFO) << "Failed to verify the signature with " << public_keys_.size() + << " keys."; return false; } diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h index b5d54572..bc5231fa 100644 --- a/payload_consumer/payload_verifier.h +++ b/payload_consumer/payload_verifier.h @@ -20,13 +20,14 @@ #include #include #include +#include #include #include #include "update_engine/update_metadata.pb.h" -// This class holds the public key and implements methods used for payload +// This class holds the public keys and implements methods used for payload // signature verification. See payload_generator/payload_signer.h for payload // signing. @@ -47,6 +48,11 @@ class PayloadVerifier { static std::unique_ptr CreateInstance( const std::string& pem_public_key); + // Extracts the public keys from the certificates contained in the input + // zip file. And creates a PayloadVerifier with these public keys. + static std::unique_ptr CreateInstanceFromZipPath( + const std::string& certificate_zip_path); + // Interprets |signature_proto| as a protocol buffer containing the // |Signatures| message and decrypts each signature data using the stored // public key. Pads the 32 bytes |sha256_hash_data| to 256 or 512 bytes @@ -65,8 +71,9 @@ class PayloadVerifier { private: explicit PayloadVerifier( - std::unique_ptr&& public_key) - : public_key_(std::move(public_key)) {} + std::vector>&& + public_keys) + : public_keys_(std::move(public_keys)) {} // Decrypts |sig_data| with the given |public_key| and populates // |out_hash_data| with the decoded raw hash. 
Returns true if successful, @@ -75,8 +82,7 @@ class PayloadVerifier { const EVP_PKEY* public_key, brillo::Blob* out_hash_data) const; - std::unique_ptr public_key_{nullptr, - nullptr}; + std::vector> public_keys_; }; } // namespace chromeos_update_engine diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 08f6c20f..5bffc426 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -39,6 +39,7 @@ #include "update_engine/metrics_reporter_interface.h" #include "update_engine/metrics_utils.h" #include "update_engine/network_selector.h" +#include "update_engine/payload_consumer/certificate_parser_interface.h" #include "update_engine/payload_consumer/delta_performer.h" #include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/file_descriptor.h" @@ -46,6 +47,7 @@ #include "update_engine/payload_consumer/filesystem_verifier_action.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/payload_metadata.h" +#include "update_engine/payload_consumer/payload_verifier.h" #include "update_engine/payload_consumer/postinstall_runner_action.h" #include "update_engine/update_boot_flags_action.h" #include "update_engine/update_status_utils.h" @@ -410,12 +412,16 @@ bool UpdateAttempterAndroid::VerifyPayloadApplicable( } fd->Close(); - string public_key; - if (!utils::ReadFile(constants::kUpdatePayloadPublicKeyPath, &public_key)) { - return LogAndSetError(error, FROM_HERE, "Failed to read public key."); + auto payload_verifier = PayloadVerifier::CreateInstanceFromZipPath( + constants::kUpdateCertificatesPath); + if (!payload_verifier) { + return LogAndSetError(error, + FROM_HERE, + "Failed to create the payload verifier from " + + std::string(constants::kUpdateCertificatesPath)); } - errorcode = - payload_metadata.ValidateMetadataSignature(metadata, "", public_key); + errorcode = payload_metadata.ValidateMetadataSignature( + metadata, "", *payload_verifier); if (errorcode != ErrorCode::kSuccess) { return LogAndSetError(error, FROM_HERE, From 6f0366d35c9646d4ffad48c8b8531a1b6ebfdd81 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 14 Oct 2019 14:56:43 -0700 Subject: [PATCH 124/624] update_engine: Add delta_performer_fuzzer delta_performer operates on data it downloads from Omaha servers. Most of the data is signed, but there are a few header bytes that are not signed. It worth doing this fuzzer. BUG=chromium:1014275 TEST=FEATURES=test USE="asan fuzzer" emerge-amd64-generic update_engine Change-Id: I514bac5f8c4d28833cd01e1456481fb593c9f51a Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1860994 Reviewed-by: Manoj Gupta Tested-by: Amin Hassani --- BUILD.gn | 22 ++++- payload_consumer/delta_performer_fuzzer.cc | 103 +++++++++++++++++++++ 2 files changed, 124 insertions(+), 1 deletion(-) create mode 100644 payload_consumer/delta_performer_fuzzer.cc diff --git a/BUILD.gn b/BUILD.gn index 1e803a06..01207bd0 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -50,7 +50,10 @@ group("all") { } if (use.fuzzer) { - deps += [ ":update_engine_omaha_request_action_fuzzer" ] + deps += [ + ":update_engine_delta_performer_fuzzer", + ":update_engine_omaha_request_action_fuzzer", + ] } } @@ -553,6 +556,23 @@ if (use.test) { # Fuzzer target. 
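The fuzz target added below relies on the standard libFuzzer entry point and on FuzzedDataProvider to carve structured values out of the raw input before streaming the remaining bytes into DeltaPerformer::Write(). For readers unfamiliar with that machinery, a minimal stand-alone harness looks like the following (orientation only, not the target in this patch):

#include <cstddef>
#include <cstdint>

#include <fuzzer/FuzzedDataProvider.h>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  FuzzedDataProvider provider(data, size);
  // Derive structured values from the fuzz input, then exercise the code
  // under test with them.
  const bool interactive = provider.ConsumeBool();
  const auto chunk = provider.ConsumeBytes<uint8_t>(
      provider.ConsumeIntegralInRange<size_t>(0, 100));
  (void)interactive;
  (void)chunk;
  return 0;  // Always 0; crashes and sanitizer reports are the signal.
}
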
if (use.fuzzer) { + executable("update_engine_delta_performer_fuzzer") { + sources = [ + "payload_consumer/delta_performer_fuzzer.cc", + ] + configs += [ + "//common-mk/common_fuzzer", + ":target_defaults", + ] + pkg_deps = [ + "libbrillo-test-${libbase_ver}", + "libchrome-test-${libbase_ver}", + ] + deps = [ + ":libupdate_engine", + ":update_engine_test_libs", + ] + } executable("update_engine_omaha_request_action_fuzzer") { sources = [ "omaha_request_action_fuzzer.cc", diff --git a/payload_consumer/delta_performer_fuzzer.cc b/payload_consumer/delta_performer_fuzzer.cc new file mode 100644 index 00000000..53b168aa --- /dev/null +++ b/payload_consumer/delta_performer_fuzzer.cc @@ -0,0 +1,103 @@ +// +// Copyright 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include +#include + +#include "update_engine/common/fake_boot_control.h" +#include "update_engine/common/fake_hardware.h" +#include "update_engine/common/prefs.h" +#include "update_engine/payload_consumer/delta_performer.h" +#include "update_engine/payload_consumer/download_action.h" +#include "update_engine/payload_consumer/install_plan.h" + +namespace chromeos_update_engine { + +class FakeDownloadActionDelegate : public DownloadActionDelegate { + public: + FakeDownloadActionDelegate() = default; + ~FakeDownloadActionDelegate() = default; + + // DownloadActionDelegate overrides; + void BytesReceived(uint64_t bytes_progressed, + uint64_t bytes_received, + uint64_t total) override{}; + + bool ShouldCancel(ErrorCode* cancel_reason) override { return false; }; + + void DownloadComplete() override{}; + + DISALLOW_COPY_AND_ASSIGN(FakeDownloadActionDelegate); +}; + +void FuzzDeltaPerformer(const uint8_t* data, size_t size) { + MemoryPrefs prefs; + FakeBootControl boot_control; + FakeHardware hardware; + FakeDownloadActionDelegate download_action_delegate; + + FuzzedDataProvider data_provider(data, size); + + InstallPlan install_plan{ + .target_slot = 1, + .partitions = {InstallPlan::Partition{ + .source_path = "/dev/zero", + .source_size = 4096, + .target_path = "/dev/null", + .target_size = 4096, + }}, + }; + + InstallPlan::Payload payload{ + .size = data_provider.ConsumeIntegralInRange(0, 10000), + .metadata_size = data_provider.ConsumeIntegralInRange(0, 1000), + .hash = data_provider.ConsumeBytes(32), + .type = static_cast( + data_provider.ConsumeIntegralInRange(0, 3)), + .already_applied = data_provider.ConsumeBool(), + }; + + DeltaPerformer performer(&prefs, + &boot_control, + &hardware, + &download_action_delegate, + &install_plan, + &payload, + data_provider.ConsumeBool()); + do { + auto chunk_size = data_provider.ConsumeIntegralInRange(0, 100); + auto data = data_provider.ConsumeBytes(chunk_size); + performer.Write(data.data(), data.size()); + } while (data_provider.remaining_bytes() > 0); +} + +} // namespace chromeos_update_engine + +class Environment { + public: + Environment() { logging::SetMinLogLevel(logging::LOG_FATAL); } +}; + +extern "C" int 
LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { + if (size > 1000000) { + return 0; + } + + static Environment env; + chromeos_update_engine::FuzzDeltaPerformer(data, size); + return 0; +} From f0f4a91d686b50aff2afdf2cabfec68fb6765d5a Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 26 Sep 2019 17:51:33 -0700 Subject: [PATCH 125/624] DynamicPartitionControl: only create snapshot when snapshot_enabled Do not create snapshot when applying downgrade to non-Virtual-A/B packages and secondary OTAs. Test: apply downgrade OTA on Virtual A/B devices Bug: 138733621 (secondary OTA) Fixes: 138258570 (downgrades) Change-Id: I13318f57613d6bd60a5b7e81ebb3e35b3c225a0c --- boot_control_android.cc | 6 +---- boot_control_android_unittest.cc | 3 +++ dynamic_partition_control_android.cc | 26 ++++++++++++------- dynamic_partition_control_android.h | 9 ++++--- dynamic_partition_control_android_unittest.cc | 2 +- dynamic_partition_control_interface.h | 9 ++++--- mock_dynamic_partition_control.h | 4 +-- 7 files changed, 33 insertions(+), 26 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index 4a010bda..b1d775e2 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -308,12 +308,8 @@ bool BootControlAndroid::PreparePartitionsForUpdate( return true; } - if (!update_metadata) { - return true; - } - return dynamic_control_->PreparePartitionsForUpdate( - source_slot, target_slot, manifest); + source_slot, target_slot, manifest, update_metadata); } } // namespace chromeos_update_engine diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc index f090de2f..e44af157 100644 --- a/boot_control_android_unittest.cc +++ b/boot_control_android_unittest.cc @@ -197,6 +197,9 @@ TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) { {T("system"), 2_GiB}, {T("vendor"), 1_GiB}}); + EXPECT_CALL(dynamicControl(), PreparePartitionsForUpdate(_, _, _, false)) + .WillOnce(Return(true)); + EXPECT_TRUE(PreparePartitionsForUpdate( target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false)); diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 8dcf343d..88a787af 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -112,7 +112,8 @@ bool DynamicPartitionControlAndroid::MapPartitionInternal( .force_writable = force_writable, }; bool success = false; - if (GetVirtualAbFeatureFlag().IsEnabled() && force_writable) { + if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_ && + force_writable) { // Only target partitions are mapped with force_writable. On Virtual // A/B devices, target partitions may overlap with source partitions, so // they must be mapped with snapshot. @@ -343,11 +344,15 @@ bool DynamicPartitionControlAndroid::GetDeviceDir(std::string* out) { bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, - const DeltaArchiveManifest& manifest) { - // TODO(elsk): Also call PrepareDynamicPartitionsForUpdate when applying - // downgrade packages on retrofit Virtual A/B devices and when applying - // secondary OTA. 
b/138258570 - if (GetVirtualAbFeatureFlag().IsEnabled()) { + const DeltaArchiveManifest& manifest, + bool update) { + target_supports_snapshot_ = + manifest.dynamic_partition_metadata().snapshot_enabled(); + + if (!update) + return true; + + if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_) { return PrepareSnapshotPartitionsForUpdate( source_slot, target_slot, manifest); } @@ -491,10 +496,11 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( } bool DynamicPartitionControlAndroid::FinishUpdate() { - if (!GetVirtualAbFeatureFlag().IsEnabled()) - return true; - LOG(INFO) << "Snapshot writes are done."; - return snapshot_->FinishedSnapshotWrites(); + if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_) { + LOG(INFO) << "Snapshot writes are done."; + return snapshot_->FinishedSnapshotWrites(); + } + return true; } } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index f9dfd894..35d82168 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -46,10 +46,10 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { std::unique_ptr LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot) override; - bool PreparePartitionsForUpdate( - uint32_t source_slot, - uint32_t target_slot, - const DeltaArchiveManifest& manifest) override; + bool PreparePartitionsForUpdate(uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool update) override; bool GetDeviceDir(std::string* path) override; std::string GetSuperPartitionName(uint32_t slot) override; bool FinishUpdate() override; @@ -113,6 +113,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { std::set mapped_devices_; std::unique_ptr snapshot_; + bool target_supports_snapshot_ = false; DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); }; diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 552774e6..e8ef1f98 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -112,7 +112,7 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { } bool PreparePartitionsForUpdate(const PartitionSizes& partition_sizes) { return dynamicControl().PreparePartitionsForUpdate( - source(), target(), PartitionSizesToManifest(partition_sizes)); + source(), target(), PartitionSizesToManifest(partition_sizes), true); } void SetSlots(const TestParam& slots) { slots_ = slots; } diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index 0ccfcd6b..9c4c2e82 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -92,10 +92,11 @@ class DynamicPartitionControlInterface { // Prepare all partitions for an update specified in |manifest|. // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the // device would be mapped in an inconsistent way. - virtual bool PreparePartitionsForUpdate( - uint32_t source_slot, - uint32_t target_slot, - const DeltaArchiveManifest& manifest) = 0; + // If |update| is set, create snapshots and writes super partition metadata. + virtual bool PreparePartitionsForUpdate(uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool update) = 0; // Return a possible location for devices listed by name. 
virtual bool GetDeviceDir(std::string* path) = 0; diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 1af6cfed..8146e0f1 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -44,8 +44,8 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { const std::string&, uint32_t)); MOCK_METHOD1(GetDeviceDir, bool(std::string*)); MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); - MOCK_METHOD3(PreparePartitionsForUpdate, - bool(uint32_t, uint32_t, const DeltaArchiveManifest&)); + MOCK_METHOD4(PreparePartitionsForUpdate, + bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool)); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD0(FinishUpdate, bool()); From a4e7da3b8a4e175dc2cec7c45a5e3d995a0711e1 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 30 Sep 2019 18:25:03 -0700 Subject: [PATCH 126/624] Delete COW group for downgrade Virtual A/B When applying a downgrade OTA package on retrofit Virtual A/B device (to a non-Virtual-A/B build), the COW group / partitions needs to be explicitly deleted to ensure there are enough space to create the regular target partitions. Bug: 138258570 Test: apply downgrade Change-Id: I9e90a26500ec291ebd1e784e8e7fb8fbd508a925 --- dynamic_partition_control_android.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 88a787af..2bd7d510 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -423,6 +423,11 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( MetadataBuilder* builder, uint32_t target_slot, const DeltaArchiveManifest& manifest) { + // If applying downgrade from Virtual A/B to non-Virtual A/B, the left-over + // COW group needs to be deleted to ensure there are enough space to create + // target partitions. + builder->RemoveGroupAndPartitions(android::snapshot::kCowGroupName); + const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); DeleteGroupsWithSuffix(builder, target_suffix); From 6d88856a1b74121f3d242af7f8be6ad66798692a Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 1 Oct 2019 13:00:31 -0700 Subject: [PATCH 127/624] Call SnapshotManager::CancelUpdate on downgrade path When applying a downgrade package on Virtual A/B device, /metadata/ota/state may not be present, and the attempt to unmap any previous devices,UnmapUpdateSnapshot may fail. Call CancelUpdate() no matter what OTA package (update, downgrade) is applied. Test: apply downgrade when /metadata/ota/state is not present Bug: 138258570 Change-Id: If9a438762db757089376d531b752ac384caa059c --- dynamic_partition_control_android.cc | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 2bd7d510..90e489ad 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -352,9 +352,20 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( if (!update) return true; - if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_) { - return PrepareSnapshotPartitionsForUpdate( - source_slot, target_slot, manifest); + if (GetVirtualAbFeatureFlag().IsEnabled()) { + // On Virtual A/B device, either CancelUpdate() or BeginUpdate() must be + // called before calling UnmapUpdateSnapshot. 
+ // - If target_supports_snapshot_, PrepareSnapshotPartitionsForUpdate() + // calls BeginUpdate() which resets update state + // - If !target_supports_snapshot_, explicitly CancelUpdate(). + if (target_supports_snapshot_) { + return PrepareSnapshotPartitionsForUpdate( + source_slot, target_slot, manifest); + } + if (!snapshot_->CancelUpdate()) { + LOG(ERROR) << "Cannot cancel previous update."; + return false; + } } return PrepareDynamicPartitionsForUpdate(source_slot, target_slot, manifest); } From 50a56c6a280391bc21762c3d9f020bcc41cf24ac Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 14 Oct 2019 19:35:05 -0700 Subject: [PATCH 128/624] DynamicPartitionControl: Load metadata correctly for downgrading Virtual A/B When applying a downgrade package on a Virtual A/B device (to a non-Virtual A/B build), source slot partitions must be kept in the metadata. Test: apply downgrade package Bug: 138258570 Change-Id: If083538b3db10e56c6255686ca63b97007bea9d5 --- dynamic_partition_control_android.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 90e489ad..a8210cf8 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -257,8 +257,12 @@ DynamicPartitionControlAndroid::LoadMetadataBuilder( builder = MetadataBuilder::New(PartitionOpener(), super_device, source_slot); } else { - builder = MetadataBuilder::NewForUpdate( - PartitionOpener(), super_device, source_slot, target_slot); + bool always_keep_source_slot = !target_supports_snapshot_; + builder = MetadataBuilder::NewForUpdate(PartitionOpener(), + super_device, + source_slot, + target_slot, + always_keep_source_slot); } if (builder == nullptr) { From be4ea231bc2748446b3ed84ba99ff4dc347587d2 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Tue, 15 Oct 2019 18:08:31 -0700 Subject: [PATCH 129/624] Update engine now depends on otacerts The otacerts.zip is aleady added to the system/recovery image. Add the dependency here anyway. Also drop the dependency on update_engine_payload_key. Bug: 116660991 Test: build and apply an update Change-Id: I0353af86c4e66eec9ce84901ac09fb78bd924116 --- Android.bp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Android.bp b/Android.bp index a691e7e8..a7f92153 100644 --- a/Android.bp +++ b/Android.bp @@ -306,7 +306,7 @@ cc_binary { static_libs: ["libupdate_engine_android"], required: [ "cacerts_google", - "update_engine_payload_key", + "otacerts", ], srcs: ["main.cc"], @@ -380,7 +380,7 @@ cc_binary { }, required: [ - "update_engine_payload_key.recovery", + "otacerts.recovery", ], } From 8368ee08c56d222080704e42c0e98a76e2143ffa Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 16 Oct 2019 15:37:39 -0700 Subject: [PATCH 130/624] update_engine: Block logging of invalid EOL date A simple blocakage of warning log, which stops spams that can happen prior to GE actually sending the optional attribute for EOL date. Even fetching the value of EOL date my not exists, so will also be removed for prefs as callers should log based on return value. 
BUG=chromium:1014758 TEST=none Change-Id: Ib2da2e9574c670c99deea6e8c8409f7e83d216e9 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1863008 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani Auto-Submit: Jae Hoon Kim --- common/prefs.cc | 1 - omaha_utils.cc | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/common/prefs.cc b/common/prefs.cc index 12d06c0c..71838619 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -119,7 +119,6 @@ bool Prefs::FileStorage::GetKey(const string& key, string* value) const { base::FilePath filename; TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename)); if (!base::ReadFileToString(filename, value)) { - LOG(INFO) << key << " not present in " << prefs_dir_.value(); return false; } return true; diff --git a/omaha_utils.cc b/omaha_utils.cc index 9fe425b1..f9ec85ac 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -63,7 +63,9 @@ std::string EolDateToString(EolDate eol_date) { EolDate StringToEolDate(const std::string& eol_date) { EolDate date = kEolDateInvalid; if (!base::StringToInt64(eol_date, &date)) { - LOG(WARNING) << "Invalid EOL date attribute: " << eol_date; + // TODO(b/142823480): Once Omaha is passing _eol_date attribute, this log + // may be turned back on. + // LOG(WARNING) << "Invalid EOL date attribute: " << eol_date; return kEolDateInvalid; } return date; From 2111d063ce9c9f8e0c0fce55313f092713fc00e9 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 15 Oct 2019 22:36:27 -0700 Subject: [PATCH 131/624] Link to IBootControl 1.1. This fixes linking against the latest libsnapshot. Bug: 139154945 Test: builds Change-Id: Ib20a4c4ed4b7f0c105889ca0fbed2bfbfc423304 --- Android.bp | 1 + 1 file changed, 1 insertion(+) diff --git a/Android.bp b/Android.bp index a7f92153..54fd0c2c 100644 --- a/Android.bp +++ b/Android.bp @@ -204,6 +204,7 @@ cc_defaults { "liblp", "libutils", "android.hardware.boot@1.0", + "android.hardware.boot@1.1", ], target: { recovery: { From b38e1afa1f1df3c6b00543c6f4edb015985e1037 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 17 Oct 2019 14:59:22 -0700 Subject: [PATCH 132/624] Avoid reading sysprops again. DynamicPartiitonControlAndroid reads sysprops again and again. These feature flags can be stored as a member variable. Test: pass Change-Id: Ie885afa502ae7387b76c231d9f54cd550c38c682 --- dynamic_partition_control_android.cc | 21 ++++++++++++--------- dynamic_partition_control_android.h | 2 ++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index a8210cf8..0c1f0d30 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -62,13 +62,6 @@ constexpr std::chrono::milliseconds kMapTimeout{1000}; // needs to be mapped, this timeout is longer than |kMapTimeout|. 
 constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000};

-DynamicPartitionControlAndroid::DynamicPartitionControlAndroid() {
-  if (GetVirtualAbFeatureFlag().IsEnabled()) {
-    snapshot_ = android::snapshot::SnapshotManager::New();
-    CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
-  }
-}
-
 DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
   CleanupInternal(false /* wait */);
 }
@@ -91,12 +84,22 @@ static FeatureFlag GetFeatureFlag(const char* enable_prop,
   return FeatureFlag(FeatureFlag::Value::NONE);
 }

+DynamicPartitionControlAndroid::DynamicPartitionControlAndroid()
+    : dynamic_partitions_(
+          GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions)),
+      virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) {
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    snapshot_ = android::snapshot::SnapshotManager::New();
+    CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
+  }
+}
+
 FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() {
-  return GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions);
+  return dynamic_partitions_;
 }

 FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() {
-  return GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit);
+  return virtual_ab_;
 }

 bool DynamicPartitionControlAndroid::MapPartitionInternal(
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
index 35d82168..d70a2aa0 100644
--- a/dynamic_partition_control_android.h
+++ b/dynamic_partition_control_android.h
@@ -112,6 +112,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
       const DeltaArchiveManifest& manifest);

   std::set mapped_devices_;
+  const FeatureFlag dynamic_partitions_;
+  const FeatureFlag virtual_ab_;
   std::unique_ptr snapshot_;

   bool target_supports_snapshot_ = false;

From 6c70b934f24d3d1b14795f34e8504e2a05f85c95 Mon Sep 17 00:00:00 2001
From: Tianjie Xu
Date: Tue, 22 Oct 2019 16:46:00 -0700
Subject: [PATCH 133/624] Fix the behavior for metadata verification when no
 public key is present

The old behavior actually checked whether the hash check is mandatory
(install_plan_->hash_checks_mandatory), and it reported a warning
instead of an error for non-mandatory checks when no public key is
present.

Change the logic to match the old behavior.

Test: unit tests pass, run 'brillo_update_payload verify' without a source image.
Change-Id: Ie9be7553ec018c1c7fd515a462190c2376c67e4c
---
 payload_consumer/delta_performer.cc | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 4b80ae6b..4aec00bf 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -527,18 +527,19 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
                 << "Trusting metadata size in payload = " << metadata_size_;
   }

-  // Perform the verification unconditionally.
   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
   if (!payload_verifier) {
     LOG(ERROR) << "Failed to create payload verifier.";
     *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
-    return MetadataParseResult::kError;
+    if (perform_verification) {
+      return MetadataParseResult::kError;
+    }
+  } else {
+    // We have the full metadata in |payload|. Verify its integrity
+    // and authenticity based on the information we have in Omaha response.
+    *error = payload_metadata_.ValidateMetadataSignature(
+        payload, payload_->metadata_signature, *payload_verifier);
   }
-
-  // We have the full metadata in |payload|. Verify its integrity
-  // and authenticity based on the information we have in Omaha response.
-  *error = payload_metadata_.ValidateMetadataSignature(
-      payload, payload_->metadata_signature, *payload_verifier);
   if (*error != ErrorCode::kSuccess) {
     if (install_plan_->hash_checks_mandatory) {
       // The autoupdate_CatchBadSignatures test checks for this string

From 6e0d0ef979d2d0dd99c586f83fd7edbf356c63c3 Mon Sep 17 00:00:00 2001
From: Yifan Hong
Date: Thu, 17 Oct 2019 14:34:22 -0700
Subject: [PATCH 134/624] Sanity check that no downgrade package is applied on
 a launch VAB device.

If the OTA server is misconfigured, the OTA client can now reject the
OTA when the current device launched with VAB but is trying to update
to a build that does not support VAB. This operation is only permitted
on retrofit devices.

Bug: 138258570
Test: pass
Change-Id: I159ff2edc81555fee8bfa6296e0c6c969f2f4f6d
---
 dynamic_partition_control_android.cc  | 7 +++++++
 dynamic_partition_control_interface.h | 1 +
 2 files changed, 8 insertions(+)

diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
index 0c1f0d30..e194670f 100644
--- a/dynamic_partition_control_android.cc
+++ b/dynamic_partition_control_android.cc
@@ -369,6 +369,13 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate(
       return PrepareSnapshotPartitionsForUpdate(
           source_slot, target_slot, manifest);
     }
+
+    if (GetVirtualAbFeatureFlag().IsLaunch() && !target_supports_snapshot_) {
+      LOG(ERROR) << "Cannot downgrade to a build that does not support "
+                 << "snapshots because this device launches with Virtual A/B.";
+      return false;
+    }
+
     if (!snapshot_->CancelUpdate()) {
       LOG(ERROR) << "Cannot cancel previous update.";
       return false;
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h
index 9c4c2e82..9c18973f 100644
--- a/dynamic_partition_control_interface.h
+++ b/dynamic_partition_control_interface.h
@@ -36,6 +36,7 @@ struct FeatureFlag {
   constexpr explicit FeatureFlag(Value value) : value_(value) {}
   constexpr bool IsEnabled() const { return value_ != Value::NONE; }
   constexpr bool IsRetrofit() const { return value_ == Value::RETROFIT; }
+  constexpr bool IsLaunch() const { return value_ == Value::LAUNCH; }

  private:
   Value value_;

From 816193f68604845ad3c66cafb71b68e05e310469 Mon Sep 17 00:00:00 2001
From: Allen Webb
Date: Fri, 25 Oct 2019 10:06:46 -0700
Subject: [PATCH 135/624] Conditionally link protobuf for fuzzer builds.

BUG=chromium:1017318
TEST=cros_workon-amd64-generic start update_engine && \
     USE='asan fuzzer kvm_guest' ./build_packages \
     --board=amd64-generic --skip_chroot_upgrade chromium-os-fuzzers

Change-Id: I696c8bdf20e4a60d21139c169ac90ede186f4050
Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1881336
Tested-by: Allen Webb
Reviewed-by: Amin Hassani
Commit-Queue: Allen Webb
---
 BUILD.gn | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/BUILD.gn b/BUILD.gn
index 01207bd0..0cc27e6f 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -96,8 +96,12 @@ pkg_config("target_defaults") {
     # before protobuf here or the linker flags won't be in the right
     # order.
     "system_api",
-    "protobuf-lite",
   ]
+  if (use.fuzzer) {
+    pkg_deps += [ "protobuf" ]
+  } else {
+    pkg_deps += [ "protobuf-lite" ]
+  }
 }

 # Protobufs.
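As an aside to the Virtual A/B patches above: they distinguish devices that
launched with Virtual A/B from devices that were retrofitted, and only the
latter may take an update to a build without snapshot support. The following
is a minimal, self-contained C++ sketch (not part of any patch in this
series) of that launch-vs-retrofit logic, assuming the same
NONE/RETROFIT/LAUNCH states as FeatureFlag in
dynamic_partition_control_interface.h; the DowngradeAllowed() helper and the
main() driver are hypothetical illustrations only.

#include <iostream>

// Mirrors FeatureFlag from dynamic_partition_control_interface.h.
struct FeatureFlag {
  enum class Value { NONE = 0, RETROFIT, LAUNCH };
  constexpr explicit FeatureFlag(Value value) : value_(value) {}
  constexpr bool IsEnabled() const { return value_ != Value::NONE; }
  constexpr bool IsRetrofit() const { return value_ == Value::RETROFIT; }
  constexpr bool IsLaunch() const { return value_ == Value::LAUNCH; }

 private:
  Value value_;
};

// Hypothetical helper: a downgrade to a target build without snapshot
// support is only allowed when Virtual A/B was retrofitted, never on a
// device that launched with Virtual A/B (mirrors the check in patch 134).
bool DowngradeAllowed(FeatureFlag virtual_ab, bool target_supports_snapshot) {
  if (!virtual_ab.IsEnabled() || target_supports_snapshot)
    return true;
  return !virtual_ab.IsLaunch();
}

int main() {
  using V = FeatureFlag::Value;
  std::cout << DowngradeAllowed(FeatureFlag(V::LAUNCH), false)    // 0: reject
            << DowngradeAllowed(FeatureFlag(V::RETROFIT), false)  // 1: allow
            << DowngradeAllowed(FeatureFlag(V::NONE), false)      // 1: allow
            << '\n';
  return 0;
}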
From 55c75417e22d5026971276997924a345d9973bbc Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 7 Oct 2019 11:20:39 -0700 Subject: [PATCH 136/624] update_engine: Deprecate major version 1 We have moved away from major version 1 in Chrome OS and already have a stepping stone for it in M53. So this cleanup makes the code much easier to understand. BUG=chromium:1008553 TEST=FEATURES="test" sudo emerge update_engine update_payload TEST=cros_generate_update_payload --image chromiumos_test_image.bin --check --output delta.bin Change-Id: I01815dfa5fdf395f8214ef162e01ecca2d42f7fc Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1857459 Tested-by: Amin Hassani Reviewed-by: Sen Jiang Commit-Queue: Amin Hassani --- payload_consumer/delta_performer.cc | 112 ++--------- payload_consumer/delta_performer.h | 5 - .../delta_performer_integration_test.cc | 150 +++++++++------ payload_consumer/delta_performer_unittest.cc | 94 +++++----- payload_consumer/payload_constants.cc | 8 +- payload_consumer/payload_constants.h | 2 +- payload_consumer/payload_metadata.cc | 85 +++------ payload_consumer/payload_metadata.h | 14 +- payload_generator/ab_generator_unittest.cc | 70 +++---- .../delta_diff_utils_unittest.cc | 9 +- .../full_update_generator_unittest.cc | 2 +- payload_generator/generate_delta_main.cc | 19 +- payload_generator/payload_file.cc | 124 +++++-------- .../payload_generation_config.cc | 14 +- payload_generator/payload_properties.cc | 3 +- payload_generator/payload_signer.cc | 45 ++--- payload_generator/payload_signer.h | 5 +- scripts/payload_info.py | 49 ++--- scripts/payload_info_unittest.py | 145 ++++++--------- scripts/update_payload/applier.py | 90 ++++----- scripts/update_payload/checker.py | 174 ++++++------------ scripts/update_payload/checker_unittest.py | 126 +++++-------- scripts/update_payload/common.py | 6 +- scripts/update_payload/payload.py | 4 +- scripts/update_payload/test_utils.py | 59 +++--- scripts/update_payload/update_metadata_pb2.py | 64 ++++--- update_metadata.proto | 12 +- 27 files changed, 586 insertions(+), 904 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index cc39943c..ee5f38cc 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -713,8 +713,7 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { // In major version 2, we don't add dummy operation to the payload. // If we already extracted the signature we should skip this step. 
- if (major_payload_version_ == kBrilloMajorPayloadVersion && - manifest_.has_signatures_offset() && manifest_.has_signatures_size() && + if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() && signatures_message_data_.empty()) { if (manifest_.signatures_offset() != buffer_offset_) { LOG(ERROR) << "Payload signatures offset points to blob offset " @@ -749,51 +748,11 @@ bool DeltaPerformer::IsManifestValid() { } bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { - if (major_payload_version_ == kBrilloMajorPayloadVersion) { - partitions_.clear(); - for (const PartitionUpdate& partition : manifest_.partitions()) { - partitions_.push_back(partition); - } - manifest_.clear_partitions(); - } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) { - LOG(INFO) << "Converting update information from old format."; - PartitionUpdate root_part; - root_part.set_partition_name(kPartitionNameRoot); -#ifdef __ANDROID__ - LOG(WARNING) << "Legacy payload major version provided to an Android " - "build. Assuming no post-install. Please use major version " - "2 or newer."; - root_part.set_run_postinstall(false); -#else - root_part.set_run_postinstall(true); -#endif // __ANDROID__ - if (manifest_.has_old_rootfs_info()) { - *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info(); - manifest_.clear_old_rootfs_info(); - } - if (manifest_.has_new_rootfs_info()) { - *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info(); - manifest_.clear_new_rootfs_info(); - } - *root_part.mutable_operations() = manifest_.install_operations(); - manifest_.clear_install_operations(); - partitions_.push_back(std::move(root_part)); - - PartitionUpdate kern_part; - kern_part.set_partition_name(kPartitionNameKernel); - kern_part.set_run_postinstall(false); - if (manifest_.has_old_kernel_info()) { - *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info(); - manifest_.clear_old_kernel_info(); - } - if (manifest_.has_new_kernel_info()) { - *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info(); - manifest_.clear_new_kernel_info(); - } - *kern_part.mutable_operations() = manifest_.kernel_install_operations(); - manifest_.clear_kernel_install_operations(); - partitions_.push_back(std::move(kern_part)); + partitions_.clear(); + for (const PartitionUpdate& partition : manifest_.partitions()) { + partitions_.push_back(partition); } + manifest_.clear_partitions(); // Fill in the InstallPlan::partitions based on the partitions from the // payload. @@ -954,14 +913,6 @@ bool DeltaPerformer::PerformReplaceOperation( TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset()); TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length()); - // Extract the signature message if it's in this operation. - if (ExtractSignatureMessageFromOperation(operation)) { - // If this is dummy replace operation, we ignore it after extracting the - // signature. - DiscardBuffer(true, 0); - return true; - } - // Setup the ExtentWriter stack based on the operation type. 
std::unique_ptr writer = std::make_unique(); @@ -1412,19 +1363,6 @@ bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation, return true; } -bool DeltaPerformer::ExtractSignatureMessageFromOperation( - const InstallOperation& operation) { - if (operation.type() != InstallOperation::REPLACE || - !manifest_.has_signatures_offset() || - manifest_.signatures_offset() != operation.data_offset()) { - return false; - } - TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() && - manifest_.signatures_size() == operation.data_length()); - TEST_AND_RETURN_FALSE(ExtractSignatureMessage()); - return true; -} - bool DeltaPerformer::ExtractSignatureMessage() { TEST_AND_RETURN_FALSE(signatures_message_data_.empty()); TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset()); @@ -1476,11 +1414,11 @@ ErrorCode DeltaPerformer::ValidateManifest() { // Perform assorted checks to sanity check the manifest, make sure it // matches data from other sources, and that it is a supported version. - bool has_old_fields = - (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info()); - for (const PartitionUpdate& partition : manifest_.partitions()) { - has_old_fields = has_old_fields || partition.has_old_partition_info(); - } + bool has_old_fields = std::any_of(manifest_.partitions().begin(), + manifest_.partitions().end(), + [](const PartitionUpdate& partition) { + return partition.has_old_partition_info(); + }); // The presence of an old partition hash is the sole indicator for a delta // update. @@ -1522,16 +1460,12 @@ ErrorCode DeltaPerformer::ValidateManifest() { } } - if (major_payload_version_ != kChromeOSMajorPayloadVersion) { - if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() || - manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() || - manifest_.install_operations_size() != 0 || - manifest_.kernel_install_operations_size() != 0) { - LOG(ERROR) << "Manifest contains deprecated field only supported in " - << "major payload version 1, but the payload major version is " - << major_payload_version_; - return ErrorCode::kPayloadMismatchedType; - } + if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() || + manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() || + manifest_.install_operations_size() != 0 || + manifest_.kernel_install_operations_size() != 0) { + LOG(ERROR) << "Manifest contains deprecated fields."; + return ErrorCode::kPayloadMismatchedType; } if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) { @@ -1542,18 +1476,8 @@ ErrorCode DeltaPerformer::ValidateManifest() { return ErrorCode::kPayloadTimestampError; } - if (major_payload_version_ == kChromeOSMajorPayloadVersion) { - if (manifest_.has_dynamic_partition_metadata()) { - LOG(ERROR) - << "Should not contain dynamic_partition_metadata for major version " - << kChromeOSMajorPayloadVersion - << ". Please use major version 2 or above."; - return ErrorCode::kPayloadMismatchedType; - } - } - - // TODO(garnold) we should be adding more and more manifest checks, such as - // partition boundaries etc (see chromium-os:37661). + // TODO(crbug.com/37661) we should be adding more and more manifest checks, + // such as partition boundaries, etc. 
return ErrorCode::kSuccess; } diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 4493c2ae..78607475 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -237,11 +237,6 @@ class DeltaPerformer : public FileWriter { FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, ErrorCode* error); - // Extracts the payload signature message from the blob on the |operation| if - // the offset matches the one specified by the manifest. Returns whether the - // signature was extracted. - bool ExtractSignatureMessageFromOperation(const InstallOperation& operation); - // Extracts the payload signature message from the current |buffer_| if the // offset matches the one specified by the manifest. Returns whether the // signature was extracted. diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 904ea5a5..5f557392 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -76,6 +76,7 @@ struct DeltaState { string delta_path; uint64_t metadata_size; + uint32_t metadata_signature_size; string old_kernel; brillo::Blob old_kernel_data; @@ -187,17 +188,32 @@ static void SignGeneratedPayload(const string& payload_path, uint64_t* out_metadata_size) { string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); int signature_size = GetSignatureSize(private_key_path); - brillo::Blob hash; + brillo::Blob metadata_hash, payload_hash; ASSERT_TRUE(PayloadSigner::HashPayloadForSigning( - payload_path, {signature_size}, &hash, nullptr)); - brillo::Blob signature; - ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature)); - ASSERT_TRUE(PayloadSigner::AddSignatureToPayload( - payload_path, {signature}, {}, payload_path, out_metadata_size)); + payload_path, {signature_size}, &payload_hash, &metadata_hash)); + brillo::Blob metadata_signature, payload_signature; + ASSERT_TRUE(PayloadSigner::SignHash( + payload_hash, private_key_path, &payload_signature)); + ASSERT_TRUE(PayloadSigner::SignHash( + metadata_hash, private_key_path, &metadata_signature)); + ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path, + {payload_signature}, + {metadata_signature}, + payload_path, + out_metadata_size)); EXPECT_TRUE(PayloadSigner::VerifySignedPayload( payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath))); } +static void SignHashToFile(const string& hash_file, + const string& signature_file, + const string& private_key_file) { + brillo::Blob hash, signature; + ASSERT_TRUE(utils::ReadFile(hash_file, &hash)); + ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_file, &signature)); + ASSERT_TRUE(test_utils::WriteFileVector(signature_file, signature)); +} + static void SignGeneratedShellPayload(SignatureTest signature_test, const string& payload_path) { string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); @@ -230,7 +246,8 @@ static void SignGeneratedShellPayload(SignatureTest signature_test, RSA_free(rsa); } int signature_size = GetSignatureSize(private_key_path); - test_utils::ScopedTempFile hash_file("hash.XXXXXX"); + test_utils::ScopedTempFile payload_hash_file("hash.XXXXXX"), + metadata_hash_file("hash.XXXXXX"); string signature_size_string; if (signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) @@ -241,38 +258,51 @@ static void SignGeneratedShellPayload(SignatureTest signature_test, 
string delta_generator_path = GetBuildArtifactsPath("delta_generator"); ASSERT_EQ(0, System(base::StringPrintf( - "%s -in_file=%s -signature_size=%s -out_hash_file=%s", + "%s -in_file=%s -signature_size=%s -out_hash_file=%s " + "-out_metadata_hash_file=%s", delta_generator_path.c_str(), payload_path.c_str(), signature_size_string.c_str(), - hash_file.path().c_str()))); - - // Sign the hash - brillo::Blob hash, signature; - ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash)); - ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature)); - - test_utils::ScopedTempFile sig_file("signature.XXXXXX"); - ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature)); - string sig_files = sig_file.path(); - - test_utils::ScopedTempFile sig_file2("signature.XXXXXX"); + payload_hash_file.path().c_str(), + metadata_hash_file.path().c_str()))); + + // Sign the payload hash. + test_utils::ScopedTempFile payload_signature_file("signature.XXXXXX"); + SignHashToFile(payload_hash_file.path(), + payload_signature_file.path(), + private_key_path); + string payload_sig_files = payload_signature_file.path(); + // Sign the metadata hash. + test_utils::ScopedTempFile metadata_signature_file("signature.XXXXXX"); + SignHashToFile(metadata_hash_file.path(), + metadata_signature_file.path(), + private_key_path); + string metadata_sig_files = metadata_signature_file.path(); + + test_utils::ScopedTempFile payload_signature_file2("signature.XXXXXX"); + test_utils::ScopedTempFile metadata_signature_file2("signature.XXXXXX"); if (signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) { - ASSERT_TRUE(PayloadSigner::SignHash( - hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature)); - ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature)); + SignHashToFile(payload_hash_file.path(), + payload_signature_file2.path(), + GetBuildArtifactsPath(kUnittestPrivateKey2Path)); + SignHashToFile(metadata_hash_file.path(), + metadata_signature_file2.path(), + GetBuildArtifactsPath(kUnittestPrivateKey2Path)); // Append second sig file to first path - sig_files += ":" + sig_file2.path(); + payload_sig_files += ":" + payload_signature_file2.path(); + metadata_sig_files += ":" + metadata_signature_file2.path(); } - ASSERT_EQ(0, - System(base::StringPrintf( - "%s -in_file=%s -payload_signature_file=%s -out_file=%s", - delta_generator_path.c_str(), - payload_path.c_str(), - sig_files.c_str(), - payload_path.c_str()))); + ASSERT_EQ( + 0, + System(base::StringPrintf("%s -in_file=%s -payload_signature_file=%s " + "-metadata_signature_file=%s -out_file=%s", + delta_generator_path.c_str(), + payload_path.c_str(), + payload_sig_files.c_str(), + metadata_sig_files.c_str(), + payload_path.c_str()))); int verify_result = System(base::StringPrintf( "%s -in_file=%s -public_key=%s -public_key_version=%d", delta_generator_path.c_str(), @@ -474,7 +504,7 @@ static void GenerateDeltaFile(bool full_kernel, payload_config.is_delta = !full_rootfs; payload_config.hard_chunk_size = chunk_size; payload_config.rootfs_partition_size = kRootFSPartitionSize; - payload_config.version.major = kChromeOSMajorPayloadVersion; + payload_config.version.major = kBrilloMajorPayloadVersion; payload_config.version.minor = minor_version; if (!full_rootfs) { payload_config.source.partitions.emplace_back(kPartitionNameRoot); @@ -564,6 +594,9 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta)); state->metadata_size = 
payload_metadata.GetMetadataSize(); LOG(INFO) << "Metadata size: " << state->metadata_size; + state->metadata_signature_size = + payload_metadata.GetMetadataSignatureSize(); + LOG(INFO) << "Metadata signature size: " << state->metadata_signature_size; DeltaArchiveManifest manifest; EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest)); @@ -575,7 +608,8 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_TRUE(manifest.has_signatures_size()); Signatures sigs_message; EXPECT_TRUE(sigs_message.ParseFromArray( - &state->delta[state->metadata_size + manifest.signatures_offset()], + &state->delta[state->metadata_size + state->metadata_signature_size + + manifest.signatures_offset()], manifest.signatures_size())); if (signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) @@ -597,13 +631,38 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_FALSE(signature.data().empty()); } + // TODO(ahassani): Make |DeltaState| into a partition list kind of struct + // instead of hardcoded kernel/rootfs so its cleaner and we can make the + // following code into a helper function instead. + const auto& kernel_part = *std::find_if( + manifest.partitions().begin(), + manifest.partitions().end(), + [](const PartitionUpdate& partition) { + return partition.partition_name() == kPartitionNameKernel; + }); if (full_kernel) { - EXPECT_FALSE(manifest.has_old_kernel_info()); + EXPECT_FALSE(kernel_part.has_old_partition_info()); } else { EXPECT_EQ(state->old_kernel_data.size(), - manifest.old_kernel_info().size()); - EXPECT_FALSE(manifest.old_kernel_info().hash().empty()); + kernel_part.old_partition_info().size()); + EXPECT_FALSE(kernel_part.old_partition_info().hash().empty()); + } + EXPECT_EQ(state->new_kernel_data.size(), + kernel_part.new_partition_info().size()); + EXPECT_FALSE(kernel_part.new_partition_info().hash().empty()); + + const auto& rootfs_part = + *std::find_if(manifest.partitions().begin(), + manifest.partitions().end(), + [](const PartitionUpdate& partition) { + return partition.partition_name() == kPartitionNameRoot; + }); + if (full_rootfs) { + EXPECT_FALSE(rootfs_part.has_old_partition_info()); + } else { + EXPECT_FALSE(rootfs_part.old_partition_info().hash().empty()); } + EXPECT_FALSE(rootfs_part.new_partition_info().hash().empty()); EXPECT_EQ(manifest.new_image_info().channel(), "test-channel"); EXPECT_EQ(manifest.new_image_info().board(), "test-board"); @@ -620,27 +679,14 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_EQ(manifest.old_image_info().build_channel(), "src-build-channel"); EXPECT_EQ(manifest.old_image_info().build_version(), "src-build-version"); } - - if (full_rootfs) { - EXPECT_FALSE(manifest.has_old_rootfs_info()); - EXPECT_FALSE(manifest.has_old_image_info()); - EXPECT_TRUE(manifest.has_new_image_info()); - } else { - EXPECT_EQ(state->image_size, manifest.old_rootfs_info().size()); - EXPECT_FALSE(manifest.old_rootfs_info().hash().empty()); - } - - EXPECT_EQ(state->new_kernel_data.size(), manifest.new_kernel_info().size()); - EXPECT_EQ(state->image_size, manifest.new_rootfs_info().size()); - - EXPECT_FALSE(manifest.new_kernel_info().hash().empty()); - EXPECT_FALSE(manifest.new_rootfs_info().hash().empty()); } MockPrefs prefs; EXPECT_CALL(prefs, SetInt64(kPrefsManifestMetadataSize, state->metadata_size)) .WillOnce(Return(true)); - EXPECT_CALL(prefs, SetInt64(kPrefsManifestSignatureSize, 0)) + EXPECT_CALL( + prefs, + SetInt64(kPrefsManifestSignatureSize, state->metadata_signature_size)) 
.WillOnce(Return(true)); EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _)) .WillRepeatedly(Return(true)); diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 61b58ed3..0671ecae 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -304,14 +304,16 @@ class DeltaPerformerTest : public ::testing::Test { // Set a valid magic string and version number 1. EXPECT_TRUE(performer_.Write("CrAU", 4)); - uint64_t version = htobe64(kChromeOSMajorPayloadVersion); + uint64_t version = htobe64(kBrilloMajorPayloadVersion); EXPECT_TRUE(performer_.Write(&version, 8)); payload_.metadata_size = expected_metadata_size; ErrorCode error_code; - // When filling in size in manifest, exclude the size of the 20-byte header. - uint64_t size_in_manifest = htobe64(actual_metadata_size - 20); - bool result = performer_.Write(&size_in_manifest, 8, &error_code); + // When filling in size in manifest, exclude the size of the 24-byte header. + uint64_t size_in_manifest = htobe64(actual_metadata_size - 24); + performer_.Write(&size_in_manifest, 8, &error_code); + uint32_t signature_size = htobe64(10); + bool result = performer_.Write(&signature_size, 4, &error_code); if (expected_metadata_size == actual_metadata_size || !hash_checks_mandatory) { EXPECT_TRUE(result); @@ -333,7 +335,7 @@ class DeltaPerformerTest : public ::testing::Test { brillo::Blob payload = GeneratePayload(brillo::Blob(), vector(), sign_payload, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, kFullPayloadMinorVersion); LOG(INFO) << "Payload size: " << payload.size(); @@ -347,6 +349,9 @@ class DeltaPerformerTest : public ::testing::Test { switch (metadata_signature_test) { case kEmptyMetadataSignature: payload_.metadata_signature.clear(); + // We need to set the signature size in a signed payload to zero. + std::fill( + std::next(payload.begin(), 20), std::next(payload.begin(), 24), 0); expected_result = MetadataParseResult::kError; expected_error = ErrorCode::kDownloadMetadataSignatureMissingError; break; @@ -447,7 +452,7 @@ TEST_F(DeltaPerformerTest, FullPayloadWriteTest) { brillo::Blob payload_data = GeneratePayload(expected_data, aops, false, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, kFullPayloadMinorVersion); EXPECT_EQ(expected_data, ApplyPayload(payload_data, "/dev/null", true)); @@ -469,7 +474,7 @@ TEST_F(DeltaPerformerTest, ShouldCancelTest) { brillo::Blob payload_data = GeneratePayload(expected_data, aops, false, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, kFullPayloadMinorVersion); testing::Mock::VerifyAndClearExpectations(&mock_delegate_); @@ -725,27 +730,32 @@ TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) { TEST_F(DeltaPerformerTest, ValidateManifestFullGoodTest) { // The Manifest we are validating. DeltaArchiveManifest manifest; - manifest.mutable_new_kernel_info(); - manifest.mutable_new_rootfs_info(); + for (const auto& part_name : {"kernel", "rootfs"}) { + auto part = manifest.add_partitions(); + part->set_partition_name(part_name); + part->mutable_new_partition_info(); + } manifest.set_minor_version(kFullPayloadMinorVersion); RunManifestValidation(manifest, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, InstallPayloadType::kFull, ErrorCode::kSuccess); } -TEST_F(DeltaPerformerTest, ValidateManifestDeltaGoodTest) { +TEST_F(DeltaPerformerTest, ValidateManifestDeltaMaxGoodTest) { // The Manifest we are validating. 
DeltaArchiveManifest manifest; - manifest.mutable_old_kernel_info(); - manifest.mutable_old_rootfs_info(); - manifest.mutable_new_kernel_info(); - manifest.mutable_new_rootfs_info(); + for (const auto& part_name : {"kernel", "rootfs"}) { + auto part = manifest.add_partitions(); + part->set_partition_name(part_name); + part->mutable_old_partition_info(); + part->mutable_new_partition_info(); + } manifest.set_minor_version(kMaxSupportedMinorPayloadVersion); RunManifestValidation(manifest, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, InstallPayloadType::kDelta, ErrorCode::kSuccess); } @@ -753,14 +763,16 @@ TEST_F(DeltaPerformerTest, ValidateManifestDeltaGoodTest) { TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) { // The Manifest we are validating. DeltaArchiveManifest manifest; - manifest.mutable_old_kernel_info(); - manifest.mutable_old_rootfs_info(); - manifest.mutable_new_kernel_info(); - manifest.mutable_new_rootfs_info(); + for (const auto& part_name : {"kernel", "rootfs"}) { + auto part = manifest.add_partitions(); + part->set_partition_name(part_name); + part->mutable_old_partition_info(); + part->mutable_new_partition_info(); + } manifest.set_minor_version(kMinSupportedMinorPayloadVersion); RunManifestValidation(manifest, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, InstallPayloadType::kDelta, ErrorCode::kSuccess); } @@ -778,9 +790,11 @@ TEST_F(DeltaPerformerTest, ValidateManifestFullUnsetMinorVersion) { TEST_F(DeltaPerformerTest, ValidateManifestDeltaUnsetMinorVersion) { // The Manifest we are validating. DeltaArchiveManifest manifest; - // Add an empty old_rootfs_info() to trick the DeltaPerformer into think that - // this is a delta payload manifest with a missing minor version. - manifest.mutable_old_rootfs_info(); + // Add an empty rootfs partition info to trick the DeltaPerformer into think + // that this is a delta payload manifest with a missing minor version. + auto rootfs = manifest.add_partitions(); + rootfs->set_partition_name("rootfs"); + rootfs->mutable_old_partition_info(); RunManifestValidation(manifest, kMaxSupportedMajorPayloadVersion, @@ -791,27 +805,15 @@ TEST_F(DeltaPerformerTest, ValidateManifestDeltaUnsetMinorVersion) { TEST_F(DeltaPerformerTest, ValidateManifestFullOldKernelTest) { // The Manifest we are validating. DeltaArchiveManifest manifest; - manifest.mutable_old_kernel_info(); - manifest.mutable_new_kernel_info(); - manifest.mutable_new_rootfs_info(); - manifest.set_minor_version(kMaxSupportedMinorPayloadVersion); - - RunManifestValidation(manifest, - kChromeOSMajorPayloadVersion, - InstallPayloadType::kFull, - ErrorCode::kPayloadMismatchedType); -} - -TEST_F(DeltaPerformerTest, ValidateManifestFullOldRootfsTest) { - // The Manifest we are validating. - DeltaArchiveManifest manifest; - manifest.mutable_old_rootfs_info(); - manifest.mutable_new_kernel_info(); - manifest.mutable_new_rootfs_info(); - manifest.set_minor_version(kMaxSupportedMinorPayloadVersion); - + for (const auto& part_name : {"kernel", "rootfs"}) { + auto part = manifest.add_partitions(); + part->set_partition_name(part_name); + part->mutable_old_partition_info(); + part->mutable_new_partition_info(); + } + manifest.mutable_partitions(0)->clear_old_partition_info(); RunManifestValidation(manifest, - kChromeOSMajorPayloadVersion, + kBrilloMajorPayloadVersion, InstallPayloadType::kFull, ErrorCode::kPayloadMismatchedType); } @@ -836,8 +838,8 @@ TEST_F(DeltaPerformerTest, ValidateManifestBadMinorVersion) { // Generate a bad version number. 
manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000); - // Mark the manifest as a delta payload by setting old_rootfs_info. - manifest.mutable_old_rootfs_info(); + // Mark the manifest as a delta payload by setting |old_partition_info|. + manifest.add_partitions()->mutable_old_partition_info(); RunManifestValidation(manifest, kMaxSupportedMajorPayloadVersion, diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 9e684d7c..908a8933 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -20,9 +20,12 @@ namespace chromeos_update_engine { -const uint64_t kChromeOSMajorPayloadVersion = 1; +// const uint64_t kChromeOSMajorPayloadVersion = 1; DEPRECATED const uint64_t kBrilloMajorPayloadVersion = 2; +const uint64_t kMinSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion; +const uint64_t kMaxSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion; + const uint32_t kFullPayloadMinorVersion = 0; // const uint32_t kInPlaceMinorPayloadVersion = 1; DEPRECATED const uint32_t kSourceMinorPayloadVersion = 2; @@ -34,9 +37,6 @@ const uint32_t kVerityMinorPayloadVersion = 6; const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion; const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion; -const uint64_t kMinSupportedMajorPayloadVersion = 1; -const uint64_t kMaxSupportedMajorPayloadVersion = 2; - const uint64_t kMaxPayloadHeaderSize = 24; const char kPartitionNameKernel[] = "kernel"; diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h index fe823f41..888fa2a1 100644 --- a/payload_consumer/payload_constants.h +++ b/payload_consumer/payload_constants.h @@ -26,7 +26,7 @@ namespace chromeos_update_engine { // The major version used by Chrome OS. -extern const uint64_t kChromeOSMajorPayloadVersion; +// extern const uint64_t kChromeOSMajorPayloadVersion; DEPRECATED // The major version used by Brillo. extern const uint64_t kBrilloMajorPayloadVersion; diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index 337edb43..4d8ee7b4 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -36,34 +36,18 @@ const uint64_t PayloadMetadata::kDeltaManifestSizeOffset = const uint64_t PayloadMetadata::kDeltaManifestSizeSize = 8; const uint64_t PayloadMetadata::kDeltaMetadataSignatureSizeSize = 4; -bool PayloadMetadata::GetMetadataSignatureSizeOffset( - uint64_t* out_offset) const { - if (GetMajorVersion() == kBrilloMajorPayloadVersion) { - *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize; - return true; - } - return false; +uint64_t PayloadMetadata::GetMetadataSignatureSizeOffset() const { + return kDeltaManifestSizeOffset + kDeltaManifestSizeSize; } -bool PayloadMetadata::GetManifestOffset(uint64_t* out_offset) const { - // Actual manifest begins right after the manifest size field or - // metadata signature size field if major version >= 2. 
- if (major_payload_version_ == kChromeOSMajorPayloadVersion) { - *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize; - return true; - } - if (major_payload_version_ == kBrilloMajorPayloadVersion) { - *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize + - kDeltaMetadataSignatureSizeSize; - return true; - } - LOG(ERROR) << "Unknown major payload version: " << major_payload_version_; - return false; +uint64_t PayloadMetadata::GetManifestOffset() const { + // Actual manifest begins right after the metadata signature size field. + return kDeltaManifestSizeOffset + kDeltaManifestSizeSize + + kDeltaMetadataSignatureSizeSize; } MetadataParseResult PayloadMetadata::ParsePayloadHeader( const brillo::Blob& payload, ErrorCode* error) { - uint64_t manifest_offset; // Ensure we have data to cover the major payload version. if (payload.size() < kDeltaManifestSizeOffset) return MetadataParseResult::kInsufficientData; @@ -75,6 +59,11 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( return MetadataParseResult::kError; } + uint64_t manifest_offset = GetManifestOffset(); + // Check again with the manifest offset. + if (payload.size() < manifest_offset) + return MetadataParseResult::kInsufficientData; + // Extract the payload version from the metadata. static_assert(sizeof(major_payload_version_) == kDeltaVersionSize, "Major payload version size mismatch"); @@ -92,15 +81,6 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( return MetadataParseResult::kError; } - // Get the manifest offset now that we have payload version. - if (!GetManifestOffset(&manifest_offset)) { - *error = ErrorCode::kUnsupportedMajorPayloadVersion; - return MetadataParseResult::kError; - } - // Check again with the manifest offset. - if (payload.size() < manifest_offset) - return MetadataParseResult::kInsufficientData; - // Next, parse the manifest size. static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize, "manifest_size size mismatch"); @@ -116,26 +96,20 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( return MetadataParseResult::kError; } - if (GetMajorVersion() == kBrilloMajorPayloadVersion) { - // Parse the metadata signature size. - static_assert( - sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize, - "metadata_signature_size size mismatch"); - uint64_t metadata_signature_size_offset; - if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) { - *error = ErrorCode::kError; - return MetadataParseResult::kError; - } - memcpy(&metadata_signature_size_, - &payload[metadata_signature_size_offset], - kDeltaMetadataSignatureSizeSize); - metadata_signature_size_ = be32toh(metadata_signature_size_); - - if (metadata_size_ + metadata_signature_size_ < metadata_size_) { - // Overflow detected. - *error = ErrorCode::kDownloadInvalidMetadataSize; - return MetadataParseResult::kError; - } + // Parse the metadata signature size. + static_assert( + sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize, + "metadata_signature_size size mismatch"); + uint64_t metadata_signature_size_offset = GetMetadataSignatureSizeOffset(); + memcpy(&metadata_signature_size_, + &payload[metadata_signature_size_offset], + kDeltaMetadataSignatureSizeSize); + metadata_signature_size_ = be32toh(metadata_signature_size_); + + if (metadata_size_ + metadata_signature_size_ < metadata_size_) { + // Overflow detected. 
+ *error = ErrorCode::kDownloadInvalidMetadataSize; + return MetadataParseResult::kError; } return MetadataParseResult::kSuccess; } @@ -147,9 +121,7 @@ bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload) { bool PayloadMetadata::GetManifest(const brillo::Blob& payload, DeltaArchiveManifest* out_manifest) const { - uint64_t manifest_offset; - if (!GetManifestOffset(&manifest_offset)) - return false; + uint64_t manifest_offset = GetManifestOffset(); CHECK_GE(payload.size(), manifest_offset + manifest_size_); return out_manifest->ParseFromArray(&payload[manifest_offset], manifest_size_); @@ -171,7 +143,7 @@ ErrorCode PayloadMetadata::ValidateMetadataSignature( << metadata_signature; return ErrorCode::kDownloadMetadataSignatureError; } - } else if (major_payload_version_ == kBrilloMajorPayloadVersion) { + } else { metadata_signature_protobuf_blob.assign( payload.begin() + metadata_size_, payload.begin() + metadata_size_ + metadata_signature_size_); @@ -243,8 +215,7 @@ bool PayloadMetadata::ParsePayloadFile(const string& payload_path, TEST_AND_RETURN_FALSE(GetManifest(payload, manifest)); } - if (metadata_signatures != nullptr && - GetMajorVersion() >= kBrilloMajorPayloadVersion) { + if (metadata_signatures != nullptr) { payload.clear(); TEST_AND_RETURN_FALSE(utils::ReadFileChunk( payload_path, GetMetadataSize(), GetMetadataSignatureSize(), &payload)); diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h index ec8eea63..be43c410 100644 --- a/payload_consumer/payload_metadata.h +++ b/payload_consumer/payload_metadata.h @@ -94,14 +94,12 @@ class PayloadMetadata { Signatures* metadata_signatures); private: - // Set |*out_offset| to the byte offset at which the manifest protobuf begins - // in a payload. Return true on success, false if the offset is unknown. - bool GetManifestOffset(uint64_t* out_offset) const; - - // Set |*out_offset| to the byte offset where the size of the metadata - // signature is stored in a payload. Return true on success, if this field is - // not present in the payload, return false. - bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const; + // Returns the byte offset at which the manifest protobuf begins in a payload. + uint64_t GetManifestOffset() const; + + // Returns the byte offset where the size of the metadata signature is stored + // in a payload. + uint64_t GetMetadataSignatureSizeOffset() const; uint64_t metadata_size_{0}; uint64_t manifest_size_{0}; diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc index 2f8c0c60..170e0e3b 100644 --- a/payload_generator/ab_generator_unittest.cc +++ b/payload_generator/ab_generator_unittest.cc @@ -30,10 +30,10 @@ #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" #include "update_engine/payload_generator/annotated_operation.h" -#include "update_engine/payload_generator/bzip.h" #include "update_engine/payload_generator/delta_diff_generator.h" #include "update_engine/payload_generator/extent_ranges.h" #include "update_engine/payload_generator/extent_utils.h" +#include "update_engine/payload_generator/xz.h" using std::string; using std::vector; @@ -48,8 +48,8 @@ bool ExtentEquals(const Extent& ext, return ext.start_block() == start_block && ext.num_blocks() == num_blocks; } -// Tests splitting of a REPLACE/REPLACE_BZ operation. -void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, +// Tests splitting of a REPLACE/REPLACE_XZ operation. 
+void TestSplitReplaceOrReplaceXzOperation(InstallOperation_Type orig_type, bool compressible) { const size_t op_ex1_start_block = 2; const size_t op_ex1_num_blocks = 2; @@ -71,7 +71,7 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, } ASSERT_EQ(part_size, part_data.size()); test_utils::ScopedTempFile part_file( - "SplitReplaceOrReplaceBzTest_part.XXXXXX"); + "SplitReplaceOrReplaceXzTest_part.XXXXXX"); ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data)); // Create original operation and blob data. @@ -97,7 +97,7 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, if (orig_type == InstallOperation::REPLACE) { op_blob = op_data; } else { - ASSERT_TRUE(BzipCompress(op_data, &op_blob)); + ASSERT_TRUE(XzCompress(op_data, &op_blob)); } op.set_data_offset(0); op.set_data_length(op_blob.size()); @@ -108,7 +108,7 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, // Create the data file. test_utils::ScopedTempFile data_file( - "SplitReplaceOrReplaceBzTest_data.XXXXXX"); + "SplitReplaceOrReplaceXzTest_data.XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob)); int data_fd = open(data_file.path().c_str(), O_RDWR, 000); EXPECT_GE(data_fd, 0); @@ -118,14 +118,14 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, // Split the operation. vector result_ops; - PayloadVersion version(kChromeOSMajorPayloadVersion, + PayloadVersion version(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion); ASSERT_TRUE(ABGenerator::SplitAReplaceOp( version, aop, part_file.path(), &result_ops, &blob_file)); // Check the result. InstallOperation_Type expected_type = - compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE; + compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE; ASSERT_EQ(2U, result_ops.size()); @@ -143,7 +143,7 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, part_data.begin() + op_ex1_offset + op_ex1_size); brillo::Blob first_expected_blob; if (compressible) { - ASSERT_TRUE(BzipCompress(first_expected_data, &first_expected_blob)); + ASSERT_TRUE(XzCompress(first_expected_data, &first_expected_blob)); } else { first_expected_blob = first_expected_data; } @@ -173,7 +173,7 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, part_data.begin() + op_ex2_offset + op_ex2_size); brillo::Blob second_expected_blob; if (compressible) { - ASSERT_TRUE(BzipCompress(second_expected_data, &second_expected_blob)); + ASSERT_TRUE(XzCompress(second_expected_data, &second_expected_blob)); } else { second_expected_blob = second_expected_data; } @@ -199,8 +199,8 @@ void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type, } } -// Tests merging of REPLACE/REPLACE_BZ operations. -void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, +// Tests merging of REPLACE/REPLACE_XZ operations. +void TestMergeReplaceOrReplaceXzOperations(InstallOperation_Type orig_type, bool compressible) { const size_t first_op_num_blocks = 1; const size_t second_op_num_blocks = 2; @@ -221,7 +221,7 @@ void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, } ASSERT_EQ(part_size, part_data.size()); test_utils::ScopedTempFile part_file( - "MergeReplaceOrReplaceBzTest_part.XXXXXX"); + "MergeReplaceOrReplaceXzTest_part.XXXXXX"); ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data)); // Create original operations and blob data. 
@@ -239,7 +239,7 @@ void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, if (orig_type == InstallOperation::REPLACE) { first_op_blob = first_op_data; } else { - ASSERT_TRUE(BzipCompress(first_op_data, &first_op_blob)); + ASSERT_TRUE(XzCompress(first_op_data, &first_op_blob)); } first_op.set_data_offset(0); first_op.set_data_length(first_op_blob.size()); @@ -259,7 +259,7 @@ void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, if (orig_type == InstallOperation::REPLACE) { second_op_blob = second_op_data; } else { - ASSERT_TRUE(BzipCompress(second_op_data, &second_op_blob)); + ASSERT_TRUE(XzCompress(second_op_data, &second_op_blob)); } second_op.set_data_offset(first_op_blob.size()); second_op.set_data_length(second_op_blob.size()); @@ -272,7 +272,7 @@ void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, // Create the data file. test_utils::ScopedTempFile data_file( - "MergeReplaceOrReplaceBzTest_data.XXXXXX"); + "MergeReplaceOrReplaceXzTest_data.XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data)); int data_fd = open(data_file.path().c_str(), O_RDWR, 000); EXPECT_GE(data_fd, 0); @@ -281,14 +281,14 @@ void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, BlobFileWriter blob_file(data_fd, &data_file_size); // Merge the operations. - PayloadVersion version(kChromeOSMajorPayloadVersion, + PayloadVersion version(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion); EXPECT_TRUE(ABGenerator::MergeOperations( &aops, version, 5, part_file.path(), &blob_file)); // Check the result. InstallOperation_Type expected_op_type = - compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE; + compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE; EXPECT_EQ(1U, aops.size()); InstallOperation new_op = aops[0].op; EXPECT_EQ(expected_op_type, new_op.type()); @@ -303,7 +303,7 @@ void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type, part_data.begin() + total_op_size); brillo::Blob expected_blob; if (compressible) { - ASSERT_TRUE(BzipCompress(expected_data, &expected_blob)); + ASSERT_TRUE(XzCompress(expected_data, &expected_blob)); } else { expected_blob = expected_data; } @@ -384,19 +384,19 @@ TEST_F(ABGeneratorTest, SplitSourceCopyTest) { } TEST_F(ABGeneratorTest, SplitReplaceTest) { - TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, false); + TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, false); } -TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceBzTest) { - TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, true); +TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceXzTest) { + TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, true); } -TEST_F(ABGeneratorTest, SplitReplaceBzTest) { - TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, true); +TEST_F(ABGeneratorTest, SplitReplaceXzTest) { + TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, true); } -TEST_F(ABGeneratorTest, SplitReplaceBzIntoReplaceTest) { - TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, false); +TEST_F(ABGeneratorTest, SplitReplaceXzIntoReplaceTest) { + TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, false); } TEST_F(ABGeneratorTest, SortOperationsByDestinationTest) { @@ -464,7 +464,7 @@ TEST_F(ABGeneratorTest, MergeSourceCopyOperationsTest) { aops.push_back(third_aop); BlobFileWriter blob_file(0, nullptr); - PayloadVersion 
version(kChromeOSMajorPayloadVersion, + PayloadVersion version(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion); EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 5, "", &blob_file)); @@ -484,19 +484,19 @@ TEST_F(ABGeneratorTest, MergeSourceCopyOperationsTest) { } TEST_F(ABGeneratorTest, MergeReplaceOperationsTest) { - TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, false); + TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, false); } -TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceBzTest) { - TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, true); +TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceXzTest) { + TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, true); } -TEST_F(ABGeneratorTest, MergeReplaceBzOperationsTest) { - TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, true); +TEST_F(ABGeneratorTest, MergeReplaceXzOperationsTest) { + TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, true); } -TEST_F(ABGeneratorTest, MergeReplaceBzOperationsToReplaceTest) { - TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, false); +TEST_F(ABGeneratorTest, MergeReplaceXzOperationsToReplaceTest) { + TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, false); } TEST_F(ABGeneratorTest, NoMergeOperationsTest) { @@ -537,7 +537,7 @@ TEST_F(ABGeneratorTest, NoMergeOperationsTest) { aops.push_back(fourth_aop); BlobFileWriter blob_file(0, nullptr); - PayloadVersion version(kChromeOSMajorPayloadVersion, + PayloadVersion version(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion); EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 4, "", &blob_file)); diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc index e25c867f..bc3dca57 100644 --- a/payload_generator/delta_diff_utils_unittest.cc +++ b/payload_generator/delta_diff_utils_unittest.cc @@ -136,7 +136,7 @@ class DeltaDiffUtilsTest : public ::testing::Test { bool RunDeltaMovedAndZeroBlocks(ssize_t chunk_blocks, uint32_t minor_version) { BlobFileWriter blob_file(blob_fd_, &blob_size_); - PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version); + PayloadVersion version(kBrilloMajorPayloadVersion, minor_version); ExtentRanges old_zero_blocks; return diff_utils::DeltaMovedAndZeroBlocks(&aops_, old_part_.path, @@ -225,8 +225,7 @@ TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) { new_extents, {}, // old_deflates {}, // new_deflates - PayloadVersion(kChromeOSMajorPayloadVersion, - kSourceMinorPayloadVersion), + PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion), &data, &op)); EXPECT_FALSE(data.empty()); @@ -268,7 +267,7 @@ TEST_F(DeltaDiffUtilsTest, SourceCopyTest) { new_extents, {}, // old_deflates {}, // new_deflates - PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion), + PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion), &data, &op)); EXPECT_TRUE(data.empty()); @@ -302,7 +301,7 @@ TEST_F(DeltaDiffUtilsTest, SourceBsdiffTest) { new_extents, {}, // old_deflates {}, // new_deflates - PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion), + PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion), &data, &op)); diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc index e3981257..5f39e8bc 100644 --- a/payload_generator/full_update_generator_unittest.cc +++ 
b/payload_generator/full_update_generator_unittest.cc @@ -90,7 +90,7 @@ TEST_F(FullUpdateGeneratorTest, RunTest) { EXPECT_EQ(config_.hard_chunk_size / config_.block_size, aops[i].op.dst_extents(0).num_blocks()); if (aops[i].op.type() != InstallOperation::REPLACE) { - EXPECT_EQ(InstallOperation::REPLACE_BZ, aops[i].op.type()); + EXPECT_EQ(InstallOperation::REPLACE_XZ, aops[i].op.type()); } } } diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 16f360f8..69ac8bbc 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -509,16 +509,10 @@ int Main(int argc, char** argv) { partition_names = base::SplitString( FLAGS_partition_names, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL); CHECK(!partition_names.empty()); - if (FLAGS_major_version == kChromeOSMajorPayloadVersion || - FLAGS_new_partitions.empty()) { - LOG_IF(FATAL, partition_names.size() != 2) - << "To support more than 2 partitions, please use the " - << "--new_partitions flag and major version 2."; - LOG_IF(FATAL, - partition_names[0] != kPartitionNameRoot || - partition_names[1] != kPartitionNameKernel) - << "To support non-default partition name, please use the " - << "--new_partitions flag and major version 2."; + if (FLAGS_major_version < kMinSupportedMajorPayloadVersion || + FLAGS_major_version > kMaxSupportedMajorPayloadVersion) { + LOG(FATAL) << "Unsupported major version " << FLAGS_major_version; + return 1; } if (!FLAGS_new_partitions.empty()) { @@ -577,8 +571,6 @@ int Main(int argc, char** argv) { } if (!FLAGS_new_postinstall_config_file.empty()) { - LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion) - << "Postinstall config is only allowed in major version 2 or newer."; brillo::KeyValueStore store; CHECK(store.Load(base::FilePath(FLAGS_new_postinstall_config_file))); CHECK(payload_config.target.LoadPostInstallConfig(store)); @@ -596,9 +588,6 @@ int Main(int argc, char** argv) { CHECK(payload_config.target.LoadImageSize()); if (!FLAGS_dynamic_partition_info_file.empty()) { - LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion) - << "Dynamic partition info is only allowed in major version 2 or " - "newer."; brillo::KeyValueStore store; CHECK(store.Load(base::FilePath(FLAGS_dynamic_partition_info_file))); CHECK(payload_config.target.LoadDynamicPartitionMetadata(store)); diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index 775a509d..b55d03c0 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -74,11 +74,9 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { manifest_.set_block_size(config.block_size); manifest_.set_max_timestamp(config.max_timestamp); - if (major_version_ == kBrilloMajorPayloadVersion) { - if (config.target.dynamic_partition_metadata != nullptr) - *(manifest_.mutable_dynamic_partition_metadata()) = - *(config.target.dynamic_partition_metadata); - } + if (config.target.dynamic_partition_metadata != nullptr) + *(manifest_.mutable_dynamic_partition_metadata()) = + *(config.target.dynamic_partition_metadata); return true; } @@ -86,13 +84,6 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { bool PayloadFile::AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, const vector& aops) { - // Check partitions order for Chrome OS - if (major_version_ == kChromeOSMajorPayloadVersion) { - const vector part_order = {kPartitionNameRoot, - kPartitionNameKernel}; - 
TEST_AND_RETURN_FALSE(part_vec_.size() < part_order.size()); - TEST_AND_RETURN_FALSE(new_conf.name == part_order[part_vec_.size()]); - } Partition part; part.name = new_conf.name; part.aops = aops; @@ -134,66 +125,45 @@ bool PayloadFile::WritePayload(const string& payload_file, } // Copy the operations and partition info from the part_vec_ to the manifest. - manifest_.clear_install_operations(); - manifest_.clear_kernel_install_operations(); manifest_.clear_partitions(); for (const auto& part : part_vec_) { - if (major_version_ == kBrilloMajorPayloadVersion) { - PartitionUpdate* partition = manifest_.add_partitions(); - partition->set_partition_name(part.name); - if (part.postinstall.run) { - partition->set_run_postinstall(true); - if (!part.postinstall.path.empty()) - partition->set_postinstall_path(part.postinstall.path); - if (!part.postinstall.filesystem_type.empty()) - partition->set_filesystem_type(part.postinstall.filesystem_type); - partition->set_postinstall_optional(part.postinstall.optional); - } - if (!part.verity.IsEmpty()) { - if (part.verity.hash_tree_extent.num_blocks() != 0) { - *partition->mutable_hash_tree_data_extent() = - part.verity.hash_tree_data_extent; - *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent; - partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm); - if (!part.verity.hash_tree_salt.empty()) - partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(), - part.verity.hash_tree_salt.size()); - } - if (part.verity.fec_extent.num_blocks() != 0) { - *partition->mutable_fec_data_extent() = part.verity.fec_data_extent; - *partition->mutable_fec_extent() = part.verity.fec_extent; - partition->set_fec_roots(part.verity.fec_roots); - } - } - for (const AnnotatedOperation& aop : part.aops) { - *partition->add_operations() = aop.op; + PartitionUpdate* partition = manifest_.add_partitions(); + partition->set_partition_name(part.name); + if (part.postinstall.run) { + partition->set_run_postinstall(true); + if (!part.postinstall.path.empty()) + partition->set_postinstall_path(part.postinstall.path); + if (!part.postinstall.filesystem_type.empty()) + partition->set_filesystem_type(part.postinstall.filesystem_type); + partition->set_postinstall_optional(part.postinstall.optional); + } + if (!part.verity.IsEmpty()) { + if (part.verity.hash_tree_extent.num_blocks() != 0) { + *partition->mutable_hash_tree_data_extent() = + part.verity.hash_tree_data_extent; + *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent; + partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm); + if (!part.verity.hash_tree_salt.empty()) + partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(), + part.verity.hash_tree_salt.size()); } - if (part.old_info.has_size() || part.old_info.has_hash()) - *(partition->mutable_old_partition_info()) = part.old_info; - if (part.new_info.has_size() || part.new_info.has_hash()) - *(partition->mutable_new_partition_info()) = part.new_info; - } else { - // major_version_ == kChromeOSMajorPayloadVersion - if (part.name == kPartitionNameKernel) { - for (const AnnotatedOperation& aop : part.aops) - *manifest_.add_kernel_install_operations() = aop.op; - if (part.old_info.has_size() || part.old_info.has_hash()) - *manifest_.mutable_old_kernel_info() = part.old_info; - if (part.new_info.has_size() || part.new_info.has_hash()) - *manifest_.mutable_new_kernel_info() = part.new_info; - } else { - for (const AnnotatedOperation& aop : part.aops) - *manifest_.add_install_operations() = aop.op; - if 
(part.old_info.has_size() || part.old_info.has_hash()) - *manifest_.mutable_old_rootfs_info() = part.old_info; - if (part.new_info.has_size() || part.new_info.has_hash()) - *manifest_.mutable_new_rootfs_info() = part.new_info; + if (part.verity.fec_extent.num_blocks() != 0) { + *partition->mutable_fec_data_extent() = part.verity.fec_data_extent; + *partition->mutable_fec_extent() = part.verity.fec_extent; + partition->set_fec_roots(part.verity.fec_roots); } } + for (const AnnotatedOperation& aop : part.aops) { + *partition->add_operations() = aop.op; + } + if (part.old_info.has_size() || part.old_info.has_hash()) + *(partition->mutable_old_partition_info()) = part.old_info; + if (part.new_info.has_size() || part.new_info.has_hash()) + *(partition->mutable_new_partition_info()) = part.new_info; } // Signatures appear at the end of the blobs. Note the offset in the - // manifest_. + // |manifest_|. uint64_t signature_blob_length = 0; if (!private_key_path.empty()) { TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength( @@ -201,7 +171,6 @@ bool PayloadFile::WritePayload(const string& payload_file, PayloadSigner::AddSignatureToManifest( next_blob_offset, signature_blob_length, - major_version_ == kChromeOSMajorPayloadVersion, &manifest_); } @@ -229,18 +198,14 @@ bool PayloadFile::WritePayload(const string& payload_file, TEST_AND_RETURN_FALSE( WriteUint64AsBigEndian(&writer, serialized_manifest.size())); - // Write metadata signature size. - uint32_t metadata_signature_size = 0; - if (major_version_ == kBrilloMajorPayloadVersion) { - // Metadata signature has the same size as payload signature, because they - // are both the same kind of signature for the same kind of hash. - uint32_t metadata_signature_size = htobe32(signature_blob_length); - TEST_AND_RETURN_FALSE_ERRNO(writer.Write(&metadata_signature_size, - sizeof(metadata_signature_size))); - metadata_size += sizeof(metadata_signature_size); - // Set correct size instead of big endian size. - metadata_signature_size = signature_blob_length; - } + // Metadata signature has the same size as payload signature, because they + // are both the same kind of signature for the same kind of hash. + uint32_t metadata_signature_size = htobe32(signature_blob_length); + TEST_AND_RETURN_FALSE_ERRNO( + writer.Write(&metadata_signature_size, sizeof(metadata_signature_size))); + metadata_size += sizeof(metadata_signature_size); + // Set correct size instead of big endian size. + metadata_signature_size = signature_blob_length; // Write protobuf LOG(INFO) << "Writing final delta file protobuf... " @@ -249,8 +214,7 @@ bool PayloadFile::WritePayload(const string& payload_file, writer.Write(serialized_manifest.data(), serialized_manifest.size())); // Write metadata signature blob. - if (major_version_ == kBrilloMajorPayloadVersion && - !private_key_path.empty()) { + if (!private_key_path.empty()) { brillo::Blob metadata_hash, metadata_signature; TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile( payload_file, metadata_size, &metadata_hash)); @@ -262,7 +226,7 @@ bool PayloadFile::WritePayload(const string& payload_file, writer.Write(metadata_signature.data(), metadata_signature.size())); } - // Append the data blobs + // Append the data blobs. 
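Aside: the WritePayload() hunk above emits the major-version-2 payload header field by field. As a reading aid, here is a minimal parsing sketch assuming only the field order and sizes visible in this hunk (8-byte big-endian manifest size, 4-byte big-endian metadata signature size) plus the usual 4-byte "CrAU" magic and 8-byte big-endian file format version that precede them, giving a 24-byte header; the function name parse_v2_header is illustrative, not code from this tree.

import struct

def parse_v2_header(blob):
    # 4-byte magic, 8-byte file format (major) version, 8-byte manifest size,
    # 4-byte metadata signature size -- integers are big-endian, 24 bytes total.
    magic, major, manifest_size, metadata_sig_size = struct.unpack(
        '>4sQQI', blob[:24])
    if magic != b'CrAU':
        raise ValueError('not an update payload')
    return major, manifest_size, metadata_sig_size

The serialized manifest then starts at offset 24, followed by the metadata signature blob of metadata_sig_size bytes, then the data blobs; the payload signature blob is located via the signatures_offset/signatures_size fields recorded in the manifest, as the comment above notes.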
LOG(INFO) << "Writing final delta file data blobs..."; int blobs_fd = open(ordered_blobs_path.c_str(), O_RDONLY, 0); ScopedFdCloser blobs_fd_closer(&blobs_fd); diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index 3b791c82..e1f700a2 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -219,8 +219,7 @@ PayloadVersion::PayloadVersion(uint64_t major_version, uint32_t minor_version) { } bool PayloadVersion::Validate() const { - TEST_AND_RETURN_FALSE(major == kChromeOSMajorPayloadVersion || - major == kBrilloMajorPayloadVersion); + TEST_AND_RETURN_FALSE(major == kBrilloMajorPayloadVersion); TEST_AND_RETURN_FALSE(minor == kFullPayloadMinorVersion || minor == kSourceMinorPayloadVersion || minor == kOpSrcHashMinorPayloadVersion || @@ -236,13 +235,10 @@ bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const { case InstallOperation::REPLACE: case InstallOperation::REPLACE_BZ: // These operations were included in the original payload format. - return true; - case InstallOperation::REPLACE_XZ: - // These operations are included in the major version used in Brillo, but - // can also be used with minor version 3 or newer. - return major == kBrilloMajorPayloadVersion || - minor >= kOpSrcHashMinorPayloadVersion; + // These operations are included minor version 3 or newer and full + // payloads. + return true; case InstallOperation::ZERO: case InstallOperation::DISCARD: @@ -298,8 +294,6 @@ bool PayloadGenerationConfig::Validate() const { for (const PartitionConfig& part : target.partitions) { TEST_AND_RETURN_FALSE(part.ValidateExists()); TEST_AND_RETURN_FALSE(part.size % block_size == 0); - if (version.major == kChromeOSMajorPayloadVersion) - TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty()); if (version.minor < kVerityMinorPayloadVersion) TEST_AND_RETURN_FALSE(part.verity.IsEmpty()); } diff --git a/payload_generator/payload_properties.cc b/payload_generator/payload_properties.cc index 53e69f38..bc82eb7a 100644 --- a/payload_generator/payload_properties.cc +++ b/payload_generator/payload_properties.cc @@ -119,8 +119,7 @@ bool PayloadProperties::LoadFromPayload() { metadata_signatures_ = base::JoinString(base64_signatures, ":"); } - is_delta_ = manifest.has_old_image_info() || manifest.has_old_kernel_info() || - manifest.has_old_rootfs_info() || + is_delta_ = manifest.has_old_image_info() || std::any_of(manifest.partitions().begin(), manifest.partitions().end(), [](const PartitionUpdate& part) { diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 2a7021f0..420329ff 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -98,23 +98,20 @@ bool AddSignatureBlobToPayload(const string& payload_path, uint64_t metadata_size = payload_metadata.GetMetadataSize(); uint32_t metadata_signature_size = payload_metadata.GetMetadataSignatureSize(); - if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) { - // Write metadata signature size in header. - uint32_t metadata_signature_size_be = - htobe32(metadata_signature_blob.size()); - memcpy(payload.data() + manifest_offset, - &metadata_signature_size_be, - sizeof(metadata_signature_size_be)); - manifest_offset += sizeof(metadata_signature_size_be); - // Replace metadata signature. 
- payload.erase(payload.begin() + metadata_size, - payload.begin() + metadata_size + metadata_signature_size); - payload.insert(payload.begin() + metadata_size, - metadata_signature_blob.begin(), - metadata_signature_blob.end()); - metadata_signature_size = metadata_signature_blob.size(); - LOG(INFO) << "Metadata signature size: " << metadata_signature_size; - } + // Write metadata signature size in header. + uint32_t metadata_signature_size_be = htobe32(metadata_signature_blob.size()); + memcpy(payload.data() + manifest_offset, + &metadata_signature_size_be, + sizeof(metadata_signature_size_be)); + manifest_offset += sizeof(metadata_signature_size_be); + // Replace metadata signature. + payload.erase(payload.begin() + metadata_size, + payload.begin() + metadata_size + metadata_signature_size); + payload.insert(payload.begin() + metadata_size, + metadata_signature_blob.begin(), + metadata_signature_blob.end()); + metadata_signature_size = metadata_signature_blob.size(); + LOG(INFO) << "Metadata signature size: " << metadata_signature_size; DeltaArchiveManifest manifest; TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest)); @@ -138,7 +135,6 @@ bool AddSignatureBlobToPayload(const string& payload_path, PayloadSigner::AddSignatureToManifest( payload.size() - metadata_size - metadata_signature_size, signature_blob.size(), - payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion, &manifest); // Updates the payload to include the new manifest. @@ -209,25 +205,12 @@ bool CalculateHashFromPayload(const brillo::Blob& payload, void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset, uint64_t signature_blob_length, - bool add_dummy_op, DeltaArchiveManifest* manifest) { LOG(INFO) << "Making room for signature in file"; manifest->set_signatures_offset(signature_blob_offset); LOG(INFO) << "set? " << manifest->has_signatures_offset(); manifest->set_signatures_offset(signature_blob_offset); manifest->set_signatures_size(signature_blob_length); - // Add a dummy op at the end to appease older clients - if (add_dummy_op) { - InstallOperation* dummy_op = manifest->add_kernel_install_operations(); - dummy_op->set_type(InstallOperation::REPLACE); - dummy_op->set_data_offset(signature_blob_offset); - dummy_op->set_data_length(signature_blob_length); - Extent* dummy_extent = dummy_op->add_dst_extents(); - // Tell the dummy op to write this data to a big sparse hole - dummy_extent->set_start_block(kSparseHole); - dummy_extent->set_num_blocks( - utils::DivRoundUp(signature_blob_length, kBlockSize)); - } } bool PayloadSigner::VerifySignedPayload(const string& payload_path, diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h index 83ddadc1..71f4983a 100644 --- a/payload_generator/payload_signer.h +++ b/payload_generator/payload_signer.h @@ -39,12 +39,9 @@ class PayloadSigner { static bool VerifySignedPayload(const std::string& payload_path, const std::string& public_key_path); - // Adds specified signature offset/length to given |manifest|, also adds a - // dummy operation that points to a signature blob located at the specified - // offset/length if |add_dummy_op| is true. + // Adds specified signature offset/length to given |manifest|. 
static void AddSignatureToManifest(uint64_t signature_blob_offset, uint64_t signature_blob_length, - bool add_dummy_op, DeltaArchiveManifest* manifest); // Given a raw |hash| and a private key in |private_key_path| calculates the diff --git a/scripts/payload_info.py b/scripts/payload_info.py index d10cb241..bb7f8a41 100755 --- a/scripts/payload_info.py +++ b/scripts/payload_info.py @@ -27,7 +27,6 @@ import update_payload -MAJOR_PAYLOAD_VERSION_CHROMEOS = 1 MAJOR_PAYLOAD_VERSION_BRILLO = 2 def DisplayValue(key, value): @@ -69,15 +68,11 @@ def _DisplayHeader(self): def _DisplayManifest(self): """Show information from the payload manifest.""" manifest = self.payload.manifest - if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO: - DisplayValue('Number of partitions', len(manifest.partitions)) - for partition in manifest.partitions: - DisplayValue(' Number of "%s" ops' % partition.partition_name, - len(partition.operations)) - else: - DisplayValue('Number of operations', len(manifest.install_operations)) - DisplayValue('Number of kernel ops', - len(manifest.kernel_install_operations)) + DisplayValue('Number of partitions', len(manifest.partitions)) + for partition in manifest.partitions: + DisplayValue(' Number of "%s" ops' % partition.partition_name, + len(partition.operations)) + DisplayValue('Block size', manifest.block_size) DisplayValue('Minor version', manifest.minor_version) @@ -131,8 +126,8 @@ def _DisplayOps(self, name, operations): Args: name: The name you want displayed above the operation table. - operations: The install_operations object that you want to display - information about. + operations: The operations object that you want to display information + about. """ def _DisplayExtents(extents, name): """Show information about extents.""" @@ -170,14 +165,9 @@ def _GetStats(self, manifest): read_blocks = 0 written_blocks = 0 num_write_seeks = 0 - if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO: - partitions_operations = [part.operations for part in manifest.partitions] - else: - partitions_operations = [manifest.install_operations, - manifest.kernel_install_operations] - for operations in partitions_operations: + for partition in manifest.partitions: last_ext = None - for curr_op in operations: + for curr_op in partition.operations: read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents]) written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents]) for curr_ext in curr_op.dst_extents: @@ -187,11 +177,10 @@ def _GetStats(self, manifest): num_write_seeks += 1 last_ext = curr_ext - # Old and new rootfs and kernel are read once during verification - read_blocks += manifest.old_rootfs_info.size / manifest.block_size - read_blocks += manifest.old_kernel_info.size / manifest.block_size - read_blocks += manifest.new_rootfs_info.size / manifest.block_size - read_blocks += manifest.new_kernel_info.size / manifest.block_size + # Old and new partitions are read once during verification. 
+ read_blocks += partition.old_partition_info.size / manifest.block_size + read_blocks += partition.new_partition_info.size / manifest.block_size + stats = {'read_blocks': read_blocks, 'written_blocks': written_blocks, 'num_write_seeks': num_write_seeks} @@ -215,15 +204,9 @@ def Run(self): self._DisplayStats(self.payload.manifest) if self.options.list_ops: print() - if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO: - for partition in self.payload.manifest.partitions: - self._DisplayOps('%s install operations' % partition.partition_name, - partition.operations) - else: - self._DisplayOps('Install operations', - self.payload.manifest.install_operations) - self._DisplayOps('Kernel install operations', - self.payload.manifest.kernel_install_operations) + for partition in self.payload.manifest.partitions: + self._DisplayOps('%s install operations' % partition.partition_name, + partition.operations) def main(): diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py index a4ee9d50..bf9f60a1 100755 --- a/scripts/payload_info_unittest.py +++ b/scripts/payload_info_unittest.py @@ -20,16 +20,16 @@ from __future__ import print_function import StringIO -import collections -import mock import sys import unittest +from contextlib import contextmanager + +import mock # pylint: disable=import-error + import payload_info import update_payload -from contextlib import contextmanager - from update_payload import update_metadata_pb2 class FakePayloadError(Exception): @@ -60,42 +60,47 @@ def __init__(self, src_extents, dst_extents, op_type, **kwargs): def HasField(self, field): return hasattr(self, field) +class FakeExtent(object): + """Fake Extent for testing.""" + def __init__(self, start_block, num_blocks): + self.start_block = start_block + self.num_blocks = num_blocks + +class FakePartitionInfo(object): + """Fake PartitionInfo for testing.""" + def __init__(self, size): + self.size = size + class FakePartition(object): """Fake PartitionUpdate field for testing.""" - def __init__(self, partition_name, operations): + def __init__(self, partition_name, operations, old_size, new_size): self.partition_name = partition_name self.operations = operations + self.old_partition_info = FakePartitionInfo(old_size) + self.new_partition_info = FakePartitionInfo(new_size) class FakeManifest(object): """Fake manifest for testing.""" - def __init__(self, major_version): - FakeExtent = collections.namedtuple('FakeExtent', - ['start_block', 'num_blocks']) - self.install_operations = [FakeOp([], - [FakeExtent(1, 1), FakeExtent(2, 2)], - update_payload.common.OpType.REPLACE_BZ, - dst_length=3*4096, - data_offset=1, - data_length=1)] - self.kernel_install_operations = [FakeOp( - [FakeExtent(1, 1)], - [FakeExtent(x, x) for x in xrange(20)], - update_payload.common.OpType.SOURCE_COPY, - src_length=4096)] - if major_version == payload_info.MAJOR_PAYLOAD_VERSION_BRILLO: - self.partitions = [FakePartition('root', self.install_operations), - FakePartition('kernel', - self.kernel_install_operations)] - self.install_operations = self.kernel_install_operations = [] + def __init__(self): + self.partitions = [ + FakePartition(update_payload.common.ROOTFS, + [FakeOp([], [FakeExtent(1, 1), FakeExtent(2, 2)], + update_payload.common.OpType.REPLACE_BZ, + dst_length=3*4096, + data_offset=1, + data_length=1) + ], 1 * 4096, 3 * 4096), + FakePartition(update_payload.common.KERNEL, + [FakeOp([FakeExtent(1, 1)], + [FakeExtent(x, x) for x in xrange(20)], + update_payload.common.OpType.SOURCE_COPY, + 
src_length=4096) + ], 2 * 4096, 4 * 4096), + ] self.block_size = 4096 self.minor_version = 4 - FakePartInfo = collections.namedtuple('FakePartInfo', ['size']) - self.old_rootfs_info = FakePartInfo(1 * 4096) - self.old_kernel_info = FakePartInfo(2 * 4096) - self.new_rootfs_info = FakePartInfo(3 * 4096) - self.new_kernel_info = FakePartInfo(4 * 4096) self.signatures_offset = None self.signatures_size = None @@ -106,23 +111,22 @@ def HasField(self, field_name): class FakeHeader(object): """Fake payload header for testing.""" - def __init__(self, version, manifest_len, metadata_signature_len): - self.version = version + def __init__(self, manifest_len, metadata_signature_len): + self.version = payload_info.MAJOR_PAYLOAD_VERSION_BRILLO self.manifest_len = manifest_len self.metadata_signature_len = metadata_signature_len @property def size(self): - return (20 if self.version == payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS - else 24) + return 24 class FakePayload(object): """Fake payload for testing.""" - def __init__(self, major_version): - self._header = FakeHeader(major_version, 222, 0) + def __init__(self): + self._header = FakeHeader(222, 0) self.header = None - self._manifest = FakeManifest(major_version) + self._manifest = FakeManifest() self.manifest = None self._blobs = {} @@ -203,41 +207,14 @@ def testDisplayValue(self): def testRun(self): """Verify that Run parses and displays the payload like we expect.""" payload_cmd = payload_info.PayloadCommand(FakeOption(action='show')) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS) - expected_out = """Payload version: 1 + payload = FakePayload() + expected_out = """Payload version: 2 Manifest length: 222 -Number of operations: 1 -Number of kernel ops: 1 +Number of partitions: 2 + Number of "root" ops: 1 + Number of "kernel" ops: 1 Block size: 4096 Minor version: 4 -""" - self.TestCommand(payload_cmd, payload, expected_out) - - def testListOpsOnVersion1(self): - """Verify that the --list_ops option gives the correct output.""" - payload_cmd = payload_info.PayloadCommand( - FakeOption(list_ops=True, action='show')) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS) - expected_out = """Payload version: 1 -Manifest length: 222 -Number of operations: 1 -Number of kernel ops: 1 -Block size: 4096 -Minor version: 4 - -Install operations: - 0: REPLACE_BZ - Data offset: 1 - Data length: 1 - Destination: 2 extents (3 blocks) - (1,1) (2,2) -Kernel install operations: - 0: SOURCE_COPY - Source: 1 extent (1 block) - (1,1) - Destination: 20 extents (190 blocks) - (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10) - (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19) """ self.TestCommand(payload_cmd, payload, expected_out) @@ -245,7 +222,7 @@ def testListOpsOnVersion2(self): """Verify that the --list_ops option gives the correct output.""" payload_cmd = payload_info.PayloadCommand( FakeOption(list_ops=True, action='show')) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO) + payload = FakePayload() expected_out = """Payload version: 2 Manifest length: 222 Number of partitions: 2 @@ -267,23 +244,6 @@ def testListOpsOnVersion2(self): Destination: 20 extents (190 blocks) (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10) (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19) -""" - self.TestCommand(payload_cmd, payload, expected_out) - - def testStatsOnVersion1(self): - """Verify that the --stats option works correctly.""" - payload_cmd = 
payload_info.PayloadCommand( - FakeOption(stats=True, action='show')) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS) - expected_out = """Payload version: 1 -Manifest length: 222 -Number of operations: 1 -Number of kernel ops: 1 -Block size: 4096 -Minor version: 4 -Blocks read: 11 -Blocks written: 193 -Seeks when writing: 18 """ self.TestCommand(payload_cmd, payload, expected_out) @@ -291,7 +251,7 @@ def testStatsOnVersion2(self): """Verify that the --stats option works correctly on version 2.""" payload_cmd = payload_info.PayloadCommand( FakeOption(stats=True, action='show')) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO) + payload = FakePayload() expected_out = """Payload version: 2 Manifest length: 222 Number of partitions: 2 @@ -309,11 +269,12 @@ def testEmptySignatures(self): """Verify that the --signatures option works with unsigned payloads.""" payload_cmd = payload_info.PayloadCommand( FakeOption(action='show', signatures=True)) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS) - expected_out = """Payload version: 1 + payload = FakePayload() + expected_out = """Payload version: 2 Manifest length: 222 -Number of operations: 1 -Number of kernel ops: 1 +Number of partitions: 2 + Number of "root" ops: 1 + Number of "kernel" ops: 1 Block size: 4096 Minor version: 4 No metadata signatures stored in the payload @@ -325,7 +286,7 @@ def testSignatures(self): """Verify that the --signatures option shows the present signatures.""" payload_cmd = payload_info.PayloadCommand( FakeOption(action='show', signatures=True)) - payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO) + payload = FakePayload() payload.AddPayloadSignature(version=1, data='12345678abcdefgh\x00\x01\x02\x03') payload.AddPayloadSignature(data='I am a signature so access is yes.') diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py index 3f644448..511ed497 100644 --- a/scripts/update_payload/applier.py +++ b/scripts/update_payload/applier.py @@ -29,7 +29,6 @@ import array import bz2 import hashlib -import itertools # Not everywhere we can have the lzma library so we ignore it if we didn't have # it because it is not going to be used. For example, 'cros flash' uses # devserver code which eventually loads this file, but the lzma library is not @@ -45,7 +44,6 @@ except ImportError: pass import os -import shutil import subprocess import sys import tempfile @@ -116,12 +114,8 @@ def _ReadExtents(file_obj, extents, block_size, max_length=-1): break read_length = min(max_length, ex.num_blocks * block_size) - # Fill with zeros or read from file, depending on the type of extent. - if ex.start_block == common.PSEUDO_EXTENT_MARKER: - data.extend(itertools.repeat('\0', read_length)) - else: - file_obj.seek(ex.start_block * block_size) - data.fromfile(file_obj, read_length) + file_obj.seek(ex.start_block * block_size) + data.fromfile(file_obj, read_length) max_length -= read_length @@ -150,11 +144,9 @@ def _WriteExtents(file_obj, data, extents, block_size, base_name): raise PayloadError('%s: more write extents than data' % ex_name) write_length = min(data_length, ex.num_blocks * block_size) - # Only do actual writing if this is not a pseudo-extent. 
- if ex.start_block != common.PSEUDO_EXTENT_MARKER: - file_obj.seek(ex.start_block * block_size) - data_view = buffer(data, data_offset, write_length) - file_obj.write(data_view) + file_obj.seek(ex.start_block * block_size) + data_view = buffer(data, data_offset, write_length) + file_obj.write(data_view) data_offset += write_length data_length -= write_length @@ -189,15 +181,12 @@ def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1): if not data_length: raise PayloadError('%s: more extents than total data length' % ex_name) - is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER - start_byte = -1 if is_pseudo else ex.start_block * block_size + start_byte = ex.start_block * block_size num_bytes = ex.num_blocks * block_size if data_length < num_bytes: # We're only padding a real extent. - if not is_pseudo: - pad_off = start_byte + data_length - pad_len = num_bytes - data_length - + pad_off = start_byte + data_length + pad_len = num_bytes - data_length num_bytes = data_length arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes) @@ -274,30 +263,28 @@ def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): num_blocks = ex.num_blocks count = num_blocks * block_size - # Make sure it's not a fake (signature) operation. - if start_block != common.PSEUDO_EXTENT_MARKER: - data_end = data_start + count + data_end = data_start + count - # Make sure we're not running past partition boundary. - if (start_block + num_blocks) * block_size > part_size: - raise PayloadError( - '%s: extent (%s) exceeds partition size (%d)' % - (ex_name, common.FormatExtent(ex, block_size), - part_size)) + # Make sure we're not running past partition boundary. + if (start_block + num_blocks) * block_size > part_size: + raise PayloadError( + '%s: extent (%s) exceeds partition size (%d)' % + (ex_name, common.FormatExtent(ex, block_size), + part_size)) - # Make sure that we have enough data to write. - if data_end >= data_length + block_size: - raise PayloadError( - '%s: more dst blocks than data (even with padding)') + # Make sure that we have enough data to write. + if data_end >= data_length + block_size: + raise PayloadError( + '%s: more dst blocks than data (even with padding)') - # Pad with zeros if necessary. - if data_end > data_length: - padding = data_end - data_length - out_data += '\0' * padding + # Pad with zeros if necessary. + if data_end > data_length: + padding = data_end - data_length + out_data += '\0' * padding - self.payload.payload_file.seek(start_block * block_size) - part_file.seek(start_block * block_size) - part_file.write(out_data[data_start:data_end]) + self.payload.payload_file.seek(start_block * block_size) + part_file.seek(start_block * block_size) + part_file.write(out_data[data_start:data_end]) data_start += count @@ -323,10 +310,8 @@ def _ApplyZeroOperation(self, op, op_name, part_file): # Iterate over the extents and write zero. # pylint: disable=unused-variable for ex, ex_name in common.ExtentIter(op.dst_extents, base_name): - # Only do actual writing if this is not a pseudo-extent. 
- if ex.start_block != common.PSEUDO_EXTENT_MARKER: - part_file.seek(ex.start_block * block_size) - part_file.write('\0' * (ex.num_blocks * block_size)) + part_file.seek(ex.start_block * block_size) + part_file.write('\0' * (ex.num_blocks * block_size)) def _ApplySourceCopyOperation(self, op, op_name, old_part_file, new_part_file): @@ -597,20 +582,11 @@ def Run(self, new_parts, old_parts=None): install_operations = [] manifest = self.payload.manifest - if self.payload.header.version == 1: - for real_name, proto_name in common.CROS_PARTITIONS: - new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name) - old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name) - - install_operations.append((common.ROOTFS, manifest.install_operations)) - install_operations.append((common.KERNEL, - manifest.kernel_install_operations)) - else: - for part in manifest.partitions: - name = part.partition_name - new_part_info[name] = part.new_partition_info - old_part_info[name] = part.old_partition_info - install_operations.append((name, part.operations)) + for part in manifest.partitions: + name = part.partition_name + new_part_info[name] = part.new_partition_info + old_part_info[name] = part.old_partition_info + install_operations.append((name, part.operations)) part_names = set(new_part_info.keys()) # Equivalently, old_part_info.keys() diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py index 674d9f4e..4558872b 100644 --- a/scripts/update_payload/checker.py +++ b/scripts/update_payload/checker.py @@ -45,11 +45,9 @@ # Constants. # -_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents' _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block' _CHECK_PAYLOAD_SIG = 'payload-sig' CHECKS_TO_DISABLE = ( - _CHECK_DST_PSEUDO_EXTENTS, _CHECK_MOVE_SAME_SRC_DST_BLOCK, _CHECK_PAYLOAD_SIG, ) @@ -320,8 +318,6 @@ def __init__(self, payload, assert_type=None, block_size=0, self.allow_unhashed = allow_unhashed # Disable specific tests. - self.check_dst_pseudo_extents = ( - _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests) self.check_move_same_src_dst_block = ( _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests) self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests @@ -625,35 +621,23 @@ def _CheckManifest(self, report, part_sizes=None): self._CheckPresentIff(self.sigs_offset, self.sigs_size, 'signatures_offset', 'signatures_size', 'manifest') - if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION: - for real_name, proto_name in common.CROS_PARTITIONS: - self.old_part_info[real_name] = self._CheckOptionalSubMsg( - manifest, 'old_%s_info' % proto_name, report) - self.new_part_info[real_name] = self._CheckMandatorySubMsg( - manifest, 'new_%s_info' % proto_name, report, 'manifest') - - # Check: old_kernel_info <==> old_rootfs_info. - self._CheckPresentIff(self.old_part_info[common.KERNEL].msg, - self.old_part_info[common.ROOTFS].msg, - 'old_kernel_info', 'old_rootfs_info', 'manifest') - else: - for part in manifest.partitions: - name = part.partition_name - self.old_part_info[name] = self._CheckOptionalSubMsg( - part, 'old_partition_info', report) - self.new_part_info[name] = self._CheckMandatorySubMsg( - part, 'new_partition_info', report, 'manifest.partitions') - - # Check: Old-style partition infos should not be specified. 
- for _, part in common.CROS_PARTITIONS: - self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest') - self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest') - - # Check: If old_partition_info is specified anywhere, it must be - # specified everywhere. - old_part_msgs = [part.msg for part in self.old_part_info.values() if part] - self._CheckPresentIffMany(old_part_msgs, 'old_partition_info', - 'manifest.partitions') + for part in manifest.partitions: + name = part.partition_name + self.old_part_info[name] = self._CheckOptionalSubMsg( + part, 'old_partition_info', report) + self.new_part_info[name] = self._CheckMandatorySubMsg( + part, 'new_partition_info', report, 'manifest.partitions') + + # Check: Old-style partition infos should not be specified. + for _, part in common.CROS_PARTITIONS: + self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest') + self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest') + + # Check: If old_partition_info is specified anywhere, it must be + # specified everywhere. + old_part_msgs = [part.msg for part in self.old_part_info.values() if part] + self._CheckPresentIffMany(old_part_msgs, 'old_partition_info', + 'manifest.partitions') is_delta = any(part and part.msg for part in self.old_part_info.values()) if is_delta: @@ -721,8 +705,7 @@ def _CheckLength(self, length, total_blocks, op_name, length_name): self._CheckBlocksFitLength(length, total_blocks, self.block_size, '%s: %s' % (op_name, length_name)) - def _CheckExtents(self, extents, usable_size, block_counters, name, - allow_pseudo=False, allow_signature=False): + def _CheckExtents(self, extents, usable_size, block_counters, name): """Checks a sequence of extents. Args: @@ -730,8 +713,6 @@ def _CheckExtents(self, extents, usable_size, block_counters, name, usable_size: The usable size of the partition to which the extents apply. block_counters: Array of counters corresponding to the number of blocks. name: The name of the extent block. - allow_pseudo: Whether or not pseudo block numbers are allowed. - allow_signature: Whether or not the extents are used for a signature. Returns: The total number of blocks in the extents. @@ -752,20 +733,15 @@ def _CheckExtents(self, extents, usable_size, block_counters, name, if num_blocks == 0: raise error.PayloadError('%s: extent length is zero.' % ex_name) - if start_block != common.PSEUDO_EXTENT_MARKER: - # Check: Make sure we're within the partition limit. - if usable_size and end_block * self.block_size > usable_size: - raise error.PayloadError( - '%s: extent (%s) exceeds usable partition size (%d).' % - (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) + # Check: Make sure we're within the partition limit. + if usable_size and end_block * self.block_size > usable_size: + raise error.PayloadError( + '%s: extent (%s) exceeds usable partition size (%d).' % + (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) - # Record block usage. - for i in xrange(start_block, end_block): - block_counters[i] += 1 - elif not (allow_pseudo or (allow_signature and len(extents) == 1)): - # Pseudo-extents must be allowed explicitly, or otherwise be part of a - # signature operation (in which case there has to be exactly one). - raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name) + # Record block usage. 
+ for i in xrange(start_block, end_block): + block_counters[i] += 1 total_num_blocks += num_blocks @@ -896,21 +872,19 @@ def _CheckAnySourceOperation(self, op, total_src_blocks, op_name): if self.minor_version >= 3 and op.src_sha256_hash is None: raise error.PayloadError('%s: source hash missing.' % op_name) - def _CheckOperation(self, op, op_name, is_last, old_block_counters, - new_block_counters, old_usable_size, new_usable_size, - prev_data_offset, allow_signature, blob_hash_counts): + def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters, + old_usable_size, new_usable_size, prev_data_offset, + blob_hash_counts): """Checks a single update operation. Args: op: The operation object. op_name: Operation name string for error reporting. - is_last: Whether this is the last operation in the sequence. old_block_counters: Arrays of block read counters. new_block_counters: Arrays of block write counters. old_usable_size: The overall usable size for src data in bytes. new_usable_size: The overall usable size for dst data in bytes. prev_data_offset: Offset of last used data bytes. - allow_signature: Whether this may be a signature operation. blob_hash_counts: Counters for hashed/unhashed blobs. Returns: @@ -922,14 +896,10 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, # Check extents. total_src_blocks = self._CheckExtents( op.src_extents, old_usable_size, old_block_counters, - op_name + '.src_extents', allow_pseudo=True) - allow_signature_in_extents = (allow_signature and is_last and - op.type == common.OpType.REPLACE) + op_name + '.src_extents') total_dst_blocks = self._CheckExtents( op.dst_extents, new_usable_size, new_block_counters, - op_name + '.dst_extents', - allow_pseudo=(not self.check_dst_pseudo_extents), - allow_signature=allow_signature_in_extents) + op_name + '.dst_extents') # Check: data_offset present <==> data_length present. data_offset = self._CheckOptionalField(op, 'data_offset', None) @@ -965,9 +935,7 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, (op_name, common.FormatSha256(op.data_sha256_hash), common.FormatSha256(actual_hash.digest()))) elif data_offset is not None: - if allow_signature_in_extents: - blob_hash_counts['signature'] += 1 - elif self.allow_unhashed: + if self.allow_unhashed: blob_hash_counts['unhashed'] += 1 else: raise error.PayloadError('%s: unhashed operation not allowed.' % @@ -981,11 +949,8 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, (op_name, data_offset, prev_data_offset)) # Type-specific checks. - if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ): - self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) - elif (op.type == common.OpType.REPLACE_XZ and - (self.minor_version >= 3 or - self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)): + if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, + common.OpType.REPLACE_XZ): self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) elif op.type == common.OpType.ZERO and self.minor_version >= 4: self._CheckZeroOperation(op, op_name) @@ -1030,7 +995,7 @@ def _AllocBlockCounters(self, total_size): def _CheckOperations(self, operations, report, base_name, old_fs_size, new_fs_size, old_usable_size, new_usable_size, - prev_data_offset, allow_signature): + prev_data_offset): """Checks a sequence of update operations. 
Args: @@ -1042,7 +1007,6 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, old_usable_size: The overall usable size of the old partition in bytes. new_usable_size: The overall usable size of the new partition in bytes. prev_data_offset: Offset of last used data bytes. - allow_signature: Whether this sequence may contain signature operations. Returns: The total data blob size used. @@ -1078,8 +1042,6 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, 'hashed': 0, 'unhashed': 0, } - if allow_signature: - blob_hash_counts['signature'] = 0 # Allocate old and new block counters. old_block_counters = (self._AllocBlockCounters(old_usable_size) @@ -1096,12 +1058,10 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type)) op_counts[op.type] += 1 - is_last = op_num == len(operations) curr_data_used = self._CheckOperation( - op, op_name, is_last, old_block_counters, new_block_counters, + op, op_name, old_block_counters, new_block_counters, old_usable_size, new_usable_size, - prev_data_offset + total_data_used, allow_signature, - blob_hash_counts) + prev_data_offset + total_data_used, blob_hash_counts) if curr_data_used: op_blob_totals[op.type] += curr_data_used total_data_used += curr_data_used @@ -1155,21 +1115,18 @@ def _CheckSignatures(self, report, pubkey_file_name): if not sigs.signatures: raise error.PayloadError('Signature block is empty.') - last_ops_section = (self.payload.manifest.kernel_install_operations or - self.payload.manifest.install_operations) - - # Only major version 1 has the fake signature OP at the end. - if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION: - fake_sig_op = last_ops_section[-1] + # Check that we don't have the signature operation blob at the end (used to + # be for major version 1). + last_partition = self.payload.manifest.partitions[-1] + if last_partition.operations: + last_op = last_partition.operations[-1] # Check: signatures_{offset,size} must match the last (fake) operation. - if not (fake_sig_op.type == common.OpType.REPLACE and - self.sigs_offset == fake_sig_op.data_offset and - self.sigs_size == fake_sig_op.data_length): - raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not' - ' match last operation (%d+%d).' % - (self.sigs_offset, self.sigs_size, - fake_sig_op.data_offset, - fake_sig_op.data_length)) + if (last_op.type == common.OpType.REPLACE and + last_op.data_offset == self.sigs_offset and + last_op.data_length == self.sigs_size): + raise error.PayloadError('It seems like the last operation is the ' + 'signature blob. This is an invalid payload.') + # Compute the checksum of all data up to signature blob. # TODO(garnold) we're re-reading the whole data section into a string @@ -1248,29 +1205,17 @@ def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, self._CheckManifest(report, part_sizes) assert self.payload_type, 'payload type should be known by now' - manifest = self.payload.manifest - - # Part 3: Examine partition operations. 
- install_operations = [] - if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION: - # partitions field should not ever exist in major version 1 payloads - self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest') - - install_operations.append((common.ROOTFS, manifest.install_operations)) - install_operations.append((common.KERNEL, - manifest.kernel_install_operations)) - - else: - self._CheckRepeatedElemNotPresent(manifest, 'install_operations', - 'manifest') - self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations', + # Make sure deprecated values are not present in the payload. + for field in ('install_operations', 'kernel_install_operations'): + self._CheckRepeatedElemNotPresent(self.payload.manifest, field, 'manifest') - - for update in manifest.partitions: - install_operations.append((update.partition_name, update.operations)) + for field in ('old_kernel_info', 'old_rootfs_info', + 'new_kernel_info', 'new_rootfs_info'): + self._CheckElemNotPresent(self.payload.manifest, field, 'manifest') total_blob_size = 0 - for part, operations in install_operations: + for part, operations in ((p.partition_name, p.operations) + for p in self.payload.manifest.partitions): report.AddSection('%s operations' % part) new_fs_usable_size = self.new_fs_sizes[part] @@ -1285,16 +1230,13 @@ def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, total_blob_size += self._CheckOperations( operations, report, '%s_install_operations' % part, self.old_fs_sizes[part], self.new_fs_sizes[part], - old_fs_usable_size, new_fs_usable_size, total_blob_size, - (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION - and part == common.KERNEL)) + old_fs_usable_size, new_fs_usable_size, total_blob_size) # Check: Operations data reach the end of the payload file. used_payload_size = self.payload.data_offset + total_blob_size # Major versions 2 and higher have a signature at the end, so it should be # considered in the total size of the image. - if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and - self.sigs_size): + if self.sigs_size: used_payload_size += self.sigs_size if used_payload_size != payload_file_size: diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py index b5f2f3e2..48816539 100755 --- a/scripts/update_payload/checker_unittest.py +++ b/scripts/update_payload/checker_unittest.py @@ -427,10 +427,10 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, payload_gen.SetBlockSize(test_utils.KiB(4)) # Add some operations. 
- payload_gen.AddOperation(False, common.OpType.SOURCE_COPY, + payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY, src_extents=[(0, 16), (16, 497)], dst_extents=[(16, 496), (0, 16)]) - payload_gen.AddOperation(True, common.OpType.SOURCE_COPY, + payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY, src_extents=[(0, 8), (8, 8)], dst_extents=[(8, 8), (0, 8)]) @@ -456,19 +456,21 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki: oki_hash = (None if fail_bad_oki else hashlib.sha256('fake-oki-content').digest()) - payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash) + payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size, + oki_hash) if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or fail_bad_ori): ori_hash = (None if fail_bad_ori else hashlib.sha256('fake-ori-content').digest()) - payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash) + payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size, + ori_hash) # Add new kernel/rootfs partition info. payload_gen.SetPartInfo( - True, True, new_kernel_fs_size, + common.KERNEL, True, new_kernel_fs_size, None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest()) payload_gen.SetPartInfo( - False, True, new_rootfs_fs_size, + common.ROOTFS, True, new_rootfs_fs_size, None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest()) # Set the minor version. @@ -521,23 +523,6 @@ def testCheckExtents(self): payload_checker._CheckExtents(extents, (1024 + 16) * block_size, collections.defaultdict(int), 'foo')) - # Passes w/ pseudo-extents (aka sparse holes). - extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5), - (8, 3)) - self.assertEquals( - 12, - payload_checker._CheckExtents(extents, (1024 + 16) * block_size, - collections.defaultdict(int), 'foo', - allow_pseudo=True)) - - # Passes w/ pseudo-extent due to a signature. - extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2)) - self.assertEquals( - 2, - payload_checker._CheckExtents(extents, (1024 + 16) * block_size, - collections.defaultdict(int), 'foo', - allow_signature=True)) - # Fails, extent missing a start block. extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16)) self.assertRaises( @@ -704,8 +689,8 @@ def testCheckSourceCopyOperation_FailBlockCountsMismatch(self): self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation, None, 0, 1, 'foo') - def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, - allow_unhashed, fail_src_extents, fail_dst_extents, + def DoCheckOperationTest(self, op_type_name, allow_unhashed, + fail_src_extents, fail_dst_extents, fail_mismatched_data_offset_length, fail_missing_dst_extents, fail_src_length, fail_dst_length, fail_data_hash, @@ -715,8 +700,6 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, Args: op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'. - is_last: Whether we're testing the last operation in a sequence. - allow_signature: Whether we're testing a signature-capable operation. allow_unhashed: Whether we're allowing to not hash the data. fail_src_extents: Tamper with src extents. fail_dst_extents: Tamper with dst extents. @@ -762,8 +745,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, self.NewExtentList((1, 16))) total_src_blocks = 16 - # TODO(tbrindus): add major version 2 tests. 
- payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION + payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ): payload_checker.minor_version = 0 elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF): @@ -785,13 +767,11 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, op.data_offset = prev_data_offset fake_data = 'fake-data'.ljust(op.data_length) - if not (allow_unhashed or (is_last and allow_signature and - op_type == common.OpType.REPLACE)): - if not fail_data_hash: - # Create a valid data blob hash. - op.data_sha256_hash = hashlib.sha256(fake_data).digest() - payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn( - fake_data) + if not allow_unhashed and not fail_data_hash: + # Create a valid data blob hash. + op.data_sha256_hash = hashlib.sha256(fake_data).digest() + payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn( + fake_data) elif fail_data_hash: # Create an invalid data blob hash. @@ -833,8 +813,8 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, fail_missing_dst_extents or fail_src_length or fail_dst_length or fail_data_hash or fail_prev_data_offset or fail_bad_minor_version) - args = (op, 'foo', is_last, old_block_counters, new_block_counters, - old_part_size, new_part_size, prev_data_offset, allow_signature, + args = (op, 'foo', old_block_counters, new_block_counters, + old_part_size, new_part_size, prev_data_offset, blob_hash_counts) if should_fail: self.assertRaises(PayloadError, payload_checker._CheckOperation, *args) @@ -876,7 +856,7 @@ def DoCheckOperationsTest(self, fail_nonexhaustive_full_update): if fail_nonexhaustive_full_update: rootfs_data_length -= block_size - payload_gen.AddOperation(False, rootfs_op_type, + payload_gen.AddOperation(common.ROOTFS, rootfs_op_type, dst_extents=[(0, rootfs_data_length / block_size)], data_offset=0, data_length=rootfs_data_length) @@ -887,17 +867,17 @@ def DoCheckOperationsTest(self, fail_nonexhaustive_full_update): 'allow_unhashed': True}) payload_checker.payload_type = checker._TYPE_FULL report = checker._PayloadReport() - - args = (payload_checker.payload.manifest.install_operations, report, 'foo', - 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False) + partition = next((p for p in payload_checker.payload.manifest.partitions + if p.partition_name == common.ROOTFS), None) + args = (partition.operations, report, 'foo', + 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0) if fail_nonexhaustive_full_update: self.assertRaises(PayloadError, payload_checker._CheckOperations, *args) else: self.assertEqual(rootfs_data_length, payload_checker._CheckOperations(*args)) - def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, - fail_mismatched_pseudo_op, fail_sig_missing_fields, + def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields, fail_unknown_sig_version, fail_incorrect_sig): """Tests _CheckSignatures().""" # Generate a test payload. 
For this test, we only care about the signature @@ -908,20 +888,18 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, payload_gen.SetBlockSize(block_size) rootfs_part_size = test_utils.MiB(2) kernel_part_size = test_utils.KiB(16) - payload_gen.SetPartInfo(False, True, rootfs_part_size, + payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size, hashlib.sha256('fake-new-rootfs-content').digest()) - payload_gen.SetPartInfo(True, True, kernel_part_size, + payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size, hashlib.sha256('fake-new-kernel-content').digest()) payload_gen.SetMinorVersion(0) payload_gen.AddOperationWithData( - False, common.OpType.REPLACE, + common.ROOTFS, common.OpType.REPLACE, dst_extents=[(0, rootfs_part_size / block_size)], data_blob=os.urandom(rootfs_part_size)) - do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op) - do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or - fail_sig_missing_fields or fail_unknown_sig_version - or fail_incorrect_sig) + do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or + fail_unknown_sig_version or fail_incorrect_sig) sigs_data = None if do_forge_sigs_data: @@ -937,22 +915,12 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, sigs_data = sigs_gen.ToBinary() payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data)) - if do_forge_pseudo_op: - assert sigs_data is not None, 'should have forged signatures blob by now' - sigs_len = len(sigs_data) - payload_gen.AddOperation( - False, common.OpType.REPLACE, - data_offset=payload_gen.curr_offset / 2, - data_length=sigs_len / 2, - dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)]) - # Generate payload (complete w/ signature) and create the test object. 
payload_checker = _GetPayloadChecker( payload_gen.WriteToFileWithData, payload_gen_dargs={ 'sigs_data': sigs_data, - 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME, - 'do_add_pseudo_operation': not do_forge_pseudo_op}) + 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME}) payload_checker.payload_type = checker._TYPE_FULL report = checker._PayloadReport() @@ -962,8 +930,7 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, common.KERNEL: kernel_part_size }) - should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or - fail_mismatched_pseudo_op or fail_sig_missing_fields or + should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or fail_unknown_sig_version or fail_incorrect_sig) args = (report, test_utils._PUBKEY_FILE_NAME) if should_fail: @@ -1016,9 +983,9 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, payload_gen.SetBlockSize(block_size) kernel_filesystem_size = test_utils.KiB(16) rootfs_filesystem_size = test_utils.MiB(2) - payload_gen.SetPartInfo(False, True, rootfs_filesystem_size, + payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size, hashlib.sha256('fake-new-rootfs-content').digest()) - payload_gen.SetPartInfo(True, True, kernel_filesystem_size, + payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size, hashlib.sha256('fake-new-kernel-content').digest()) payload_gen.SetMinorVersion(0) @@ -1029,7 +996,7 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, if fail_rootfs_part_size_exceeded: rootfs_op_size += block_size payload_gen.AddOperationWithData( - False, common.OpType.REPLACE, + common.ROOTFS, common.OpType.REPLACE, dst_extents=[(0, rootfs_op_size / block_size)], data_blob=os.urandom(rootfs_op_size)) @@ -1040,7 +1007,7 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, if fail_kernel_part_size_exceeded: kernel_op_size += block_size payload_gen.AddOperationWithData( - True, common.OpType.REPLACE, + common.KERNEL, common.OpType.REPLACE, dst_extents=[(0, kernel_op_size / block_size)], data_blob=os.urandom(kernel_op_size)) @@ -1052,16 +1019,14 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, else: use_block_size = block_size - # For the unittests 246 is the value that generated for the payload. - metadata_size = 246 + # For the unittests 237 is the value that generated for the payload. 
+ metadata_size = 237 if fail_mismatched_metadata_size: metadata_size += 1 kwargs = { 'payload_gen_dargs': { 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME, - 'do_add_pseudo_operation': True, - 'is_pseudo_in_kernel': True, 'padding': os.urandom(1024) if fail_excess_data else None}, 'checker_init_dargs': { 'assert_type': 'delta' if fail_wrong_payload_type else 'full', @@ -1073,7 +1038,7 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData, **kwargs) - kwargs = { + kwargs2 = { 'pubkey_file_name': test_utils._PUBKEY_FILE_NAME, 'metadata_size': metadata_size, 'part_sizes': { @@ -1085,15 +1050,14 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, fail_rootfs_part_size_exceeded or fail_kernel_part_size_exceeded) if should_fail: - self.assertRaises(PayloadError, payload_checker.Run, **kwargs) + self.assertRaises(PayloadError, payload_checker.Run, **kwargs2) else: - self.assertIsNone(payload_checker.Run(**kwargs)) + self.assertIsNone(payload_checker.Run(**kwargs2)) # This implements a generic API, hence the occasional unused args. # pylint: disable=W0613 -def ValidateCheckOperationTest(op_type_name, is_last, allow_signature, - allow_unhashed, fail_src_extents, - fail_dst_extents, +def ValidateCheckOperationTest(op_type_name, allow_unhashed, + fail_src_extents, fail_dst_extents, fail_mismatched_data_offset_length, fail_missing_dst_extents, fail_src_length, fail_dst_length, fail_data_hash, @@ -1147,7 +1111,7 @@ def AddParametricTests(tested_method_name, arg_space, validate_func=None): run_method_name = 'Do%sTest' % tested_method_name test_method_name = 'test%s' % tested_method_name for arg_key, arg_val in run_dargs.iteritems(): - if arg_val or type(arg_val) is int: + if arg_val or isinstance(arg_val, int): test_method_name += '__%s=%s' % (arg_key, arg_val) setattr(PayloadCheckerTest, test_method_name, TestMethodBody(run_method_name, run_dargs)) @@ -1196,8 +1160,6 @@ def AddAllParametricTests(): {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'SOURCE_COPY', 'SOURCE_BSDIFF', 'PUFFDIFF', 'BROTLI_BSDIFF'), - 'is_last': (True, False), - 'allow_signature': (True, False), 'allow_unhashed': (True, False), 'fail_src_extents': (True, False), 'fail_dst_extents': (True, False), @@ -1217,8 +1179,6 @@ def AddAllParametricTests(): # Add all _CheckOperations() test cases. AddParametricTests('CheckSignatures', {'fail_empty_sigs_blob': (True, False), - 'fail_missing_pseudo_op': (True, False), - 'fail_mismatched_pseudo_op': (True, False), 'fail_sig_missing_fields': (True, False), 'fail_unknown_sig_version': (True, False), 'fail_incorrect_sig': (True, False)}) diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py index b7b53dc8..dfb8181a 100644 --- a/scripts/update_payload/common.py +++ b/scripts/update_payload/common.py @@ -25,15 +25,12 @@ # # Constants. 
# -PSEUDO_EXTENT_MARKER = (1L << 64) - 1 # UINT64_MAX - SIG_ASN1_HEADER = ( '\x30\x31\x30\x0d\x06\x09\x60\x86' '\x48\x01\x65\x03\x04\x02\x01\x05' '\x00\x04\x20' ) -CHROMEOS_MAJOR_PAYLOAD_VERSION = 1 BRILLO_MAJOR_PAYLOAD_VERSION = 2 SOURCE_MINOR_PAYLOAD_VERSION = 2 @@ -162,8 +159,7 @@ def FormatExtent(ex, block_size=0): end_block = ex.start_block + ex.num_blocks if block_size: return '%d->%d * %d' % (ex.start_block, end_block, block_size) - else: - return '%d->%d' % (ex.start_block, end_block) + return '%d->%d' % (ex.start_block, end_block) def FormatSha256(digest): diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py index 2a0cb58d..1ed5f99e 100644 --- a/scripts/update_payload/payload.py +++ b/scripts/update_payload/payload.py @@ -263,9 +263,7 @@ def ResetFile(self): def IsDelta(self): """Returns True iff the payload appears to be a delta.""" self._AssertInit() - return (self.manifest.HasField('old_kernel_info') or - self.manifest.HasField('old_rootfs_info') or - any(partition.HasField('old_partition_info') + return (any(partition.HasField('old_partition_info') for partition in self.manifest.partitions)) def IsFull(self): diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py index f0edad57..4f5fed03 100644 --- a/scripts/update_payload/test_utils.py +++ b/scripts/update_payload/test_utils.py @@ -173,31 +173,37 @@ def SetBlockSize(self, block_size): self.block_size = block_size _SetMsgField(self.manifest, 'block_size', block_size) - def SetPartInfo(self, is_kernel, is_new, part_size, part_hash): + def SetPartInfo(self, part_name, is_new, part_size, part_hash): """Set the partition info entry. Args: - is_kernel: whether this is kernel partition info - is_new: whether to set old (False) or new (True) info - part_size: the partition size (in fact, filesystem size) - part_hash: the partition hash + part_name: The name of the partition. + is_new: Whether to set old (False) or new (True) info. + part_size: The partition size (in fact, filesystem size). + part_hash: The partition hash. 
""" - if is_kernel: - part_info = (self.manifest.new_kernel_info if is_new - else self.manifest.old_kernel_info) - else: - part_info = (self.manifest.new_rootfs_info if is_new - else self.manifest.old_rootfs_info) + partition = next((x for x in self.manifest.partitions + if x.partition_name == part_name), None) + if partition is None: + partition = self.manifest.partitions.add() + partition.partition_name = part_name + + part_info = (partition.new_partition_info if is_new + else partition.old_partition_info) _SetMsgField(part_info, 'size', part_size) _SetMsgField(part_info, 'hash', part_hash) - def AddOperation(self, is_kernel, op_type, data_offset=None, + def AddOperation(self, part_name, op_type, data_offset=None, data_length=None, src_extents=None, src_length=None, dst_extents=None, dst_length=None, data_sha256_hash=None): """Adds an InstallOperation entry.""" - operations = (self.manifest.kernel_install_operations if is_kernel - else self.manifest.install_operations) + partition = next((x for x in self.manifest.partitions + if x.partition_name == part_name), None) + if partition is None: + partition = self.manifest.partitions.add() + partition.partition_name = part_name + operations = partition.operations op = operations.add() op.type = op_type @@ -277,7 +283,7 @@ def AddData(self, data_blob): self.data_blobs.append(data_blob) return data_length, data_offset - def AddOperationWithData(self, is_kernel, op_type, src_extents=None, + def AddOperationWithData(self, part_name, op_type, src_extents=None, src_length=None, dst_extents=None, dst_length=None, data_blob=None, do_hash_data_blob=True): """Adds an install operation and associated data blob. @@ -287,7 +293,7 @@ def AddOperationWithData(self, is_kernel, op_type, src_extents=None, necessary offset/length accounting. Args: - is_kernel: whether this is a kernel (True) or rootfs (False) operation + part_name: The name of the partition (e.g. kernel or root). op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ. src_extents: list of (start, length) pairs indicating src block ranges src_length: size of the src data in bytes (needed for diff operations) @@ -302,15 +308,13 @@ def AddOperationWithData(self, is_kernel, op_type, src_extents=None, data_sha256_hash = hashlib.sha256(data_blob).digest() data_length, data_offset = self.AddData(data_blob) - self.AddOperation(is_kernel, op_type, data_offset=data_offset, + self.AddOperation(part_name, op_type, data_offset=data_offset, data_length=data_length, src_extents=src_extents, src_length=src_length, dst_extents=dst_extents, dst_length=dst_length, data_sha256_hash=data_sha256_hash) def WriteToFileWithData(self, file_obj, sigs_data=None, - privkey_file_name=None, - do_add_pseudo_operation=False, - is_pseudo_in_kernel=False, padding=None): + privkey_file_name=None, padding=None): """Writes the payload content to a file, optionally signing the content. 
Args: @@ -319,10 +323,6 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, payload signature fields assumed to be preset by the caller) privkey_file_name: key used for signing the payload (optional; used only if explicit signatures blob not provided) - do_add_pseudo_operation: whether a pseudo-operation should be added to - account for the signature blob - is_pseudo_in_kernel: whether the pseudo-operation should be added to - kernel (True) or rootfs (False) operations padding: stuff to dump past the normal data blobs provided (optional) Raises: @@ -343,17 +343,6 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, # Update the payload with proper signature attributes. self.SetSignatures(self.curr_offset, sigs_len) - # Add a pseudo-operation to account for the signature blob, if requested. - if do_add_pseudo_operation: - if not self.block_size: - raise TestError('cannot add pseudo-operation without knowing the ' - 'payload block size') - self.AddOperation( - is_pseudo_in_kernel, common.OpType.REPLACE, - data_offset=self.curr_offset, data_length=sigs_len, - dst_extents=[(common.PSEUDO_EXTENT_MARKER, - (sigs_len + self.block_size - 1) / self.block_size)]) - if do_generate_sigs_data: # Once all payload fields are updated, dump and sign it. temp_payload_file = cStringIO.StringIO() diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py index 62756423..907cc18a 100644 --- a/scripts/update_payload/update_metadata_pb2.py +++ b/scripts/update_payload/update_metadata_pb2.py @@ -20,7 +20,7 @@ package='chromeos_update_engine', syntax='proto2', serialized_options=_b('H\003'), - serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd0\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x8f\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03') + serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 
\x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xc9\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03') ) @@ -40,38 +40,46 @@ serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='SOURCE_COPY', index=2, number=4, + name='MOVE', index=2, number=2, + serialized_options=_b('\010\001'), + type=None), + _descriptor.EnumValueDescriptor( + name='BSDIFF', index=3, number=3, + serialized_options=_b('\010\001'), + type=None), + _descriptor.EnumValueDescriptor( + name='SOURCE_COPY', index=4, number=4, serialized_options=None, 
type=None), _descriptor.EnumValueDescriptor( - name='SOURCE_BSDIFF', index=3, number=5, + name='SOURCE_BSDIFF', index=5, number=5, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='REPLACE_XZ', index=4, number=8, + name='REPLACE_XZ', index=6, number=8, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='ZERO', index=5, number=6, + name='ZERO', index=7, number=6, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='DISCARD', index=6, number=7, + name='DISCARD', index=8, number=7, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='BROTLI_BSDIFF', index=7, number=10, + name='BROTLI_BSDIFF', index=9, number=10, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='PUFFDIFF', index=8, number=9, + name='PUFFDIFF', index=10, number=9, serialized_options=None, type=None), ], containing_type=None, serialized_options=None, serialized_start=712, - serialized_end=855, + serialized_end=885, ) _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) @@ -370,7 +378,7 @@ oneofs=[ ], serialized_start=391, - serialized_end=855, + serialized_end=885, ) @@ -505,8 +513,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=858, - serialized_end=1585, + serialized_start=888, + serialized_end=1615, ) @@ -550,8 +558,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1587, - serialized_end=1663, + serialized_start=1617, + serialized_end=1693, ) @@ -581,8 +589,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1665, - serialized_end=1754, + serialized_start=1695, + serialized_end=1784, ) @@ -599,14 +607,14 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=_b('\030\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=_b('\030\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, number=3, type=13, cpp_type=3, label=1, @@ -634,28 +642,28 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=_b('\030\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=_b('\030\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + 
serialized_options=_b('\030\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=_b('\030\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, number=10, type=11, cpp_type=10, label=1, @@ -710,8 +718,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1757, - serialized_end=2574, + serialized_start=1787, + serialized_end=2628, ) _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES @@ -823,4 +831,12 @@ DESCRIPTOR._options = None +_INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None +_INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None +_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None +_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None +_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None +_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None +_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None +_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None # @@protoc_insertion_point(module_scope) diff --git a/update_metadata.proto b/update_metadata.proto index 3382f849..40db6785 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -303,8 +303,8 @@ message DeltaArchiveManifest { // Only present in major version = 1. List of install operations for the // kernel and rootfs partitions. For major version = 2 see the |partitions| // field. - repeated InstallOperation install_operations = 1; - repeated InstallOperation kernel_install_operations = 2; + repeated InstallOperation install_operations = 1 [deprecated = true]; + repeated InstallOperation kernel_install_operations = 2 [deprecated = true]; // (At time of writing) usually 4096 optional uint32 block_size = 3 [default = 4096]; @@ -319,10 +319,10 @@ message DeltaArchiveManifest { // Only present in major version = 1. Partition metadata used to validate the // update. For major version = 2 see the |partitions| field. - optional PartitionInfo old_kernel_info = 6; - optional PartitionInfo new_kernel_info = 7; - optional PartitionInfo old_rootfs_info = 8; - optional PartitionInfo new_rootfs_info = 9; + optional PartitionInfo old_kernel_info = 6 [deprecated = true]; + optional PartitionInfo new_kernel_info = 7 [deprecated = true]; + optional PartitionInfo old_rootfs_info = 8 [deprecated = true]; + optional PartitionInfo new_rootfs_info = 9 [deprecated = true]; // old_image_info will only be present for delta images. optional ImageInfo old_image_info = 10; From c73cb8ce94c14270c7e932bedb1889f5f150f038 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 24 Oct 2019 18:28:31 -0700 Subject: [PATCH 137/624] Setup android-base logging in sideload. Some depending modules (libsnapshot, etc.) uses logging library from libbase. Make sure those logs are dumped to /tmp/recovery.log. 
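For illustration only (not part of this change): once the sideload binary
initializes android-base logging with the StdioLogger, a libbase log statement
in a dependency such as libsnapshot, e.g.

    LOG(INFO) << "Mapping snapshot partition";

is written to stdout/stderr, which already ends up in /tmp/recovery.log.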
Test: sideload and view /tmp/recovery.log Change-Id: I1a27f6345482928684fb9e22494783d2c57016b0 --- Android.bp | 1 + sideload_logging_android.cc | 27 +++++++++++++++++++++++++++ sideload_logging_android.h | 25 +++++++++++++++++++++++++ sideload_main.cc | 2 ++ 4 files changed, 55 insertions(+) create mode 100644 sideload_logging_android.cc create mode 100644 sideload_logging_android.h diff --git a/Android.bp b/Android.bp index 54fd0c2c..d6f1090d 100644 --- a/Android.bp +++ b/Android.bp @@ -339,6 +339,7 @@ cc_binary { "metrics_reporter_stub.cc", "metrics_utils.cc", "network_selector_stub.cc", + "sideload_logging_android.cc", "sideload_main.cc", "update_attempter_android.cc", "update_boot_flags_action.cc", diff --git a/sideload_logging_android.cc b/sideload_logging_android.cc new file mode 100644 index 00000000..f82259f3 --- /dev/null +++ b/sideload_logging_android.cc @@ -0,0 +1,27 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/sideload_logging_android.h" + +#include + +namespace chromeos_update_engine { + +void SetupAndroidLogging(char* argv[]) { + android::base::InitLogging(argv, android::base::StdioLogger); +} + +} // namespace chromeos_update_engine diff --git a/sideload_logging_android.h b/sideload_logging_android.h new file mode 100644 index 00000000..0bb87146 --- /dev/null +++ b/sideload_logging_android.h @@ -0,0 +1,25 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#pragma once + +namespace chromeos_update_engine { + +// Some depending modules uses logging functions from android-base. +// Redirect android-base logging to stdio, which redirects to /tmp/recovery.log. 
+void SetupAndroidLogging(char* argv[]); + +} // namespace chromeos_update_engine diff --git a/sideload_main.cc b/sideload_main.cc index 818fa5c9..29d6f2ce 100644 --- a/sideload_main.cc +++ b/sideload_main.cc @@ -36,6 +36,7 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" +#include "update_engine/sideload_logging_android.h" #include "update_engine/update_attempter_android.h" using std::string; @@ -196,6 +197,7 @@ int main(int argc, char** argv) { chromeos_update_engine::Terminator::Init(); chromeos_update_engine::SetupLogging(); + chromeos_update_engine::SetupAndroidLogging(argv); brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload"); LOG(INFO) << "Update Engine Sideloading starting"; From 2c62c13b19d6b75217b9ee86f0993b03370f4239 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 24 Oct 2019 14:53:40 -0700 Subject: [PATCH 138/624] Mount /metadata in sideload. In recovery, /metadata needs to be mounted before doing any updates (even regular ones) on VAB device. Test: sideload Bug: 140749209 Change-Id: Iad8ef63c572b7b6d3ad8be5ee7ec11001b337435 --- dynamic_partition_control_android.cc | 6 ++++++ dynamic_partition_control_android.h | 2 ++ 2 files changed, 8 insertions(+) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index e194670f..f017c9f9 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -213,6 +213,7 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( } void DynamicPartitionControlAndroid::CleanupInternal(bool wait) { + metadata_device_.reset(); if (mapped_devices_.empty()) { return; } @@ -356,6 +357,11 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( target_supports_snapshot_ = manifest.dynamic_partition_metadata().snapshot_enabled(); + if (GetVirtualAbFeatureFlag().IsEnabled()) { + metadata_device_ = snapshot_->EnsureMetadataMounted(); + TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); + } + if (!update) return true; diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index d70a2aa0..8f45682d 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -23,6 +23,7 @@ #include #include +#include #include namespace chromeos_update_engine { @@ -115,6 +116,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; std::unique_ptr snapshot_; + std::unique_ptr metadata_device_; bool target_supports_snapshot_ = false; DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); From 02513dc7b6666eae2c76f6c52cf6355ea4223b36 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 30 Oct 2019 11:23:04 -0700 Subject: [PATCH 139/624] DynamicPartitionControlAndroid: remove unused arg Test: builds Change-Id: I6a8375951c3ba5560a5ee91231419301e58776bd --- dynamic_partition_control_android.cc | 6 +++--- dynamic_partition_control_android.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index f017c9f9..bc60c578 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -63,7 +63,7 @@ constexpr std::chrono::milliseconds kMapTimeout{1000}; constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000}; DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { - CleanupInternal(false /* wait 
*/); + CleanupInternal(); } static FeatureFlag GetFeatureFlag(const char* enable_prop, @@ -212,7 +212,7 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( return true; } -void DynamicPartitionControlAndroid::CleanupInternal(bool wait) { +void DynamicPartitionControlAndroid::CleanupInternal() { metadata_device_.reset(); if (mapped_devices_.empty()) { return; @@ -227,7 +227,7 @@ void DynamicPartitionControlAndroid::CleanupInternal(bool wait) { } void DynamicPartitionControlAndroid::Cleanup() { - CleanupInternal(true /* wait */); + CleanupInternal(); } bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) { diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 8f45682d..07ce2810 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -87,7 +87,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { private: friend class DynamicPartitionControlAndroidTest; - void CleanupInternal(bool wait); + void CleanupInternal(); bool MapPartitionInternal(const std::string& super_device, const std::string& target_partition_name, uint32_t slot, From 4ad3af63309d9b9887723d6a0addf803186c9427 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Wed, 30 Oct 2019 11:59:45 -0700 Subject: [PATCH 140/624] Allow skipping timestamp check in certain debug conditions As proposed in go/ota-downgrade, the android auto team wants to install a full OTA with the older timestamp. We will only allow it on devices with userdebug build, and has the property "ro.ota.allow_downgrade" set. Data wipe is almost inevitable due to the probability of security patch level and database rollback. Test: set the property and install a full payload with older timestamp. Change-Id: I897bbb19bfec820340f791abf3c6d8138995fa90 --- common/fake_hardware.h | 2 ++ common/hardware_interface.h | 4 ++++ hardware_android.cc | 7 +++++++ hardware_android.h | 1 + hardware_chromeos.h | 1 + payload_consumer/delta_performer.cc | 6 +++++- 6 files changed, 20 insertions(+), 1 deletion(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 3e5a66e6..8da5326c 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -128,6 +128,8 @@ class FakeHardware : public HardwareInterface { int64_t GetBuildTimestamp() const override { return build_timestamp_; } + bool AllowDowngrade() const override { return false; } + bool GetFirstActiveOmahaPingSent() const override { return first_active_omaha_ping_sent_; } diff --git a/common/hardware_interface.h b/common/hardware_interface.h index 01405881..4a64c3e8 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -122,6 +122,10 @@ class HardwareInterface { // Returns the timestamp of the current OS build. virtual int64_t GetBuildTimestamp() const = 0; + // Returns true if the current OS build allows installing the payload with an + // older timestamp. + virtual bool AllowDowngrade() const = 0; + // Returns whether the first active ping was sent to Omaha at some point, and // that the value is persisted across recovery (and powerwash) once set with // |SetFirstActiveOmahaPingSent()|. diff --git a/hardware_android.cc b/hardware_android.cc index 21d46595..9611ba68 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -192,6 +192,13 @@ int64_t HardwareAndroid::GetBuildTimestamp() const { return GetIntProperty(kPropBuildDateUTC, 0); } +// Returns true if the device runs an userdebug build, and explicitly allows OTA +// downgrade. 
+bool HardwareAndroid::AllowDowngrade() const { + return GetBoolProperty("ro.ota.allow_downgrade", false) && + GetBoolProperty("ro.debuggable", false); +} + bool HardwareAndroid::GetFirstActiveOmahaPingSent() const { LOG(WARNING) << "STUB: Assuming first active omaha was never set."; return false; diff --git a/hardware_android.h b/hardware_android.h index 5b3c99d8..2a8f6692 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -53,6 +53,7 @@ class HardwareAndroid final : public HardwareInterface { bool GetNonVolatileDirectory(base::FilePath* path) const override; bool GetPowerwashSafeDirectory(base::FilePath* path) const override; int64_t GetBuildTimestamp() const override; + bool AllowDowngrade() const override; bool GetFirstActiveOmahaPingSent() const override; bool SetFirstActiveOmahaPingSent() override; diff --git a/hardware_chromeos.h b/hardware_chromeos.h index 8829866a..57be3b03 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -58,6 +58,7 @@ class HardwareChromeOS final : public HardwareInterface { bool GetNonVolatileDirectory(base::FilePath* path) const override; bool GetPowerwashSafeDirectory(base::FilePath* path) const override; int64_t GetBuildTimestamp() const override; + bool AllowDowngrade() const override { return false; } bool GetFirstActiveOmahaPingSent() const override; bool SetFirstActiveOmahaPingSent() override; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 4aec00bf..8b3f61cf 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1692,7 +1692,11 @@ ErrorCode DeltaPerformer::ValidateManifest() { << hardware_->GetBuildTimestamp() << ") is newer than the maximum timestamp in the manifest (" << manifest_.max_timestamp() << ")"; - return ErrorCode::kPayloadTimestampError; + if (!hardware_->AllowDowngrade()) { + return ErrorCode::kPayloadTimestampError; + } + LOG(INFO) << "The current OS build allows downgrade, continuing to apply" + " the payload with an older timestamp."; } if (major_payload_version_ == kChromeOSMajorPayloadVersion) { From 3f9be772bd1abcfe922fbebf78a846d38191bce9 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Sat, 2 Nov 2019 18:31:50 -0700 Subject: [PATCH 141/624] Allow update_device install the secondary payload Add an option to support installation of the secondary payload. This is used to verify an android factory OTA, where it has one step to install the secondary payload. The payload and its property file store under the secondary/ directory of the update package. Test: install a secondary payload from a factory OTA package Change-Id: I1d2e1d6945f1daa9afaab6af8ce9aad0f7c2100f --- scripts/update_device.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/scripts/update_device.py b/scripts/update_device.py index 5c19b89a..49f766da 100755 --- a/scripts/update_device.py +++ b/scripts/update_device.py @@ -83,17 +83,24 @@ class AndroidOTAPackage(object): # Android OTA package file paths. 
OTA_PAYLOAD_BIN = 'payload.bin' OTA_PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt' + SECONDARY_OTA_PAYLOAD_BIN = 'secondary/payload.bin' + SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt' - def __init__(self, otafilename): + def __init__(self, otafilename, secondary_payload=False): self.otafilename = otafilename otazip = zipfile.ZipFile(otafilename, 'r') - payload_info = otazip.getinfo(self.OTA_PAYLOAD_BIN) + payload_entry = (self.SECONDARY_OTA_PAYLOAD_BIN if secondary_payload else + self.OTA_PAYLOAD_BIN) + payload_info = otazip.getinfo(payload_entry) self.offset = payload_info.header_offset self.offset += zipfile.sizeFileHeader self.offset += len(payload_info.extra) + len(payload_info.filename) self.size = payload_info.file_size - self.properties = otazip.read(self.OTA_PAYLOAD_PROPERTIES_TXT) + + property_entry = (self.SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT if + secondary_payload else self.OTA_PAYLOAD_PROPERTIES_TXT) + self.properties = otazip.read(property_entry) class UpdateHandler(BaseHTTPServer.BaseHTTPRequestHandler): @@ -278,9 +285,9 @@ def StartServer(ota_filename, serving_range): return t -def AndroidUpdateCommand(ota_filename, payload_url, extra_headers): +def AndroidUpdateCommand(ota_filename, secondary, payload_url, extra_headers): """Return the command to run to start the update in the Android device.""" - ota = AndroidOTAPackage(ota_filename) + ota = AndroidOTAPackage(ota_filename, secondary) headers = ota.properties headers += 'USER_AGENT=Dalvik (something, something)\n' headers += 'NETWORK_ID=0\n' @@ -363,6 +370,8 @@ def main(): help='Override the public key used to verify payload.') parser.add_argument('--extra-headers', type=str, default='', help='Extra headers to pass to the device.') + parser.add_argument('--secondary', action='store_true', + help='Update with the secondary payload in the package.') args = parser.parse_args() logging.basicConfig( level=logging.WARNING if args.no_verbose else logging.INFO) @@ -398,7 +407,7 @@ def main(): # command. payload_url = 'http://127.0.0.1:%d/payload' % DEVICE_PORT if use_omaha and zipfile.is_zipfile(args.otafile): - ota = AndroidOTAPackage(args.otafile) + ota = AndroidOTAPackage(args.otafile, args.secondary) serving_range = (ota.offset, ota.size) else: serving_range = (0, os.stat(args.otafile).st_size) @@ -426,8 +435,8 @@ def main(): update_cmd = \ OmahaUpdateCommand('http://127.0.0.1:%d/update' % DEVICE_PORT) else: - update_cmd = \ - AndroidUpdateCommand(args.otafile, payload_url, args.extra_headers) + update_cmd = AndroidUpdateCommand(args.otafile, args.secondary, + payload_url, args.extra_headers) cmds.append(['shell', 'su', '0'] + update_cmd) for cmd in cmds: From 2f1ca0367fc818506f86ed95e61b9fe3ec619c6c Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 1 Nov 2019 22:28:01 +0000 Subject: [PATCH 142/624] Partially revert "Sanity check that no downgrade package on launch VAB device." This partially reverts commit 6e0d0ef979d2d0dd99c586f83fd7edbf356c63c3. Reason for revert: This breaks factory OTA on launch VAB device where snapshot_enabled is set to false. 
Bug: 138733621 Change-Id: Iee0aa33208238652a55b6d9a6a50f12ba6efde17 Test: manual --- dynamic_partition_control_android.cc | 7 ------- 1 file changed, 7 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index bc60c578..c641a6b3 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -375,13 +375,6 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( return PrepareSnapshotPartitionsForUpdate( source_slot, target_slot, manifest); } - - if (GetVirtualAbFeatureFlag().IsLaunch() && !target_supports_snapshot_) { - LOG(ERROR) << "Cannot downgrade to a build that does not support " - << "snapshots because this device launches with Virtual A/B."; - return false; - } - if (!snapshot_->CancelUpdate()) { LOG(ERROR) << "Cannot cancel previous update."; return false; From 2cbb069d65b1da20b3323022e12fe863bc0b98d0 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 30 Oct 2019 13:36:17 -0700 Subject: [PATCH 143/624] update_engine: Deprecate EOL Status This is not used anymore. It has been replaced by date based EOL. It has been deprecated from Chrome per CL:1891467 BUG=chromium:1005511 TEST=sudo FEATURES=test emerge update_engine-client update_engine Change-Id: I047ca8d641906208728e89adb7c430b71ab11239 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1891571 Tested-by: Amin Hassani Commit-Queue: Amin Hassani Reviewed-by: Jae Hoon Kim --- UpdateEngine.conf | 3 -- client_library/client_dbus.cc | 4 -- client_library/client_dbus.h | 2 - client_library/include/update_engine/client.h | 3 -- common/constants.cc | 1 - common/constants.h | 1 - common_service.cc | 16 ------ common_service.h | 4 -- common_service_unittest.cc | 15 ------ ...rg.chromium.UpdateEngineInterface.dbus-xml | 3 -- dbus_service.cc | 5 -- dbus_service.h | 3 -- omaha_request_action.cc | 26 +++------- omaha_request_action.h | 8 ++- omaha_request_action_unittest.cc | 50 ++++--------------- omaha_utils.cc | 29 ----------- omaha_utils.h | 19 ------- omaha_utils_unittest.cc | 16 ------ update_engine_client.cc | 1 - 19 files changed, 18 insertions(+), 191 deletions(-) diff --git a/UpdateEngine.conf b/UpdateEngine.conf index 42f73fc3..e01c673d 100644 --- a/UpdateEngine.conf +++ b/UpdateEngine.conf @@ -84,9 +84,6 @@ - diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index d1d6cc01..3497ce08 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -272,9 +272,5 @@ bool DBusUpdateEngineClient::GetLastAttemptError( return proxy_->GetLastAttemptError(last_attempt_error, nullptr); } -bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status) const { - return proxy_->GetEolStatus(eol_status, nullptr); -} - } // namespace internal } // namespace update_engine diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index c9631cf7..e964399b 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -85,8 +85,6 @@ class DBusUpdateEngineClient : public UpdateEngineClient { bool GetLastAttemptError(int32_t* last_attempt_error) const override; - bool GetEolStatus(int32_t* eol_status) const override; - private: void DBusStatusHandlersRegistered(const std::string& interface, const std::string& signal_name, diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index 89f36af6..1a0461c0 100644 --- a/client_library/include/update_engine/client.h +++ 
b/client_library/include/update_engine/client.h @@ -135,9 +135,6 @@ class UpdateEngineClient { // Get the last UpdateAttempt error code. virtual bool GetLastAttemptError(int32_t* last_attempt_error) const = 0; - // Get the current end-of-life status code. See EolStatus enum for details. - virtual bool GetEolStatus(int32_t* eol_status) const = 0; - protected: // Use CreateInstance(). UpdateEngineClient() = default; diff --git a/common/constants.cc b/common/constants.cc index 64bdf0cf..d779dd44 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -56,7 +56,6 @@ const char kPrefsOmahaCohort[] = "omaha-cohort"; const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint"; const char kPrefsOmahaCohortName[] = "omaha-cohort-name"; const char kPrefsOmahaEolDate[] = "omaha-eol-date"; -const char kPrefsOmahaEolStatus[] = "omaha-eol-status"; const char kPrefsP2PEnabled[] = "p2p-enabled"; const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp"; const char kPrefsP2PNumAttempts[] = "p2p-num-attempts"; diff --git a/common/constants.h b/common/constants.h index 23c9003d..8685f7e7 100644 --- a/common/constants.h +++ b/common/constants.h @@ -57,7 +57,6 @@ extern const char kPrefsOmahaCohort[]; extern const char kPrefsOmahaCohortHint[]; extern const char kPrefsOmahaCohortName[]; extern const char kPrefsOmahaEolDate[]; -extern const char kPrefsOmahaEolStatus[]; extern const char kPrefsP2PEnabled[]; extern const char kPrefsP2PFirstAttemptTimestamp[]; extern const char kPrefsP2PNumAttempts[]; diff --git a/common_service.cc b/common_service.cc index 0d5ee6dc..d520cf13 100644 --- a/common_service.cc +++ b/common_service.cc @@ -412,20 +412,4 @@ bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */, return true; } -bool UpdateEngineService::GetEolStatus(ErrorPtr* error, - int32_t* out_eol_status) { - PrefsInterface* prefs = system_state_->prefs(); - - string str_eol_status; - if (prefs->Exists(kPrefsOmahaEolStatus) && - !prefs->GetString(kPrefsOmahaEolStatus, &str_eol_status)) { - LogAndSetError(error, FROM_HERE, "Error getting the end-of-life status."); - return false; - } - - // StringToEolStatus will return kSupported for invalid values. - *out_eol_status = static_cast(StringToEolStatus(str_eol_status)); - return true; -} - } // namespace chromeos_update_engine diff --git a/common_service.h b/common_service.h index f93855d9..3349244e 100644 --- a/common_service.h +++ b/common_service.h @@ -153,10 +153,6 @@ class UpdateEngineService { bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error); - // Returns the current end-of-life status of the device. This value is updated - // on every update check and persisted on disk across reboots. - bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status); - private: SystemState* system_state_; }; diff --git a/common_service_unittest.cc b/common_service_unittest.cc index 65202a06..00c4357e 100644 --- a/common_service_unittest.cc +++ b/common_service_unittest.cc @@ -169,19 +169,4 @@ TEST_F(UpdateEngineServiceTest, ResetStatusFails) { UpdateEngineService::kErrorFailed)); } -TEST_F(UpdateEngineServiceTest, GetEolStatusTest) { - FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); - // The default value should be "supported". 
- int32_t eol_status = static_cast(EolStatus::kEol); - EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kSupported, static_cast(eol_status)); - - fake_prefs.SetString(kPrefsOmahaEolStatus, "security-only"); - EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status)); - EXPECT_EQ(nullptr, error_); - EXPECT_EQ(EolStatus::kSecurityOnly, static_cast(eol_status)); -} - } // namespace chromeos_update_engine diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index a1831476..b9e0a479 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -137,8 +137,5 @@ - - - diff --git a/dbus_service.cc b/dbus_service.cc index 065fe0c3..72e11eba 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -213,11 +213,6 @@ bool DBusUpdateEngineService::GetLastAttemptError( return common_->GetLastAttemptError(error, out_last_attempt_error); } -bool DBusUpdateEngineService::GetEolStatus(ErrorPtr* error, - int32_t* out_eol_status) { - return common_->GetEolStatus(error, out_eol_status); -} - UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state) : org::chromium::UpdateEngineInterfaceAdaptor(&dbus_service_), bus_(DBusConnection::Get()->GetDBus()), diff --git a/dbus_service.h b/dbus_service.h index 2babf8c7..4ea1a532 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -157,9 +157,6 @@ class DBusUpdateEngineService bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error) override; - // Returns the current end-of-life status of the device in |out_eol_status|. - bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status) override; - private: std::unique_ptr common_; }; diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 7ca43720..6ebab0d1 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -108,7 +108,7 @@ constexpr char kValPostInstall[] = "postinstall"; constexpr char kValNoUpdate[] = "noupdate"; // updatecheck attributes (without the underscore prefix). -constexpr char kAttrEol[] = "eol"; +// Deprecated: "eol" constexpr char kAttrEolDate[] = "eol_date"; constexpr char kAttrRollback[] = "rollback"; constexpr char kAttrFirmwareVersion[] = "firmware_version"; @@ -699,8 +699,8 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, } } - // Parse the updatecheck attributes. - PersistEolStatus(parser_data->updatecheck_attrs); + PersistEolInfo(parser_data->updatecheck_attrs); + // Rollback-related updatecheck attributes. // Defaults to false if attribute is not present. output_object->is_rollback = @@ -1317,30 +1317,16 @@ bool OmahaRequestAction::PersistCohortData(const string& prefs_key, return true; } -bool OmahaRequestAction::PersistEolStatus(const map& attrs) { - bool ret = true; - - // Set EOL date. +bool OmahaRequestAction::PersistEolInfo(const map& attrs) { auto eol_date_attr = attrs.find(kAttrEolDate); if (eol_date_attr == attrs.end()) { system_state_->prefs()->Delete(kPrefsOmahaEolDate); } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolDate, eol_date_attr->second)) { LOG(ERROR) << "Setting EOL date failed."; - ret = false; - } - - // Set EOL. 
- auto eol_attr = attrs.find(kAttrEol); - if (eol_attr == attrs.end()) { - system_state_->prefs()->Delete(kPrefsOmahaEolStatus); - } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolStatus, - eol_attr->second)) { - LOG(ERROR) << "Setting EOL status failed."; - ret = false; + return false; } - - return ret; + return true; } void OmahaRequestAction::ActionCompleted(ErrorCode code) { diff --git a/omaha_request_action.h b/omaha_request_action.h index 96f09e92..3f66de9b 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -183,11 +183,9 @@ class OmahaRequestAction : public Action, bool PersistCohortData(const std::string& prefs_key, const std::string& new_value); - // Parse and persist the end-of-life status flag sent back in the updatecheck - // tag attributes. In addition, the optional end-of-life date flag will also - // be parsed and persisted. The flags will be validated and stored in the - // Prefs. - bool PersistEolStatus(const std::map& attrs); + // Parses and persists the end-of-life date flag sent back in the updatecheck + // tag attributes. The flags will be validated and stored in the Prefs. + bool PersistEolInfo(const std::map& attrs); // If this is an update check request, initializes // |ping_active_days_| and |ping_roll_call_days_| to values that may diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 94d5152a..b66375f2 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -1992,27 +1992,6 @@ TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) { ASSERT_TRUE(TestUpdateCheck()); } -TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) { - // Test that the "eol" flags is only parsed from the "_eol" attribute and not - // the "eol" attribute. - tuc_params_.http_response = - "" - ""; - tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; - tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; - - ASSERT_TRUE(TestUpdateCheck()); - - string eol_pref; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref)); - // Note that the eol="eol" attribute should be ignored and the _eol should be - // used instead. 
- EXPECT_EQ("security-only", eol_pref); -} - TEST_F(OmahaRequestActionTest, NoUniqueIDTest) { tuc_params_.http_response = "invalid xml>"; tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; @@ -2810,62 +2789,51 @@ TEST_F(OmahaRequestActionTest, NoIncludeRequisitionTest) { EXPECT_EQ(string::npos, post_str.find("requisition")); } -TEST_F(OmahaRequestActionTest, PersistEolDatesTest) { +TEST_F(OmahaRequestActionTest, PersistEolDateTest) { tuc_params_.http_response = "" ""; + "_eol_date=\"200\" _foo=\"bar\"/>"; tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; ASSERT_TRUE(TestUpdateCheck()); - string eol, eol_date; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol)); - EXPECT_EQ(kEolStatusSupported, eol); + string eol_date; EXPECT_TRUE( fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); EXPECT_EQ("200", eol_date); } -TEST_F(OmahaRequestActionTest, PersistEolMissingDatesTest) { +TEST_F(OmahaRequestActionTest, PersistEolMissingDateTest) { tuc_params_.http_response = "" ""; + "_foo=\"bar\"/>"; tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; ASSERT_TRUE(TestUpdateCheck()); - string eol, eol_date; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol)); - EXPECT_EQ(kEolStatusSupported, eol); + string eol_date; EXPECT_FALSE( fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); } -TEST_F(OmahaRequestActionTest, PersistEolBadDatesTest) { +TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) { tuc_params_.http_response = "" ""; + "_eol_date=\"bad\" foo=\"bar\"/>"; tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; ASSERT_TRUE(TestUpdateCheck()); - string eol, eol_date; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol)); - EXPECT_EQ(kEolStatusSupported, eol); + string eol_date; EXPECT_TRUE( fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); EXPECT_EQ(kEolDateInvalid, StringToEolDate(eol_date)); diff --git a/omaha_utils.cc b/omaha_utils.cc index f9ec85ac..18a99cea 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -21,37 +21,8 @@ namespace chromeos_update_engine { -const char kEolStatusSupported[] = "supported"; -const char kEolStatusSecurityOnly[] = "security-only"; -const char kEolStatusEol[] = "eol"; - const EolDate kEolDateInvalid = -9999; -const char* EolStatusToString(EolStatus eol_status) { - switch (eol_status) { - case EolStatus::kSupported: - return kEolStatusSupported; - case EolStatus::kSecurityOnly: - return kEolStatusSecurityOnly; - case EolStatus::kEol: - return kEolStatusEol; - } - // Only reached if an invalid number is casted to |EolStatus|. 
- LOG(WARNING) << "Invalid EolStatus value: " << static_cast(eol_status); - return kEolStatusSupported; -} - -EolStatus StringToEolStatus(const std::string& eol_status) { - if (eol_status == kEolStatusSupported || eol_status.empty()) - return EolStatus::kSupported; - if (eol_status == kEolStatusSecurityOnly) - return EolStatus::kSecurityOnly; - if (eol_status == kEolStatusEol) - return EolStatus::kEol; - LOG(WARNING) << "Invalid end-of-life attribute: " << eol_status; - return EolStatus::kSupported; -} - std::string EolDateToString(EolDate eol_date) { #if BASE_VER < 576279 return base::Int64ToString(eol_date); diff --git a/omaha_utils.h b/omaha_utils.h index 128232af..458bf9eb 100644 --- a/omaha_utils.h +++ b/omaha_utils.h @@ -26,25 +26,6 @@ using EolDate = int64_t; // |EolDate| indicating an invalid end-of-life date. extern const EolDate kEolDateInvalid; -// The possible string values for the end-of-life status. -extern const char kEolStatusSupported[]; -extern const char kEolStatusSecurityOnly[]; -extern const char kEolStatusEol[]; - -// The end-of-life status of the device. -enum class EolStatus { - kSupported = 0, - kSecurityOnly, - kEol, -}; - -// Returns the string representation of the |eol_status|. -const char* EolStatusToString(EolStatus eol_status); - -// Converts the end-of-life status string to an EolStatus numeric value. In case -// of an invalid string, the default "supported" value will be used instead. -EolStatus StringToEolStatus(const std::string& eol_status); - // Returns the string representation of the |eol_date|. std::string EolDateToString(EolDate eol_date); diff --git a/omaha_utils_unittest.cc b/omaha_utils_unittest.cc index ccb9578d..849905aa 100644 --- a/omaha_utils_unittest.cc +++ b/omaha_utils_unittest.cc @@ -23,22 +23,6 @@ namespace chromeos_update_engine { class OmahaUtilsTest : public ::testing::Test {}; -TEST(OmahaUtilsTest, EolStatusTest) { - EXPECT_EQ(EolStatus::kEol, StringToEolStatus("eol")); - - // Supported values are converted back and forth properly. - const std::vector tests = { - EolStatus::kSupported, EolStatus::kSecurityOnly, EolStatus::kEol}; - for (EolStatus eol_status : tests) { - EXPECT_EQ(eol_status, StringToEolStatus(EolStatusToString(eol_status))) - << "The StringToEolStatus() was " << EolStatusToString(eol_status); - } - - // Invalid values are assumed as "supported". - EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("")); - EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!")); -} - TEST(OmahaUtilsTest, EolDateTest) { // Supported values are converted back and forth properly. const std::vector tests = {kEolDateInvalid, -1, 0, 1}; diff --git a/update_engine_client.cc b/update_engine_client.cc index 7b5c4df1..e78eccfa 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -44,7 +44,6 @@ using brillo::KeyValueStore; using chromeos_update_engine::EolDate; using chromeos_update_engine::EolDateToString; -using chromeos_update_engine::EolStatus; using chromeos_update_engine::ErrorCode; using chromeos_update_engine::UpdateEngineStatusToString; using chromeos_update_engine::UpdateStatusToString; From 8683ed40d94c29da0d755e4427ec53dfe49daa02 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 1 Nov 2019 16:58:29 -0700 Subject: [PATCH 144/624] Update keys in dynamic_partitions_info.txt. These key names have been changed to match the same keys in misc_info.txt. Read new key names as well. 
Test: m otapackage -j Change-Id: I70e009f2e5668863fd54db45b2b230c7f4a8e896 --- payload_generator/payload_generation_config.cc | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index 2dd2626e..88cca302 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -141,19 +141,23 @@ bool ImageConfig::LoadDynamicPartitionMetadata( for (const auto& group_name : group_names) { DynamicPartitionGroup* group = metadata->add_groups(); group->set_name(group_name); - if (!store.GetString(group_name + "_size", &buf)) { - LOG(ERROR) << "Missing " << group_name + "_size."; + if (!store.GetString("super_" + group_name + "_group_size", &buf) && + !store.GetString(group_name + "_size", &buf)) { + LOG(ERROR) << "Missing super_" << group_name + "_group_size or " + << group_name << "_size."; return false; } uint64_t max_size; if (!base::StringToUint64(buf, &max_size)) { - LOG(ERROR) << group_name << "_size=" << buf << " is not an integer."; + LOG(ERROR) << "Group size for " << group_name << " = " << buf + << " is not an integer."; return false; } group->set_size(max_size); - if (store.GetString(group_name + "_partition_list", &buf)) { + if (store.GetString("super_" + group_name + "_partition_list", &buf) || + store.GetString(group_name + "_partition_list", &buf)) { auto partition_names = brillo::string_utils::Split(buf, " "); for (const auto& partition_name : partition_names) { group->add_partition_names()->assign(partition_name); From 3a4caa13a26ad50e6b742b881baba0a2eed49bd0 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 6 Nov 2019 11:12:28 -0800 Subject: [PATCH 145/624] update_engine: pipe stderr individually in SynchronousExec Currently, SynchronousExec pipes stderr to stdout, which is fine but not ideal. In particular, we have an issue with the vpd_get_value script, which exits with 0 even with underlying failures. This is problematic because we get the combined stdout/stderr of the command, and since the exit code is 0 we assume the output is correct. Then we create the XML request based on this output, but with stderr combined (too much junk) as the value of an XML attribute. This causes an update failure. Fortunately, vpd_get_value correctly separates its children's stderr and stdout. So as long as we don't combine stdout and stderr into one stream, this error will not happen again. Also a few other nitpicks in this CL: - Constructing the command for shutdown using simpler syntax. - Logging the command before running it for all external subprocess runs.
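As a usage sketch only (not code from this CL; the key name below is made up), a caller now receives the two streams separately and can log stderr without it leaking into the value used for the Omaha request:

    int exit_code = 0;
    std::string out, err;
    std::vector<std::string> cmd = {"vpd_get_value", "some_key"};  // hypothetical key
    if (!Subprocess::SynchronousExec(cmd, &exit_code, &out, &err) ||
        exit_code != 0) {
      LOG(ERROR) << "vpd_get_value failed with exit code " << exit_code
                 << " and stderr: " << err;
    }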
BUG=chromium:1010306 TEST=sudo FEATURES=test emerge update_engine Change-Id: Ia620afed814e4fe9ba24b1a0ad01680481c6ba7c Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1901886 Tested-by: Amin Hassani Reviewed-by: Andrew Lassalle Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- common/subprocess.cc | 57 +++++++++++++++--------- common/subprocess.h | 8 ++-- common/subprocess_unittest.cc | 11 ++--- hardware_chromeos.cc | 33 +++++++++----- p2p_manager.cc | 6 +-- payload_generator/squashfs_filesystem.cc | 14 +++--- update_attempter.cc | 8 +--- 7 files changed, 82 insertions(+), 55 deletions(-) diff --git a/common/subprocess.cc b/common/subprocess.cc index 36655c7e..52065030 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -95,6 +95,7 @@ bool LaunchProcess(const vector& cmd, proc->RedirectUsingPipe(STDOUT_FILENO, false); proc->SetPreExecCallback(base::Bind(&SetupChild, env, flags)); + LOG(INFO) << "Running \"" << base::JoinString(cmd, " ") << "\""; return proc->Start(); } @@ -229,22 +230,20 @@ int Subprocess::GetPipeFd(pid_t pid, int fd) const { bool Subprocess::SynchronousExec(const vector& cmd, int* return_code, - string* stdout) { - // The default for SynchronousExec is to use kSearchPath since the code relies - // on that. - return SynchronousExecFlags( - cmd, kRedirectStderrToStdout | kSearchPath, return_code, stdout); + string* stdout, + string* stderr) { + // The default for |SynchronousExec| is to use |kSearchPath| since the code + // relies on that. + return SynchronousExecFlags(cmd, kSearchPath, return_code, stdout, stderr); } bool Subprocess::SynchronousExecFlags(const vector& cmd, uint32_t flags, int* return_code, - string* stdout) { + string* stdout, + string* stderr) { brillo::ProcessImpl proc; - // It doesn't make sense to redirect some pipes in the synchronous case - // because we won't be reading on our end, so we don't expose the output_pipes - // in this case. - if (!LaunchProcess(cmd, flags, {}, &proc)) { + if (!LaunchProcess(cmd, flags, {STDERR_FILENO}, &proc)) { LOG(ERROR) << "Failed to launch subprocess"; return false; } @@ -252,21 +251,39 @@ bool Subprocess::SynchronousExecFlags(const vector& cmd, if (stdout) { stdout->clear(); } + if (stderr) { + stderr->clear(); + } - int fd = proc.GetPipe(STDOUT_FILENO); + // Read from both stdout and stderr individually. + int stdout_fd = proc.GetPipe(STDOUT_FILENO); + int stderr_fd = proc.GetPipe(STDERR_FILENO); vector buffer(32 * 1024); - while (true) { - int rc = HANDLE_EINTR(read(fd, buffer.data(), buffer.size())); - if (rc < 0) { - PLOG(ERROR) << "Reading from child's output"; - break; - } else if (rc == 0) { - break; - } else { - if (stdout) + bool stdout_closed = false, stderr_closed = false; + while (!stdout_closed || !stderr_closed) { + if (!stdout_closed) { + int rc = HANDLE_EINTR(read(stdout_fd, buffer.data(), buffer.size())); + if (rc <= 0) { + stdout_closed = true; + if (rc < 0) + PLOG(ERROR) << "Reading from child's stdout"; + } else if (stdout != nullptr) { stdout->append(buffer.data(), rc); + } + } + + if (!stderr_closed) { + int rc = HANDLE_EINTR(read(stderr_fd, buffer.data(), buffer.size())); + if (rc <= 0) { + stderr_closed = true; + if (rc < 0) + PLOG(ERROR) << "Reading from child's stderr"; + } else if (stderr != nullptr) { + stderr->append(buffer.data(), rc); + } } } + // At this point, the subprocess already closed the output, so we only need to // wait for it to finish. 
int proc_return_code = proc.Wait(); diff --git a/common/subprocess.h b/common/subprocess.h index bac9e489..3eda8d50 100644 --- a/common/subprocess.h +++ b/common/subprocess.h @@ -88,14 +88,16 @@ class Subprocess { // Executes a command synchronously. Returns true on success. If |stdout| is // non-null, the process output is stored in it, otherwise the output is - // logged. Note that stderr is redirected to stdout. + // logged. static bool SynchronousExec(const std::vector& cmd, int* return_code, - std::string* stdout); + std::string* stdout, + std::string* stderr); static bool SynchronousExecFlags(const std::vector& cmd, uint32_t flags, int* return_code, - std::string* stdout); + std::string* stdout, + std::string* stderr); // Gets the one instance. static Subprocess& Get() { return *subprocess_singleton_; } diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc index 104ef419..8dbaa0b2 100644 --- a/common/subprocess_unittest.cc +++ b/common/subprocess_unittest.cc @@ -193,7 +193,7 @@ TEST_F(SubprocessTest, EnvVarsAreFiltered) { TEST_F(SubprocessTest, SynchronousTrueSearchsOnPath) { int rc = -1; EXPECT_TRUE(Subprocess::SynchronousExecFlags( - {"true"}, Subprocess::kSearchPath, &rc, nullptr)); + {"true"}, Subprocess::kSearchPath, &rc, nullptr, nullptr)); EXPECT_EQ(0, rc); } @@ -201,16 +201,17 @@ TEST_F(SubprocessTest, SynchronousEchoTest) { vector cmd = { kBinPath "/sh", "-c", "echo -n stdout-here; echo -n stderr-there >&2"}; int rc = -1; - string stdout; - ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout)); + string stdout, stderr; + ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout, &stderr)); EXPECT_EQ(0, rc); - EXPECT_EQ("stdout-herestderr-there", stdout); + EXPECT_EQ("stdout-here", stdout); + EXPECT_EQ("stderr-there", stderr); } TEST_F(SubprocessTest, SynchronousEchoNoOutputTest) { int rc = -1; ASSERT_TRUE(Subprocess::SynchronousExec( - {kBinPath "/sh", "-c", "echo test"}, &rc, nullptr)); + {kBinPath "/sh", "-c", "echo test"}, &rc, nullptr, nullptr)); EXPECT_EQ(0, rc); } diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index dd21c1bc..de1d7c01 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -87,14 +87,16 @@ const char* kOemRequisitionKey = "oem_device_requisition"; // shell command. Returns true on success. 
int GetVpdValue(string key, string* result) { int exit_code = 0; - string value; + string value, error; vector cmd = {"vpd_get_value", key}; if (!chromeos_update_engine::Subprocess::SynchronousExec( - cmd, &exit_code, &value) || + cmd, &exit_code, &value, &error) || exit_code) { LOG(ERROR) << "Failed to get vpd key for " << value - << " with exit code: " << exit_code; + << " with exit code: " << exit_code << " and error: " << error; return false; + } else if (!error.empty()) { + LOG(INFO) << "vpd_get_value succeeded but with following errors: " << error; } base::TrimWhitespaceASCII(value, base::TRIM_ALL, &value); @@ -198,13 +200,14 @@ string HardwareChromeOS::GetFirmwareVersion() const { } string HardwareChromeOS::GetECVersion() const { - string input_line; + string input_line, error; int exit_code = 0; vector cmd = {"/usr/sbin/mosys", "-k", "ec", "info"}; - bool success = Subprocess::SynchronousExec(cmd, &exit_code, &input_line); - if (!success || exit_code) { - LOG(ERROR) << "Unable to read ec info from mosys (" << exit_code << ")"; + if (!Subprocess::SynchronousExec(cmd, &exit_code, &input_line, &error) || + exit_code != 0) { + LOG(ERROR) << "Unable to read EC info from mosys with exit code: " + << exit_code << " and error: " << error; return ""; } @@ -353,22 +356,28 @@ bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const { bool HardwareChromeOS::SetFirstActiveOmahaPingSent() { int exit_code = 0; - string output; + string output, error; vector vpd_set_cmd = { "vpd", "-i", "RW_VPD", "-s", string(kActivePingKey) + "=1"}; - if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output) || + if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output, &error) || exit_code) { LOG(ERROR) << "Failed to set vpd key for " << kActivePingKey - << " with exit code: " << exit_code << " with error: " << output; + << " with exit code: " << exit_code << " with output: " << output + << " and error: " << error; return false; + } else if (!error.empty()) { + LOG(INFO) << "vpd succeeded but with error logs: " << error; } vector vpd_dump_cmd = {"dump_vpd_log", "--force"}; - if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output) || + if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output, &error) || exit_code) { LOG(ERROR) << "Failed to cache " << kActivePingKey << " using dump_vpd_log" - << " with exit code: " << exit_code << " with error: " << output; + << " with exit code: " << exit_code << " with output: " << output + << " and error: " << error; return false; + } else if (!error.empty()) { + LOG(INFO) << "dump_vpd_log succeeded but with error logs: " << error; } return true; } diff --git a/p2p_manager.cc b/p2p_manager.cc index 67209086..5de91d13 100644 --- a/p2p_manager.cc +++ b/p2p_manager.cc @@ -249,12 +249,12 @@ bool P2PManagerImpl::IsP2PEnabled() { bool P2PManagerImpl::EnsureP2P(bool should_be_running) { int return_code = 0; - string output; + string stderr; may_be_running_ = true; // Unless successful, we must be conservative. vector args = configuration_->GetInitctlArgs(should_be_running); - if (!Subprocess::SynchronousExec(args, &return_code, &output)) { + if (!Subprocess::SynchronousExec(args, &return_code, nullptr, &stderr)) { LOG(ERROR) << "Error spawning " << utils::StringVectorToString(args); return false; } @@ -268,7 +268,7 @@ bool P2PManagerImpl::EnsureP2P(bool should_be_running) { const char* expected_error_message = should_be_running ? 
"initctl: Job is already running: p2p\n" : "initctl: Unknown instance \n"; - if (output != expected_error_message) + if (stderr != expected_error_message) return false; } diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc index e43bc0a9..98387949 100644 --- a/payload_generator/squashfs_filesystem.cc +++ b/payload_generator/squashfs_filesystem.cc @@ -81,12 +81,12 @@ bool GetFileMapContent(const string& sqfs_path, string* map) { // Run unsquashfs to get the system file map. // unsquashfs -m vector cmd = {"unsquashfs", "-m", map_file, sqfs_path}; - string stdout; + string stdout, stderr; int exit_code; - if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout) || + if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) || exit_code != 0) { - LOG(ERROR) << "Failed to run unsquashfs -m. The stdout content was: " - << stdout; + LOG(ERROR) << "Failed to run `unsquashfs -m` with stdout content: " + << stdout << " and stderr content: " << stderr; return false; } TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map)); @@ -109,10 +109,12 @@ bool GetUpdateEngineConfig(const std::string& sqfs_path, string* config) { unsquash_dir.GetPath().value(), sqfs_path, kUpdateEngineConf}; + string stdout, stderr; int exit_code; - if (!Subprocess::SynchronousExec(cmd, &exit_code, nullptr) || + if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) || exit_code != 0) { - PLOG(ERROR) << "Failed to unsquashfs etc/update_engine.conf: "; + PLOG(ERROR) << "Failed to unsquashfs etc/update_engine.conf with stdout: " + << stdout << " and stderr: " << stderr; return false; } diff --git a/update_attempter.cc b/update_attempter.cc index 18e50881..f5e2037f 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -956,13 +956,9 @@ void UpdateAttempter::WriteUpdateCompletedMarker() { } bool UpdateAttempter::RebootDirectly() { - vector command; - command.push_back("/sbin/shutdown"); - command.push_back("-r"); - command.push_back("now"); - LOG(INFO) << "Running \"" << base::JoinString(command, " ") << "\""; + vector command = {"/sbin/shutdown", "-r", "now"}; int rc = 0; - Subprocess::SynchronousExec(command, &rc, nullptr); + Subprocess::SynchronousExec(command, &rc, nullptr, nullptr); return rc == 0; } From b314674e8ba0e275836261cfd59144882b7e1a30 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 7 Nov 2019 13:24:24 -0800 Subject: [PATCH 146/624] update_engine: Persist EOL date continously Need to not delete the old EOL date persisted file in the case that Omaha does not send EOL date attribute. Previous approach of clearing could cause unexpected behavior where a previously persisted EOL date is removed when it shouldn't be. 
BUG=chromium:1022550 TEST=FEATURES="test" P2_TEST_FILTER="*OmahaRequestActionTest.*-*RunAsRoot*" emerge-$B update_engine Change-Id: I1c491951496377940c8f1592d5451c181bc81508 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1904628 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Regan Hsu Commit-Queue: Jae Hoon Kim Auto-Submit: Jae Hoon Kim --- omaha_request_action.cc | 9 +++++---- omaha_request_action_unittest.cc | 6 +++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 6ebab0d1..f25f8ee4 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -1318,11 +1318,12 @@ bool OmahaRequestAction::PersistCohortData(const string& prefs_key, } bool OmahaRequestAction::PersistEolInfo(const map& attrs) { + // If EOL date attribute is not sent, don't delete the old persisted EOL + // date information. auto eol_date_attr = attrs.find(kAttrEolDate); - if (eol_date_attr == attrs.end()) { - system_state_->prefs()->Delete(kPrefsOmahaEolDate); - } else if (!system_state_->prefs()->SetString(kPrefsOmahaEolDate, - eol_date_attr->second)) { + if (eol_date_attr != attrs.end() && + !system_state_->prefs()->SetString(kPrefsOmahaEolDate, + eol_date_attr->second)) { LOG(ERROR) << "Setting EOL date failed."; return false; } diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index b66375f2..8dcec044 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -2815,11 +2815,15 @@ TEST_F(OmahaRequestActionTest, PersistEolMissingDateTest) { tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + const string kDate = "123"; + fake_system_state_.prefs()->SetString(kPrefsOmahaEolDate, kDate); + ASSERT_TRUE(TestUpdateCheck()); string eol_date; - EXPECT_FALSE( + EXPECT_TRUE( fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); + EXPECT_EQ(kDate, eol_date); } TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) { From 2b0f10a8c150e2094dbd1c5cdd81aed3af078474 Mon Sep 17 00:00:00 2001 From: Artur Satayev Date: Mon, 4 Nov 2019 19:56:23 +0000 Subject: [PATCH 147/624] Add @UnsupportedAppUsage annotations for greylist. go/cleanup-greylist-txt These have already been greylisted, however due to bugs/omissions in the tooling have been kept in go/greylist-txt instead of being annotated in the code. Bug: 137350495 Test: m Change-Id: I088c868c3121cd37be4a8f113637b2a96c43df38 --- binder_bindings/android/os/IUpdateEngineCallback.aidl | 1 + 1 file changed, 1 insertion(+) diff --git a/binder_bindings/android/os/IUpdateEngineCallback.aidl b/binder_bindings/android/os/IUpdateEngineCallback.aidl index ee15c8b0..4bacf9a6 100644 --- a/binder_bindings/android/os/IUpdateEngineCallback.aidl +++ b/binder_bindings/android/os/IUpdateEngineCallback.aidl @@ -19,6 +19,7 @@ package android.os; /** @hide */ oneway interface IUpdateEngineCallback { /** @hide */ + @UnsupportedAppUsage void onStatusUpdate(int status_code, float percentage); /** @hide */ void onPayloadApplicationComplete(int error_code); From e99511f9987c1315a019e1903d81e0d401331229 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 30 Oct 2019 12:57:06 -0700 Subject: [PATCH 148/624] update_engine: Deprecate GetStatus DBus and StatusUpdate Signal Nobody uses these anymore. 
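For any caller that still needs this information, a migration sketch (|client| is a hypothetical UpdateEngineClient pointer; field names follow the diff that follows):

    // Old, removed in this CL:
    //   client->GetStatus(&last_checked, &progress, &status, &version, &size);
    // New, struct-based call:
    UpdateEngineStatus status;
    if (client->GetStatus(&status))
      LOG(INFO) << "update progress: " << status.progress;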
BUG=chromium:977320 TEST=sudo FEATURES=test emerge update_engine-client update_engine Cq-Depend: chrome-internal:2099079 Change-Id: Ie7b31d95346fbf6d391d805db1b808a518bd2bda Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1891572 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- UpdateEngine.conf | 6 ----- client_library/client_dbus.cc | 18 ------------- client_library/client_dbus.h | 6 ----- client_library/include/update_engine/client.h | 20 -------------- ...rg.chromium.UpdateEngineInterface.dbus-xml | 16 ------------ dbus_service.cc | 26 ------------------- dbus_service.h | 10 ------- 7 files changed, 102 deletions(-) diff --git a/UpdateEngine.conf b/UpdateEngine.conf index e01c673d..ab776e33 100644 --- a/UpdateEngine.conf +++ b/UpdateEngine.conf @@ -39,9 +39,6 @@ - @@ -87,9 +84,6 @@ - diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 3497ce08..18b155b3 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -98,24 +98,6 @@ bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url, nullptr /* brillo::ErrorPtr* */); } -bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time, - double* out_progress, - UpdateStatus* out_update_status, - string* out_new_version, - int64_t* out_new_size) const { - StatusResult status; - if (!proxy_->GetStatusAdvanced(&status, nullptr)) { - return false; - } - - *out_last_checked_time = status.last_checked_time(); - *out_progress = status.progress(); - *out_new_version = status.new_version(); - *out_new_size = status.new_size(); - *out_update_status = static_cast(status.current_operation()); - return true; -} - bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const { StatusResult status; if (!proxy_->GetStatusAdvanced(&status, nullptr)) { diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index e964399b..6d7784ad 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -46,12 +46,6 @@ class DBusUpdateEngineClient : public UpdateEngineClient { bool AttemptInstall(const std::string& omaha_url, const std::vector& dlc_module_ids) override; - bool GetStatus(int64_t* out_last_checked_time, - double* out_progress, - UpdateStatus* out_update_status, - std::string* out_new_version, - int64_t* out_new_size) const override; - bool GetStatus(UpdateEngineStatus* out_status) const override; bool SetCohortHint(const std::string& cohort_hint) override; diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index 1a0461c0..d13359b6 100644 --- a/client_library/include/update_engine/client.h +++ b/client_library/include/update_engine/client.h @@ -60,26 +60,6 @@ class UpdateEngineClient { const std::string& omaha_url, const std::vector& dlc_module_ids) = 0; - // Returns the current status of the Update Engine. - // - // |out_last_checked_time| - // the last time the update engine checked for an update in seconds since - // the epoc. - // |out_progress| - // when downloading an update, this is calculated as - // (number of bytes received) / (total bytes). - // |out_update_status| - // See update_status.h. - // |out_new_version| - // string version of the new system image. - // |out_new_size| - // number of bytes in the new system image. 
- virtual bool GetStatus(int64_t* out_last_checked_time, - double* out_progress, - UpdateStatus* out_update_status, - std::string* out_new_version, - int64_t* out_new_size) const = 0; - // Same as above but return the entire struct instead. virtual bool GetStatus(UpdateEngineStatus* out_status) const = 0; diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index b9e0a479..afa34d76 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -51,14 +51,6 @@ - - - - - - - - @@ -111,14 +103,6 @@ - - - - - - - - diff --git a/dbus_service.cc b/dbus_service.cc index 72e11eba..b1cc2980 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -110,24 +110,6 @@ bool DBusUpdateEngineService::ResetStatus(ErrorPtr* error) { return common_->ResetStatus(error); } -bool DBusUpdateEngineService::GetStatus(ErrorPtr* error, - int64_t* out_last_checked_time, - double* out_progress, - string* out_current_operation, - string* out_new_version, - int64_t* out_new_size) { - UpdateEngineStatus status; - if (!common_->GetStatus(error, &status)) { - return false; - } - *out_last_checked_time = status.last_checked_time; - *out_progress = status.progress; - *out_current_operation = UpdateStatusToString(status.status); - *out_new_version = status.new_version; - *out_new_size = status.new_size_bytes; - return true; -} - bool DBusUpdateEngineService::GetStatusAdvanced(ErrorPtr* error, StatusResult* out_status) { UpdateEngineStatus status; @@ -237,14 +219,6 @@ void UpdateEngineAdaptor::SendStatusUpdate( StatusResult status; ConvertToStatusResult(update_engine_status, &status); - // TODO(crbug.com/977320): Deprecate |StatusUpdate| signal. - SendStatusUpdateSignal(status.last_checked_time(), - status.progress(), - UpdateStatusToString(static_cast( - status.current_operation())), - status.new_version(), - status.new_size()); - // Send |StatusUpdateAdvanced| signal. SendStatusUpdateAdvancedSignal(status); } diff --git a/dbus_service.h b/dbus_service.h index 4ea1a532..28ba268f 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -64,16 +64,6 @@ class DBusUpdateEngineService // update. This is used for development only. bool ResetStatus(brillo::ErrorPtr* error) override; - // Returns the current status of the Update Engine. If an update is in - // progress, the number of operations, size to download and overall progress - // is reported. - bool GetStatus(brillo::ErrorPtr* error, - int64_t* out_last_checked_time, - double* out_progress, - std::string* out_current_operation, - std::string* out_new_version, - int64_t* out_new_size) override; - // Similar to Above, but returns a protobuffer instead. In the future it will // have more features and is easily extendable. bool GetStatusAdvanced(brillo::ErrorPtr* error, From 3a1a5618390aeb56a2d7d4ca1cc61b97ea1515a2 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 5 Nov 2019 16:34:32 -0800 Subject: [PATCH 149/624] Proper split of BootControl and DynamicPartitionControl. All dynamic/static partitions stuff are moved to DynamicPartitionControlAndroid. After this patch: (1) BootControl remains a simple shim over the boot control HAL. (BootControl still have two calls that is a delegate to DynamicPartitionControl, which will be cleaned up in follow up CLs.) (2) DynamicPartitionControlInterface API is minimized. All libdm and other Android specific details are hidden from the API surface now. 
Also move tests from boot_control_unittest to dynamic_partition_control_unittest because functionalities are moved. Test: update_engine_unittests Change-Id: I6ed902197569f9f0ef40e02703634e9078a4b060 --- Android.bp | 1 - boot_control_android.cc | 148 +---------- boot_control_android.h | 29 +- boot_control_android_unittest.cc | 250 ------------------ dynamic_partition_control_android.cc | 128 +++++++++ dynamic_partition_control_android.h | 89 ++++++- dynamic_partition_control_android_unittest.cc | 130 +++++++++ dynamic_partition_control_interface.h | 44 --- mock_dynamic_partition_control.h | 8 - 9 files changed, 338 insertions(+), 489 deletions(-) delete mode 100644 boot_control_android_unittest.cc diff --git a/Android.bp b/Android.bp index d6f1090d..84d4a7ae 100644 --- a/Android.bp +++ b/Android.bp @@ -677,7 +677,6 @@ cc_test { test_suites: ["device-tests"], srcs: [ - "boot_control_android_unittest.cc", "certificate_checker_unittest.cc", "common/action_pipe_unittest.cc", "common/action_processor_unittest.cc", diff --git a/boot_control_android.cc b/boot_control_android.cc index b1d775e2..05e9637f 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -24,8 +24,6 @@ #include #include #include -#include -#include #include "update_engine/common/utils.h" #include "update_engine/dynamic_partition_control_android.h" @@ -88,131 +86,12 @@ BootControlInterface::Slot BootControlAndroid::GetCurrentSlot() const { return module_->getCurrentSlot(); } -bool BootControlAndroid::GetSuffix(Slot slot, string* suffix) const { - auto store_suffix_cb = [&suffix](hidl_string cb_suffix) { - *suffix = cb_suffix.c_str(); - }; - Return ret = module_->getSuffix(slot, store_suffix_cb); - - if (!ret.isOk()) { - LOG(ERROR) << "boot_control impl returned no suffix for slot " - << SlotName(slot); - return false; - } - return true; -} - -bool BootControlAndroid::IsSuperBlockDevice( - const base::FilePath& device_dir, - Slot slot, - const string& partition_name_suffix) const { - string source_device = - device_dir.Append(dynamic_control_->GetSuperPartitionName(slot)).value(); - auto source_metadata = - dynamic_control_->LoadMetadataBuilder(source_device, slot); - return source_metadata->HasBlockDevice(partition_name_suffix); -} - -BootControlAndroid::DynamicPartitionDeviceStatus -BootControlAndroid::GetDynamicPartitionDevice( - const base::FilePath& device_dir, - const string& partition_name_suffix, - Slot slot, - string* device) const { - string super_device = - device_dir.Append(dynamic_control_->GetSuperPartitionName(slot)).value(); - - auto builder = dynamic_control_->LoadMetadataBuilder(super_device, slot); - - if (builder == nullptr) { - LOG(ERROR) << "No metadata in slot " - << BootControlInterface::SlotName(slot); - return DynamicPartitionDeviceStatus::ERROR; - } - - Slot current_slot = GetCurrentSlot(); - if (builder->FindPartition(partition_name_suffix) == nullptr) { - LOG(INFO) << partition_name_suffix - << " is not in super partition metadata."; - - if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) { - LOG(ERROR) << "The static partition " << partition_name_suffix - << " is a block device for current metadata (" - << dynamic_control_->GetSuperPartitionName(current_slot) - << ", slot " << BootControlInterface::SlotName(current_slot) - << "). 
It cannot be used as a logical partition."; - return DynamicPartitionDeviceStatus::ERROR; - } - - return DynamicPartitionDeviceStatus::TRY_STATIC; - } - - if (slot == current_slot) { - if (dynamic_control_->GetState(partition_name_suffix) != - DmDeviceState::ACTIVE) { - LOG(WARNING) << partition_name_suffix << " is at current slot but it is " - << "not mapped. Now try to map it."; - } else { - if (dynamic_control_->GetDmDevicePathByName(partition_name_suffix, - device)) { - LOG(INFO) << partition_name_suffix - << " is mapped on device mapper: " << *device; - return DynamicPartitionDeviceStatus::SUCCESS; - } - LOG(ERROR) << partition_name_suffix << "is mapped but path is unknown."; - return DynamicPartitionDeviceStatus::ERROR; - } - } - - bool force_writable = slot != current_slot; - if (dynamic_control_->MapPartitionOnDeviceMapper( - super_device, partition_name_suffix, slot, force_writable, device)) { - return DynamicPartitionDeviceStatus::SUCCESS; - } - return DynamicPartitionDeviceStatus::ERROR; -} bool BootControlAndroid::GetPartitionDevice(const string& partition_name, Slot slot, string* device) const { - string suffix; - if (!GetSuffix(slot, &suffix)) { - return false; - } - const string partition_name_suffix = partition_name + suffix; - - string device_dir_str; - if (!dynamic_control_->GetDeviceDir(&device_dir_str)) { - return false; - } - base::FilePath device_dir(device_dir_str); - - // When looking up target partition devices, treat them as static if the - // current payload doesn't encode them as dynamic partitions. This may happen - // when applying a retrofit update on top of a dynamic-partitions-enabled - // build. - if (dynamic_control_->GetDynamicPartitionsFeatureFlag().IsEnabled() && - (slot == GetCurrentSlot() || is_target_dynamic_)) { - switch (GetDynamicPartitionDevice( - device_dir, partition_name_suffix, slot, device)) { - case DynamicPartitionDeviceStatus::SUCCESS: - return true; - case DynamicPartitionDeviceStatus::TRY_STATIC: - break; - case DynamicPartitionDeviceStatus::ERROR: // fallthrough - default: - return false; - } - } - - base::FilePath path = device_dir.Append(partition_name_suffix); - if (!dynamic_control_->DeviceExists(path.value())) { - LOG(ERROR) << "Device file " << path.value() << " does not exist."; - return false; - } - - *device = path.value(); - return true; + return dynamic_control_->GetPartitionDevice( + partition_name, slot, GetCurrentSlot(), device); } bool BootControlAndroid::IsSlotBootable(Slot slot) const { @@ -283,31 +162,8 @@ bool BootControlAndroid::PreparePartitionsForUpdate( Slot target_slot, const DeltaArchiveManifest& manifest, bool update_metadata) { - if (fs_mgr_overlayfs_is_setup()) { - // Non DAP devices can use overlayfs as well. - LOG(WARNING) - << "overlayfs overrides are active and can interfere with our " - "resources.\n" - << "run adb enable-verity to deactivate if required and try again."; - } - if (!dynamic_control_->GetDynamicPartitionsFeatureFlag().IsEnabled()) { - return true; - } auto source_slot = GetCurrentSlot(); - if (target_slot == source_slot) { - LOG(ERROR) << "Cannot call PreparePartitionsForUpdate on current slot."; - return false; - } - - // Although the current build supports dynamic partitions, the given payload - // doesn't use it for target partitions. This could happen when applying a - // retrofit update. Skip updating the partition metadata for the target slot. 
- is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty(); - if (!is_target_dynamic_) { - return true; - } - return dynamic_control_->PreparePartitionsForUpdate( source_slot, target_slot, manifest, update_metadata); } diff --git a/boot_control_android.h b/boot_control_android.h index 65543ca2..c81f86d4 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -22,10 +22,10 @@ #include #include -#include #include #include "update_engine/common/boot_control.h" +#include "update_engine/dynamic_partition_control_android.h" #include "update_engine/dynamic_partition_control_interface.h" namespace chromeos_update_engine { @@ -58,35 +58,10 @@ class BootControlAndroid : public BootControlInterface { private: ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_; - std::unique_ptr dynamic_control_; + std::unique_ptr dynamic_control_; friend class BootControlAndroidTest; - // Wrapper method of IBootControl::getSuffix(). - bool GetSuffix(Slot slot, std::string* out) const; - - enum class DynamicPartitionDeviceStatus { - SUCCESS, - ERROR, - TRY_STATIC, - }; - - DynamicPartitionDeviceStatus GetDynamicPartitionDevice( - const base::FilePath& device_dir, - const std::string& partition_name_suffix, - Slot slot, - std::string* device) const; - - // Return true if |partition_name_suffix| is a block device of - // super partition metadata slot |slot|. - bool IsSuperBlockDevice(const base::FilePath& device_dir, - Slot slot, - const std::string& partition_name_suffix) const; - - // Whether the target partitions should be loaded as dynamic partitions. Set - // by PreparePartitionsForUpdate() per each update. - bool is_target_dynamic_{false}; - DISALLOW_COPY_AND_ASSIGN(BootControlAndroid); }; diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc deleted file mode 100644 index e44af157..00000000 --- a/boot_control_android_unittest.cc +++ /dev/null @@ -1,250 +0,0 @@ -// -// Copyright (C) 2018 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "update_engine/boot_control_android.h" - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "update_engine/dynamic_partition_test_utils.h" -#include "update_engine/mock_boot_control_hal.h" -#include "update_engine/mock_dynamic_partition_control.h" - -using android::dm::DmDeviceState; -using android::hardware::Void; -using std::string; -using testing::_; -using testing::AnyNumber; -using testing::Invoke; -using testing::NiceMock; -using testing::Not; -using testing::Return; - -namespace chromeos_update_engine { - -class BootControlAndroidTest : public ::testing::Test { - protected: - void SetUp() override { - // Fake init bootctl_ - bootctl_.module_ = new NiceMock(); - bootctl_.dynamic_control_ = - std::make_unique>(); - - ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] { - return kMaxNumSlots; - })); - ON_CALL(module(), getSuffix(_, _)) - .WillByDefault(Invoke([](auto slot, auto cb) { - EXPECT_LE(slot, kMaxNumSlots); - cb(slot < kMaxNumSlots ? kSlotSuffixes[slot] : ""); - return Void(); - })); - - ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag()) - .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); - ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag()) - .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE))); - ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true)); - ON_CALL(dynamicControl(), GetDeviceDir(_)) - .WillByDefault(Invoke([](auto path) { - *path = kFakeDevicePath; - return true; - })); - ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _)) - .WillByDefault(Invoke([](auto partition_name_suffix, auto device) { - *device = GetDmDevice(partition_name_suffix); - return true; - })); - - ON_CALL(dynamicControl(), GetSuperPartitionName(_)) - .WillByDefault(Return(kFakeSuper)); - } - - std::string GetSuperDevice(uint32_t slot) { - return GetDevice(dynamicControl().GetSuperPartitionName(slot)); - } - - // Return the mocked HAL module. - NiceMock& module() { - return static_cast&>(*bootctl_.module_); - } - - // Return the mocked DynamicPartitionControlInterface. - NiceMock& dynamicControl() { - return static_cast&>( - *bootctl_.dynamic_control_); - } - - // Set the fake metadata to return when LoadMetadataBuilder is called on - // |slot|. - void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) { - EXPECT_CALL(dynamicControl(), - LoadMetadataBuilder(GetSuperDevice(slot), slot)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([sizes](auto, auto) { - return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes)); - })); - } - - uint32_t source() { return slots_.source; } - - uint32_t target() { return slots_.target; } - - // Return partition names with suffix of source(). - string S(const string& name) { return name + kSlotSuffixes[source()]; } - - // Return partition names with suffix of target(). - string T(const string& name) { return name + kSlotSuffixes[target()]; } - - // Set source and target slots to use before testing. - void SetSlots(const TestParam& slots) { - slots_ = slots; - - ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] { - return source(); - })); - } - - bool PreparePartitionsForUpdate(uint32_t slot, - PartitionSizes partition_sizes, - bool update_metadata = true) { - auto m = PartitionSizesToManifest(partition_sizes); - return bootctl_.PreparePartitionsForUpdate(slot, m, update_metadata); - } - - BootControlAndroid bootctl_; // BootControlAndroid under test. 
- TestParam slots_; -}; - -class BootControlAndroidTestP - : public BootControlAndroidTest, - public ::testing::WithParamInterface { - public: - void SetUp() override { - BootControlAndroidTest::SetUp(); - SetSlots(GetParam()); - } -}; - -// Test applying retrofit update on a build with dynamic partitions enabled. -TEST_P(BootControlAndroidTestP, - ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) { - SetMetadata(source(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - - // Not calling through BootControlAndroidTest::PreparePartitionsForUpdate(), - // since we don't want any default group in the PartitionMetadata. - EXPECT_TRUE(bootctl_.PreparePartitionsForUpdate(target(), {}, true)); - - // Should use dynamic source partitions. - EXPECT_CALL(dynamicControl(), GetState(S("system"))) - .Times(1) - .WillOnce(Return(DmDeviceState::ACTIVE)); - string system_device; - EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device)); - EXPECT_EQ(GetDmDevice(S("system")), system_device); - - // Should use static target partitions without querying dynamic control. - EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0); - EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device)); - EXPECT_EQ(GetDevice(T("system")), system_device); - - // Static partition "bar". - EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0); - std::string bar_device; - EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device)); - EXPECT_EQ(GetDevice(S("bar")), bar_device); - - EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0); - EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device)); - EXPECT_EQ(GetDevice(T("bar")), bar_device); -} - -TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) { - // Both of the two slots contain valid partition metadata, since this is - // resuming an update. - SetMetadata(source(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - SetMetadata(target(), - {{S("system"), 2_GiB}, - {S("vendor"), 1_GiB}, - {T("system"), 2_GiB}, - {T("vendor"), 1_GiB}}); - - EXPECT_CALL(dynamicControl(), PreparePartitionsForUpdate(_, _, _, false)) - .WillOnce(Return(true)); - - EXPECT_TRUE(PreparePartitionsForUpdate( - target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false)); - - // Dynamic partition "system". - EXPECT_CALL(dynamicControl(), GetState(S("system"))) - .Times(1) - .WillOnce(Return(DmDeviceState::ACTIVE)); - string system_device; - EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device)); - EXPECT_EQ(GetDmDevice(S("system")), system_device); - - EXPECT_CALL(dynamicControl(), GetState(T("system"))) - .Times(AnyNumber()) - .WillOnce(Return(DmDeviceState::ACTIVE)); - EXPECT_CALL(dynamicControl(), - MapPartitionOnDeviceMapper( - GetSuperDevice(target()), T("system"), target(), _, _)) - .Times(AnyNumber()) - .WillRepeatedly( - Invoke([](const auto&, const auto& name, auto, auto, auto* device) { - *device = "/fake/remapped/" + name; - return true; - })); - EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device)); - EXPECT_EQ("/fake/remapped/" + T("system"), system_device); - - // Static partition "bar". 
- EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0); - std::string bar_device; - EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device)); - EXPECT_EQ(GetDevice(S("bar")), bar_device); - - EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0); - EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device)); - EXPECT_EQ(GetDevice(T("bar")), bar_device); -} - -INSTANTIATE_TEST_CASE_P(BootControlAndroidTest, - BootControlAndroidTestP, - testing::Values(TestParam{0, 1}, TestParam{1, 0})); - -TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) { - SetSlots({1, 1}); - EXPECT_FALSE(PreparePartitionsForUpdate(target(), {})) - << "Should not be able to apply to current slot."; -} - -} // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index c641a6b3..464cdf13 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include #include "update_engine/common/boot_control_interface.h" @@ -354,6 +356,31 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( uint32_t target_slot, const DeltaArchiveManifest& manifest, bool update) { + if (fs_mgr_overlayfs_is_setup()) { + // Non DAP devices can use overlayfs as well. + LOG(WARNING) + << "overlayfs overrides are active and can interfere with our " + "resources.\n" + << "run adb enable-verity to deactivate if required and try again."; + } + + if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) { + return true; + } + + if (target_slot == source_slot) { + LOG(ERROR) << "Cannot call PreparePartitionsForUpdate on current slot."; + return false; + } + + // Although the current build supports dynamic partitions, the given payload + // doesn't use it for target partitions. This could happen when applying a + // retrofit update. Skip updating the partition metadata for the target slot. + is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty(); + if (!is_target_dynamic_) { + return true; + } + target_supports_snapshot_ = manifest.dynamic_partition_metadata().snapshot_enabled(); @@ -532,4 +559,105 @@ bool DynamicPartitionControlAndroid::FinishUpdate() { return true; } +bool DynamicPartitionControlAndroid::GetPartitionDevice( + const std::string& partition_name, + uint32_t slot, + uint32_t current_slot, + std::string* device) { + const auto& partition_name_suffix = + partition_name + SlotSuffixForSlotNumber(slot); + std::string device_dir_str; + TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str)); + base::FilePath device_dir(device_dir_str); + + // When looking up target partition devices, treat them as static if the + // current payload doesn't encode them as dynamic partitions. This may happen + // when applying a retrofit update on top of a dynamic-partitions-enabled + // build. 
+ if (GetDynamicPartitionsFeatureFlag().IsEnabled() && + (slot == current_slot || is_target_dynamic_)) { + switch (GetDynamicPartitionDevice( + device_dir, partition_name_suffix, slot, current_slot, device)) { + case DynamicPartitionDeviceStatus::SUCCESS: + return true; + case DynamicPartitionDeviceStatus::TRY_STATIC: + break; + case DynamicPartitionDeviceStatus::ERROR: // fallthrough + default: + return false; + } + } + base::FilePath path = device_dir.Append(partition_name_suffix); + if (!DeviceExists(path.value())) { + LOG(ERROR) << "Device file " << path.value() << " does not exist."; + return false; + } + + *device = path.value(); + return true; +} + +bool DynamicPartitionControlAndroid::IsSuperBlockDevice( + const base::FilePath& device_dir, + uint32_t current_slot, + const std::string& partition_name_suffix) { + std::string source_device = + device_dir.Append(GetSuperPartitionName(current_slot)).value(); + auto source_metadata = LoadMetadataBuilder(source_device, current_slot); + return source_metadata->HasBlockDevice(partition_name_suffix); +} + +DynamicPartitionControlAndroid::DynamicPartitionDeviceStatus +DynamicPartitionControlAndroid::GetDynamicPartitionDevice( + const base::FilePath& device_dir, + const std::string& partition_name_suffix, + uint32_t slot, + uint32_t current_slot, + std::string* device) { + std::string super_device = + device_dir.Append(GetSuperPartitionName(slot)).value(); + + auto builder = LoadMetadataBuilder(super_device, slot); + if (builder == nullptr) { + LOG(ERROR) << "No metadata in slot " + << BootControlInterface::SlotName(slot); + return DynamicPartitionDeviceStatus::ERROR; + } + if (builder->FindPartition(partition_name_suffix) == nullptr) { + LOG(INFO) << partition_name_suffix + << " is not in super partition metadata."; + + if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) { + LOG(ERROR) << "The static partition " << partition_name_suffix + << " is a block device for current metadata." + << "It cannot be used as a logical partition."; + return DynamicPartitionDeviceStatus::ERROR; + } + + return DynamicPartitionDeviceStatus::TRY_STATIC; + } + + if (slot == current_slot) { + if (GetState(partition_name_suffix) != DmDeviceState::ACTIVE) { + LOG(WARNING) << partition_name_suffix << " is at current slot but it is " + << "not mapped. 
Now try to map it."; + } else { + if (GetDmDevicePathByName(partition_name_suffix, device)) { + LOG(INFO) << partition_name_suffix + << " is mapped on device mapper: " << *device; + return DynamicPartitionDeviceStatus::SUCCESS; + } + LOG(ERROR) << partition_name_suffix << "is mapped but path is unknown."; + return DynamicPartitionDeviceStatus::ERROR; + } + } + + bool force_writable = slot != current_slot; + if (MapPartitionOnDeviceMapper( + super_device, partition_name_suffix, slot, force_writable, device)) { + return DynamicPartitionDeviceStatus::SUCCESS; + } + return DynamicPartitionDeviceStatus::ERROR; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 07ce2810..af37398f 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -23,6 +23,7 @@ #include #include +#include #include #include @@ -34,27 +35,24 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { ~DynamicPartitionControlAndroid(); FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; - bool MapPartitionOnDeviceMapper(const std::string& super_device, - const std::string& target_partition_name, - uint32_t slot, - bool force_writable, - std::string* path) override; void Cleanup() override; - bool DeviceExists(const std::string& path) override; - android::dm::DmDeviceState GetState(const std::string& name) override; - bool GetDmDevicePathByName(const std::string& name, - std::string* path) override; - std::unique_ptr LoadMetadataBuilder( - const std::string& super_device, uint32_t source_slot) override; bool PreparePartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest, bool update) override; - bool GetDeviceDir(std::string* path) override; - std::string GetSuperPartitionName(uint32_t slot) override; bool FinishUpdate() override; + // Return the device for partition |partition_name| at slot |slot|. + // |current_slot| should be set to the current active slot. + // Note: this function is only used by BootControl*::GetPartitionDevice. + // Other callers should prefer BootControl*::GetPartitionDevice over + // BootControl*::GetDynamicPartitionControl()->GetPartitionDevice(). + bool GetPartitionDevice(const std::string& partition_name, + uint32_t slot, + uint32_t current_slot, + std::string* device); + protected: // These functions are exposed for testing. @@ -84,6 +82,45 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { android::fs_mgr::MetadataBuilder* builder, uint32_t target_slot); + // Map logical partition on device-mapper. + // |super_device| is the device path of the physical partition ("super"). + // |target_partition_name| is the identifier used in metadata; for example, + // "vendor_a" + // |slot| is the selected slot to mount; for example, 0 for "_a". + // Returns true if mapped successfully; if so, |path| is set to the device + // path of the mapped logical partition. + virtual bool MapPartitionOnDeviceMapper( + const std::string& super_device, + const std::string& target_partition_name, + uint32_t slot, + bool force_writable, + std::string* path); + + // Return true if a static partition exists at device path |path|. + virtual bool DeviceExists(const std::string& path); + + // Returns the current state of the underlying device mapper device + // with given name. + // One of INVALID, SUSPENDED or ACTIVE. 
+ virtual android::dm::DmDeviceState GetState(const std::string& name); + + // Returns the path to the device mapper device node in '/dev' corresponding + // to 'name'. If the device does not exist, false is returned, and the path + // parameter is not set. + virtual bool GetDmDevicePathByName(const std::string& name, + std::string* path); + + // Retrieve metadata from |super_device| at slot |source_slot|. + virtual std::unique_ptr LoadMetadataBuilder( + const std::string& super_device, uint32_t source_slot); + + // Return a possible location for devices listed by name. + virtual bool GetDeviceDir(std::string* path); + + // Return the name of the super partition (which stores super partition + // metadata) for a given slot. + virtual std::string GetSuperPartitionName(uint32_t slot); + private: friend class DynamicPartitionControlAndroidTest; @@ -112,12 +149,38 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t target_slot, const DeltaArchiveManifest& manifest); + enum class DynamicPartitionDeviceStatus { + SUCCESS, + ERROR, + TRY_STATIC, + }; + + // Return SUCCESS and path in |device| if partition is dynamic. + // Return ERROR if any error. + // Return TRY_STATIC if caller should resolve the partition as a static + // partition instead. + DynamicPartitionDeviceStatus GetDynamicPartitionDevice( + const base::FilePath& device_dir, + const std::string& partition_name_suffix, + uint32_t slot, + uint32_t current_slot, + std::string* device); + + // Return true if |partition_name_suffix| is a block device of + // super partition metadata slot |slot|. + bool IsSuperBlockDevice(const base::FilePath& device_dir, + uint32_t current_slot, + const std::string& partition_name_suffix); + std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; std::unique_ptr snapshot_; std::unique_ptr metadata_device_; bool target_supports_snapshot_ = false; + // Whether the target partitions should be loaded as dynamic partitions. Set + // by PreparePartitionsForUpdate() per each update. + bool is_target_dynamic_ = false; DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); }; diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index e8ef1f98..10075ed4 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -27,9 +27,11 @@ #include "update_engine/dynamic_partition_test_utils.h" #include "update_engine/mock_dynamic_partition_control.h" +using android::dm::DmDeviceState; using std::string; using testing::_; using testing::AnyNumber; +using testing::AnyOf; using testing::Invoke; using testing::NiceMock; using testing::Not; @@ -55,6 +57,12 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { ON_CALL(dynamicControl(), GetSuperPartitionName(_)) .WillByDefault(Return(kFakeSuper)); + + ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _)) + .WillByDefault(Invoke([](auto partition_name_suffix, auto device) { + *device = GetDmDevice(partition_name_suffix); + return true; + })); } // Return the mocked DynamicPartitionControlInterface. 
@@ -283,6 +291,122 @@ TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpaceForSlot) { << "Should not be able to grow over size of super / 2"; } +TEST_P(DynamicPartitionControlAndroidTestP, + ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) { + ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::RETROFIT))); + // Static partition {system,bar}_{a,b} exists. + EXPECT_CALL(dynamicControl(), + DeviceExists(AnyOf(GetDevice(S("bar")), + GetDevice(T("bar")), + GetDevice(S("system")), + GetDevice(T("system"))))) + .WillRepeatedly(Return(true)); + + SetMetadata(source(), + {{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}); + + // Not calling through + // DynamicPartitionControlAndroidTest::PreparePartitionsForUpdate(), since we + // don't want any default group in the PartitionMetadata. + EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate( + source(), target(), {}, true)); + + // Should use dynamic source partitions. + EXPECT_CALL(dynamicControl(), GetState(S("system"))) + .Times(1) + .WillOnce(Return(DmDeviceState::ACTIVE)); + string system_device; + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "system", source(), source(), &system_device)); + EXPECT_EQ(GetDmDevice(S("system")), system_device); + + // Should use static target partitions without querying dynamic control. + EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0); + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "system", target(), source(), &system_device)); + EXPECT_EQ(GetDevice(T("system")), system_device); + + // Static partition "bar". + EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0); + std::string bar_device; + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "bar", source(), source(), &bar_device)); + EXPECT_EQ(GetDevice(S("bar")), bar_device); + + EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0); + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "bar", target(), source(), &bar_device)); + EXPECT_EQ(GetDevice(T("bar")), bar_device); +} + +TEST_P(DynamicPartitionControlAndroidTestP, + GetPartitionDeviceWhenResumingUpdate) { + // Static partition bar_{a,b} exists. + EXPECT_CALL(dynamicControl(), + DeviceExists(AnyOf(GetDevice(S("bar")), GetDevice(T("bar"))))) + .WillRepeatedly(Return(true)); + + // Both of the two slots contain valid partition metadata, since this is + // resuming an update. + SetMetadata(source(), + {{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}); + SetMetadata(target(), + {{S("system"), 2_GiB}, + {S("vendor"), 1_GiB}, + {T("system"), 2_GiB}, + {T("vendor"), 1_GiB}}); + + EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate( + source(), + target(), + PartitionSizesToManifest({{"system", 2_GiB}, {"vendor", 1_GiB}}), + false)); + + // Dynamic partition "system". 
+ EXPECT_CALL(dynamicControl(), GetState(S("system"))) + .Times(1) + .WillOnce(Return(DmDeviceState::ACTIVE)); + string system_device; + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "system", source(), source(), &system_device)); + EXPECT_EQ(GetDmDevice(S("system")), system_device); + + EXPECT_CALL(dynamicControl(), GetState(T("system"))) + .Times(AnyNumber()) + .WillOnce(Return(DmDeviceState::ACTIVE)); + EXPECT_CALL(dynamicControl(), + MapPartitionOnDeviceMapper( + GetSuperDevice(target()), T("system"), target(), _, _)) + .Times(AnyNumber()) + .WillRepeatedly( + Invoke([](const auto&, const auto& name, auto, auto, auto* device) { + *device = "/fake/remapped/" + name; + return true; + })); + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "system", target(), source(), &system_device)); + EXPECT_EQ("/fake/remapped/" + T("system"), system_device); + + // Static partition "bar". + EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0); + std::string bar_device; + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "bar", source(), source(), &bar_device)); + EXPECT_EQ(GetDevice(S("bar")), bar_device); + + EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0); + EXPECT_TRUE(dynamicControl().GetPartitionDevice( + "bar", target(), source(), &bar_device)); + EXPECT_EQ(GetDevice(T("bar")), bar_device); +} + INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest, DynamicPartitionControlAndroidTestP, testing::Values(TestParam{0, 1}, TestParam{1, 0})); @@ -486,4 +610,10 @@ TEST_F(DynamicPartitionControlAndroidTest, SimulatedSecondUpdate) { {"deleted", 64_MiB}})); } +TEST_F(DynamicPartitionControlAndroidTest, ApplyingToCurrentSlot) { + SetSlots({1, 1}); + EXPECT_FALSE(PreparePartitionsForUpdate({})) + << "Should not be able to apply to current slot."; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h index 9c18973f..b5be2be6 100644 --- a/dynamic_partition_control_interface.h +++ b/dynamic_partition_control_interface.h @@ -22,11 +22,6 @@ #include #include -#include -#include -#include - -#include "update_engine/common/boot_control_interface.h" #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -55,41 +50,9 @@ class DynamicPartitionControlInterface { // Return the feature flags of Virtual A/B on this device. virtual FeatureFlag GetVirtualAbFeatureFlag() = 0; - // Map logical partition on device-mapper. - // |super_device| is the device path of the physical partition ("super"). - // |target_partition_name| is the identifier used in metadata; for example, - // "vendor_a" - // |slot| is the selected slot to mount; for example, 0 for "_a". - // Returns true if mapped successfully; if so, |path| is set to the device - // path of the mapped logical partition. - virtual bool MapPartitionOnDeviceMapper( - const std::string& super_device, - const std::string& target_partition_name, - uint32_t slot, - bool force_writable, - std::string* path) = 0; - // Do necessary cleanups before destroying the object. virtual void Cleanup() = 0; - // Return true if a static partition exists at device path |path|. - virtual bool DeviceExists(const std::string& path) = 0; - - // Returns the current state of the underlying device mapper device - // with given name. - // One of INVALID, SUSPENDED or ACTIVE. - virtual android::dm::DmDeviceState GetState(const std::string& name) = 0; - - // Returns the path to the device mapper device node in '/dev' corresponding - // to 'name'. 
If the device does not exist, false is returned, and the path - // parameter is not set. - virtual bool GetDmDevicePathByName(const std::string& name, - std::string* path) = 0; - - // Retrieve metadata from |super_device| at slot |source_slot|. - virtual std::unique_ptr LoadMetadataBuilder( - const std::string& super_device, uint32_t source_slot) = 0; - // Prepare all partitions for an update specified in |manifest|. // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the // device would be mapped in an inconsistent way. @@ -99,13 +62,6 @@ class DynamicPartitionControlInterface { const DeltaArchiveManifest& manifest, bool update) = 0; - // Return a possible location for devices listed by name. - virtual bool GetDeviceDir(std::string* path) = 0; - - // Return the name of the super partition (which stores super partition - // metadata) for a given slot. - virtual std::string GetSuperPartitionName(uint32_t slot) = 0; - virtual bool FinishUpdate() = 0; }; diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 8146e0f1..67b3998b 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -36,17 +36,9 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { bool, std::string*)); MOCK_METHOD0(Cleanup, void()); - MOCK_METHOD1(DeviceExists, bool(const std::string&)); - MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&)); - MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*)); - MOCK_METHOD2(LoadMetadataBuilder, - std::unique_ptr<::android::fs_mgr::MetadataBuilder>( - const std::string&, uint32_t)); - MOCK_METHOD1(GetDeviceDir, bool(std::string*)); MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); MOCK_METHOD4(PreparePartitionsForUpdate, bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool)); - MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD0(FinishUpdate, bool()); }; From 15726b9f9cbbf4b7885cd257c80a5a9e8b92357f Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 5 Nov 2019 19:06:48 -0800 Subject: [PATCH 150/624] Move DynamicPartitionControlInterface to common/ Test: builds Change-Id: Iae63dee2e2c4f768690d27261347f56b90bdf2c5 --- boot_control_android.h | 2 +- .../dynamic_partition_control_interface.h | 6 +++--- dynamic_partition_control_android.h | 4 ++-- mock_dynamic_partition_control.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) rename dynamic_partition_control_interface.h => common/dynamic_partition_control_interface.h (92%) diff --git a/boot_control_android.h b/boot_control_android.h index c81f86d4..4393ae83 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -25,8 +25,8 @@ #include #include "update_engine/common/boot_control.h" +#include "update_engine/common/dynamic_partition_control_interface.h" #include "update_engine/dynamic_partition_control_android.h" -#include "update_engine/dynamic_partition_control_interface.h" namespace chromeos_update_engine { diff --git a/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h similarity index 92% rename from dynamic_partition_control_interface.h rename to common/dynamic_partition_control_interface.h index b5be2be6..b092f7a3 100644 --- a/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_ -#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_ +#ifndef UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_ +#define UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_ #include @@ -67,4 +67,4 @@ class DynamicPartitionControlInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_ +#endif // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_ diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index af37398f..c1837e07 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -17,8 +17,6 @@ #ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ #define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ -#include "update_engine/dynamic_partition_control_interface.h" - #include #include #include @@ -27,6 +25,8 @@ #include #include +#include "update_engine/common/dynamic_partition_control_interface.h" + namespace chromeos_update_engine { class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 67b3998b..db8e8344 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -22,8 +22,8 @@ #include #include "update_engine/common/boot_control_interface.h" +#include "update_engine/common/dynamic_partition_control_interface.h" #include "update_engine/dynamic_partition_control_android.h" -#include "update_engine/dynamic_partition_control_interface.h" namespace chromeos_update_engine { From daac732e42f9d3423536c226d7e0913dad9ec980 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 7 Nov 2019 10:48:26 -0800 Subject: [PATCH 151/624] BootControl exposes DynamicPartitionControl. Add BootControlInterface::GetDynamicPartitionControl, which exposes the internal DynamicPartitionControlInterface object. BootControlStub / FakeBootControl / BootControlChromeOS uses DynamicPartitionControlStub (all functions succeeds). BootControlAndroid uses DynamicPartitionControlAndroid. GetPartitionDevice is exposed so that BootControlAndroid can use it. Follow-up CLs delete duplicated PreparePartitionsForUpdate and Cleanup from BootControlInterface so that BootControlAndroid remains a thin wrapper of the HAL (+GetPartitionDevice, which exists before dynamic partitions.) 
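As a rough sketch of the intended call pattern after this change (the local names and the FeatureFlag::IsEnabled() check below are illustrative assumptions, not lines taken from this CL):

  DynamicPartitionControlInterface* dpc =
      boot_control_->GetDynamicPartitionControl();
  if (dpc->GetDynamicPartitionsFeatureFlag().IsEnabled()) {
    // Metadata for the target slot must be prepared before any dynamic
    // partition of that slot can be mapped or its device path queried.
    dpc->PreparePartitionsForUpdate(
        boot_control_->GetCurrentSlot(), target_slot, manifest,
        /*update=*/true);
  }
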
Test: update_engine_unittests Change-Id: Ifc2aa2ee8a63ef581c8ebc562ec158794ac51dfd --- Android.bp | 1 + boot_control_android.cc | 5 +++ boot_control_android.h | 1 + boot_control_chromeos.cc | 8 ++++ boot_control_chromeos.h | 5 +++ common/boot_control_interface.h | 4 ++ common/boot_control_stub.cc | 9 +++++ common/boot_control_stub.h | 7 +++- common/dynamic_partition_control_stub.cc | 49 ++++++++++++++++++++++++ common/dynamic_partition_control_stub.h | 43 +++++++++++++++++++++ common/fake_boot_control.h | 10 +++++ 11 files changed, 141 insertions(+), 1 deletion(-) create mode 100644 common/dynamic_partition_control_stub.cc create mode 100644 common/dynamic_partition_control_stub.h diff --git a/Android.bp b/Android.bp index 84d4a7ae..84186972 100644 --- a/Android.bp +++ b/Android.bp @@ -150,6 +150,7 @@ cc_library_static { "common/clock.cc", "common/constants.cc", "common/cpu_limiter.cc", + "common/dynamic_partition_control_stub.cc", "common/error_code_utils.cc", "common/file_fetcher.cc", "common/hash_calculator.cc", diff --git a/boot_control_android.cc b/boot_control_android.cc index 05e9637f..5d11e422 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -168,4 +168,9 @@ bool BootControlAndroid::PreparePartitionsForUpdate( source_slot, target_slot, manifest, update_metadata); } +DynamicPartitionControlInterface* +BootControlAndroid::GetDynamicPartitionControl() { + return dynamic_control_.get(); +} + } // namespace chromeos_update_engine diff --git a/boot_control_android.h b/boot_control_android.h index 4393ae83..0093ecb5 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -55,6 +55,7 @@ class BootControlAndroid : public BootControlInterface { const DeltaArchiveManifest& manifest, bool update_metadata) override; void Cleanup() override; + DynamicPartitionControlInterface* GetDynamicPartitionControl() override; private: ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_; diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index 7e748d5e..3792e6d4 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -31,6 +31,7 @@ extern "C" { } #include "update_engine/common/boot_control.h" +#include "update_engine/common/dynamic_partition_control_stub.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/utils.h" @@ -131,6 +132,8 @@ bool BootControlChromeOS::Init() { return false; } + dynamic_partition_control_.reset(new DynamicPartitionControlStub()); + LOG(INFO) << "Booted from slot " << current_slot_ << " (slot " << SlotName(current_slot_) << ") of " << num_slots_ << " slots present on disk " << boot_disk_name_; @@ -333,4 +336,9 @@ bool BootControlChromeOS::PreparePartitionsForUpdate( void BootControlChromeOS::Cleanup() {} +DynamicPartitionControlInterface* +BootControlChromeOS::GetDynamicPartitionControl() { + return dynamic_partition_control_.get(); +} + } // namespace chromeos_update_engine diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index 29841c91..9519fb78 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -17,12 +17,14 @@ #ifndef UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_ #define UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_ +#include #include #include #include // for FRIEND_TEST #include "update_engine/common/boot_control_interface.h" +#include "update_engine/common/dynamic_partition_control_interface.h" namespace chromeos_update_engine { @@ -54,6 +56,7 @@ class BootControlChromeOS : public BootControlInterface { const DeltaArchiveManifest& manifest, bool 
update_metadata) override; void Cleanup() override; + DynamicPartitionControlInterface* GetDynamicPartitionControl() override; private: friend class BootControlChromeOSTest; @@ -81,6 +84,8 @@ class BootControlChromeOS : public BootControlInterface { // The block device of the disk we booted from, without the partition number. std::string boot_disk_name_; + std::unique_ptr dynamic_partition_control_; + DISALLOW_COPY_AND_ASSIGN(BootControlChromeOS); }; diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h index 9bf639a6..2f8e9e3e 100644 --- a/common/boot_control_interface.h +++ b/common/boot_control_interface.h @@ -25,6 +25,7 @@ #include #include +#include "update_engine/common/dynamic_partition_control_interface.h" #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -94,6 +95,9 @@ class BootControlInterface { // Do necessary clean-up operations after the whole update. virtual void Cleanup() = 0; + // Return the dynamic partition control interface. + virtual DynamicPartitionControlInterface* GetDynamicPartitionControl() = 0; + // Return a human-readable slot name used for logging. static std::string SlotName(Slot slot) { if (slot == kInvalidSlot) diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc index b10e82f5..6ae88f11 100644 --- a/common/boot_control_stub.cc +++ b/common/boot_control_stub.cc @@ -15,6 +15,7 @@ // #include "update_engine/common/boot_control_stub.h" +#include "update_engine/common/dynamic_partition_control_stub.h" #include @@ -22,6 +23,9 @@ using std::string; namespace chromeos_update_engine { +BootControlStub::BootControlStub() + : dynamic_partition_control_(new DynamicPartitionControlStub()) {} + unsigned int BootControlStub::GetNumSlots() const { return 0; } @@ -69,4 +73,9 @@ void BootControlStub::Cleanup() { LOG(ERROR) << __FUNCTION__ << " should never be called."; } +DynamicPartitionControlInterface* +BootControlStub::GetDynamicPartitionControl() { + return dynamic_partition_control_.get(); +} + } // namespace chromeos_update_engine diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h index f2973a28..33078139 100644 --- a/common/boot_control_stub.h +++ b/common/boot_control_stub.h @@ -17,9 +17,11 @@ #ifndef UPDATE_ENGINE_COMMON_BOOT_CONTROL_STUB_H_ #define UPDATE_ENGINE_COMMON_BOOT_CONTROL_STUB_H_ +#include #include #include "update_engine/common/boot_control_interface.h" +#include "update_engine/common/dynamic_partition_control_interface.h" namespace chromeos_update_engine { @@ -32,7 +34,7 @@ namespace chromeos_update_engine { // implementation is in use. class BootControlStub : public BootControlInterface { public: - BootControlStub() = default; + BootControlStub(); ~BootControlStub() = default; // BootControlInterface overrides. 
@@ -49,8 +51,11 @@ class BootControlStub : public BootControlInterface { const DeltaArchiveManifest& manifest, bool update_metadata) override; void Cleanup() override; + DynamicPartitionControlInterface* GetDynamicPartitionControl() override; private: + std::unique_ptr dynamic_partition_control_; + DISALLOW_COPY_AND_ASSIGN(BootControlStub); }; diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc new file mode 100644 index 00000000..86f75aa0 --- /dev/null +++ b/common/dynamic_partition_control_stub.cc @@ -0,0 +1,49 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +#include + +#include + +#include "update_engine/common/dynamic_partition_control_stub.h" + +namespace chromeos_update_engine { + +FeatureFlag DynamicPartitionControlStub::GetDynamicPartitionsFeatureFlag() { + return FeatureFlag(FeatureFlag::Value::NONE); +} + +FeatureFlag DynamicPartitionControlStub::GetVirtualAbFeatureFlag() { + return FeatureFlag(FeatureFlag::Value::NONE); +} + +void DynamicPartitionControlStub::Cleanup() {} + +bool DynamicPartitionControlStub::PreparePartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool update) { + return true; +} + +bool DynamicPartitionControlStub::FinishUpdate() { + return true; +} + +} // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h new file mode 100644 index 00000000..e7895dee --- /dev/null +++ b/common/dynamic_partition_control_stub.h @@ -0,0 +1,43 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_ +#define UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_ + +#include + +#include + +#include "update_engine/common/dynamic_partition_control_interface.h" + +namespace chromeos_update_engine { + +class DynamicPartitionControlStub : public DynamicPartitionControlInterface { + public: + FeatureFlag GetDynamicPartitionsFeatureFlag() override; + FeatureFlag GetVirtualAbFeatureFlag() override; + void Cleanup() override; + bool PreparePartitionsForUpdate(uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool update) override; + + bool FinishUpdate() override; +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_ diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h index 11810d10..1d6d9799 100644 --- a/common/fake_boot_control.h +++ b/common/fake_boot_control.h @@ -18,12 +18,14 @@ #define UPDATE_ENGINE_COMMON_FAKE_BOOT_CONTROL_H_ #include +#include #include #include #include #include "update_engine/common/boot_control_interface.h" +#include "update_engine/common/dynamic_partition_control_stub.h" namespace chromeos_update_engine { @@ -34,6 +36,8 @@ class FakeBootControl : public BootControlInterface { SetNumSlots(num_slots_); // The current slot should be bootable. is_bootable_[current_slot_] = true; + + dynamic_partition_control_.reset(new DynamicPartitionControlStub()); } // BootControlInterface overrides. @@ -103,6 +107,10 @@ class FakeBootControl : public BootControlInterface { is_bootable_[slot] = bootable; } + DynamicPartitionControlInterface* GetDynamicPartitionControl() { + return dynamic_partition_control_.get(); + } + private: BootControlInterface::Slot num_slots_{2}; BootControlInterface::Slot current_slot_{0}; @@ -110,6 +118,8 @@ class FakeBootControl : public BootControlInterface { std::vector is_bootable_; std::vector> devices_; + std::unique_ptr dynamic_partition_control_; + DISALLOW_COPY_AND_ASSIGN(FakeBootControl); }; From 02e2b6b80ce870f9d0fbf9c9f048ab9b11bbc51f Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 7 Nov 2019 11:00:39 -0800 Subject: [PATCH 152/624] Remove BootControlInterface::PreparePartitionsForUpdate Replace with GetDynamicPartitionControl()->PreparePartitionsForUpdate( GetCurrentSlot(), ...). 
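In caller terms the substitution looks roughly like this (the delta_performer.cc hunk below makes exactly this change; variable names here are only illustrative):

  // Before: the boot control object prepared the partitions itself.
  boot_control_->PreparePartitionsForUpdate(
      install_plan_->target_slot, manifest_, !metadata_updated);

  // After: callers go through the dynamic partition control object and pass
  // the source slot explicitly.
  boot_control_->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
      boot_control_->GetCurrentSlot(),
      install_plan_->target_slot,
      manifest_,
      !metadata_updated);
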
Test: update_engine_unittests Change-Id: Ib03ee97a25727a9292cbb0c9aab5dbe75a2b6913 --- boot_control_android.cc | 10 ---------- boot_control_android.h | 3 --- boot_control_chromeos.cc | 5 ----- boot_control_chromeos.h | 3 --- common/boot_control_interface.h | 15 ++++----------- common/boot_control_stub.cc | 6 ------ common/boot_control_stub.h | 3 --- common/fake_boot_control.h | 6 ------ payload_consumer/delta_performer.cc | 7 +++++-- 9 files changed, 9 insertions(+), 49 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index 5d11e422..43b2c594 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -158,16 +158,6 @@ bool BootControlAndroid::MarkBootSuccessfulAsync( brillo::MessageLoop::kTaskIdNull; } -bool BootControlAndroid::PreparePartitionsForUpdate( - Slot target_slot, - const DeltaArchiveManifest& manifest, - bool update_metadata) { - - auto source_slot = GetCurrentSlot(); - return dynamic_control_->PreparePartitionsForUpdate( - source_slot, target_slot, manifest, update_metadata); -} - DynamicPartitionControlInterface* BootControlAndroid::GetDynamicPartitionControl() { return dynamic_control_.get(); diff --git a/boot_control_android.h b/boot_control_android.h index 0093ecb5..5de9fff4 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -51,9 +51,6 @@ class BootControlAndroid : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - bool PreparePartitionsForUpdate(Slot slot, - const DeltaArchiveManifest& manifest, - bool update_metadata) override; void Cleanup() override; DynamicPartitionControlInterface* GetDynamicPartitionControl() override; diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index 3792e6d4..6f50176c 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -329,11 +329,6 @@ int BootControlChromeOS::GetPartitionNumber( return -1; } -bool BootControlChromeOS::PreparePartitionsForUpdate( - Slot slot, const DeltaArchiveManifest& manifest, bool update_metadata) { - return true; -} - void BootControlChromeOS::Cleanup() {} DynamicPartitionControlInterface* diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index 9519fb78..7fe45702 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -52,9 +52,6 @@ class BootControlChromeOS : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - bool PreparePartitionsForUpdate(Slot slot, - const DeltaArchiveManifest& manifest, - bool update_metadata) override; void Cleanup() override; DynamicPartitionControlInterface* GetDynamicPartitionControl() override; diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h index 2f8e9e3e..1242393a 100644 --- a/common/boot_control_interface.h +++ b/common/boot_control_interface.h @@ -57,9 +57,10 @@ class BootControlInterface { // The |slot| number must be between 0 and GetNumSlots() - 1 and the // |partition_name| is a platform-specific name that identifies a partition on // every slot. In order to access the dynamic partitions in the target slot, - // PreparePartitionsForUpdate() must be called (once per payload) prior to - // calling this function. On success, returns true and stores the block device - // in |device|. 
+ // GetDynamicPartitionControl()->PreparePartitionsForUpdate() must be called + // (with |update| == true for the first time for a payload, and |false| for + // for the rest of the times) prior to calling this function. On success, + // returns true and stores the block device in |device|. virtual bool GetPartitionDevice(const std::string& partition_name, Slot slot, std::string* device) const = 0; @@ -84,14 +85,6 @@ class BootControlInterface { // of the operation. virtual bool MarkBootSuccessfulAsync(base::Callback callback) = 0; - // Initializes the metadata of the underlying partitions for a given |slot| - // and sets up the states for accessing dynamic partitions. - // Metadata will be written to the specified |slot| if - // |update_metadata| is set. - virtual bool PreparePartitionsForUpdate(Slot slot, - const DeltaArchiveManifest& manifest, - bool update_metadata) = 0; - // Do necessary clean-up operations after the whole update. virtual void Cleanup() = 0; diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc index 6ae88f11..3d2b6d0a 100644 --- a/common/boot_control_stub.cc +++ b/common/boot_control_stub.cc @@ -63,12 +63,6 @@ bool BootControlStub::MarkBootSuccessfulAsync( return false; } -bool BootControlStub::PreparePartitionsForUpdate( - Slot slot, const DeltaArchiveManifest& manifest, bool update_metadata) { - LOG(ERROR) << __FUNCTION__ << " should never be called."; - return false; -} - void BootControlStub::Cleanup() { LOG(ERROR) << __FUNCTION__ << " should never be called."; } diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h index 33078139..24e5f056 100644 --- a/common/boot_control_stub.h +++ b/common/boot_control_stub.h @@ -47,9 +47,6 @@ class BootControlStub : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - bool PreparePartitionsForUpdate(Slot slot, - const DeltaArchiveManifest& manifest, - bool update_metadata) override; void Cleanup() override; DynamicPartitionControlInterface* GetDynamicPartitionControl() override; diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h index 1d6d9799..d621aaee 100644 --- a/common/fake_boot_control.h +++ b/common/fake_boot_control.h @@ -78,12 +78,6 @@ class FakeBootControl : public BootControlInterface { return true; } - bool PreparePartitionsForUpdate(Slot slot, - const DeltaArchiveManifest& manifest, - bool update_metadata) override { - return true; - } - void Cleanup() override {} // Setters diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 8b3f61cf..f9244a93 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -947,8 +947,11 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { bool DeltaPerformer::PreparePartitionsForUpdate() { bool metadata_updated = false; prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated); - if (!boot_control_->PreparePartitionsForUpdate( - install_plan_->target_slot, manifest_, !metadata_updated)) { + if (!boot_control_->GetDynamicPartitionControl()->PreparePartitionsForUpdate( + boot_control_->GetCurrentSlot(), + install_plan_->target_slot, + manifest_, + !metadata_updated)) { LOG(ERROR) << "Unable to initialize partition metadata for slot " << BootControlInterface::SlotName(install_plan_->target_slot); return false; From 9194ce85caaf49d0ed68b305212f85226d18331f 
Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 7 Nov 2019 11:03:42 -0800 Subject: [PATCH 153/624] Remove BootControlInterface::Cleanup It is just a wrapper around GetDynamicPartitionControl()->Cleanup(). Delete it and let callers call it instead. Test: update_engine_unittests Change-Id: I068228a43dac122828c28bbc49a0f6f77e5b6ff2 --- boot_control_android.cc | 4 ---- boot_control_android.h | 1 - boot_control_chromeos.cc | 2 -- boot_control_chromeos.h | 1 - common/boot_control_interface.h | 3 --- common/boot_control_stub.cc | 4 ---- common/boot_control_stub.h | 1 - common/fake_boot_control.h | 2 -- update_attempter_android.cc | 2 +- 9 files changed, 1 insertion(+), 19 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index 43b2c594..9f736b7e 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -74,10 +74,6 @@ bool BootControlAndroid::Init() { return true; } -void BootControlAndroid::Cleanup() { - dynamic_control_->Cleanup(); -} - unsigned int BootControlAndroid::GetNumSlots() const { return module_->getNumberSlots(); } diff --git a/boot_control_android.h b/boot_control_android.h index 5de9fff4..c9091349 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -51,7 +51,6 @@ class BootControlAndroid : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - void Cleanup() override; DynamicPartitionControlInterface* GetDynamicPartitionControl() override; private: diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index 6f50176c..e972d411 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -329,8 +329,6 @@ int BootControlChromeOS::GetPartitionNumber( return -1; } -void BootControlChromeOS::Cleanup() {} - DynamicPartitionControlInterface* BootControlChromeOS::GetDynamicPartitionControl() { return dynamic_partition_control_.get(); diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index 7fe45702..10454be1 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -52,7 +52,6 @@ class BootControlChromeOS : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - void Cleanup() override; DynamicPartitionControlInterface* GetDynamicPartitionControl() override; private: diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h index 1242393a..41fe4eaf 100644 --- a/common/boot_control_interface.h +++ b/common/boot_control_interface.h @@ -85,9 +85,6 @@ class BootControlInterface { // of the operation. virtual bool MarkBootSuccessfulAsync(base::Callback callback) = 0; - // Do necessary clean-up operations after the whole update. - virtual void Cleanup() = 0; - // Return the dynamic partition control interface. 
virtual DynamicPartitionControlInterface* GetDynamicPartitionControl() = 0; diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc index 3d2b6d0a..d198e9d4 100644 --- a/common/boot_control_stub.cc +++ b/common/boot_control_stub.cc @@ -63,10 +63,6 @@ bool BootControlStub::MarkBootSuccessfulAsync( return false; } -void BootControlStub::Cleanup() { - LOG(ERROR) << __FUNCTION__ << " should never be called."; -} - DynamicPartitionControlInterface* BootControlStub::GetDynamicPartitionControl() { return dynamic_partition_control_.get(); diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h index 24e5f056..1dfd08bc 100644 --- a/common/boot_control_stub.h +++ b/common/boot_control_stub.h @@ -47,7 +47,6 @@ class BootControlStub : public BootControlInterface { bool MarkSlotUnbootable(BootControlInterface::Slot slot) override; bool SetActiveBootSlot(BootControlInterface::Slot slot) override; bool MarkBootSuccessfulAsync(base::Callback callback) override; - void Cleanup() override; DynamicPartitionControlInterface* GetDynamicPartitionControl() override; private: diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h index d621aaee..e031fc6f 100644 --- a/common/fake_boot_control.h +++ b/common/fake_boot_control.h @@ -78,8 +78,6 @@ class FakeBootControl : public BootControlInterface { return true; } - void Cleanup() override {} - // Setters void SetNumSlots(unsigned int num_slots) { num_slots_ = num_slots; diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 5bffc426..b76e05b5 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -597,7 +597,7 @@ void UpdateAttempterAndroid::TerminateUpdateAndNotify(ErrorCode error_code) { return; } - boot_control_->Cleanup(); + boot_control_->GetDynamicPartitionControl()->Cleanup(); download_progress_ = 0; UpdateStatus new_status = From e85836220938cdc39d96d75bbffa520e985faf8d Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 7 Nov 2019 11:36:31 -0800 Subject: [PATCH 154/624] Don't FinishUpdate() in BootControl*::SetActiveBootSlot() Instead, callers call GetDynamicPartitionControl()->FinishUpdate() directly. This is one more step forward for keeping BootControlAndroid a thin wrapper over the boot control HAL. Test: builds Test: update_engine_unittests Change-Id: I95773ed7f230d1a666d5391bec7783e42b90bf64 --- boot_control_android.cc | 4 ---- payload_consumer/postinstall_runner_action.cc | 3 ++- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index 9f736b7e..429de6aa 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -120,10 +120,6 @@ bool BootControlAndroid::MarkSlotUnbootable(Slot slot) { } bool BootControlAndroid::SetActiveBootSlot(Slot slot) { - if (slot != GetCurrentSlot() && !dynamic_control_->FinishUpdate()) { - return false; - } - CommandResult result; auto ret = module_->setActiveBootSlot(slot, StoreResultCallback(&result)); if (!ret.isOk()) { diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index a782b8f2..13676494 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -339,7 +339,8 @@ void PostinstallRunnerAction::CompletePostinstall(ErrorCode error_code) { // steps succeeded. 
if (error_code == ErrorCode::kSuccess) { if (install_plan_.switch_slot_on_reboot) { - if (!boot_control_->SetActiveBootSlot(install_plan_.target_slot)) { + if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate() || + !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) { error_code = ErrorCode::kPostinstallRunnerError; } } else { From d6aa91fbaaa175f38d7e14e6e84db9d9fe25d687 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Thu, 14 Nov 2019 11:55:10 -0800 Subject: [PATCH 155/624] Schedule a warm_reset after an OTA The property is set to inform kernel to do a warm_reset on the next reboot. This is useful to persist the logs to debug device boot failures. More details in http://go/rvc-ota-persist-logs. Bug: 143489994 Test: check the property sets after an update Change-Id: I776a6996e3b3ef77514da0acc91585d28e5cdf15 --- common/fake_hardware.h | 3 +++ common/hardware_interface.h | 4 ++++ hardware_android.cc | 7 +++++++ hardware_android.h | 1 + hardware_chromeos.cc | 2 ++ hardware_chromeos.h | 1 + payload_consumer/postinstall_runner_action.cc | 3 +++ update_attempter_android.cc | 3 +++ 8 files changed, 24 insertions(+) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 8da5326c..0b232daa 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -191,6 +191,8 @@ class FakeHardware : public HardwareInterface { build_timestamp_ = build_timestamp; } + void SetWarmReset(bool warm_reset) { warm_reset_ = warm_reset; } + // Getters to verify state. int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; } @@ -218,6 +220,7 @@ class FakeHardware : public HardwareInterface { bool is_rollback_powerwash_{false}; int64_t build_timestamp_{0}; bool first_active_omaha_ping_sent_{false}; + bool warm_reset_{false}; DISALLOW_COPY_AND_ASSIGN(FakeHardware); }; diff --git a/common/hardware_interface.h b/common/hardware_interface.h index 4a64c3e8..d92a6fcc 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -134,6 +134,10 @@ class HardwareInterface { // Persist the fact that first active ping was sent to omaha and returns false // if failed to persist it. virtual bool SetFirstActiveOmahaPingSent() = 0; + + // If |warm_reset| is true, sets the warm reset to indicate a warm reset is + // needed on the next reboot. Otherwise, clears the flag. + virtual void SetWarmReset(bool warm_reset) = 0; }; } // namespace chromeos_update_engine diff --git a/hardware_android.cc b/hardware_android.cc index 9611ba68..068468b8 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -210,4 +210,11 @@ bool HardwareAndroid::SetFirstActiveOmahaPingSent() { return true; } +void HardwareAndroid::SetWarmReset(bool warm_reset) { + constexpr char warm_reset_prop[] = "ota.warm_reset"; + if (!android::base::SetProperty(warm_reset_prop, warm_reset ? 
"1" : "0")) { + LOG(WARNING) << "Failed to set prop " << warm_reset_prop; + } +} + } // namespace chromeos_update_engine diff --git a/hardware_android.h b/hardware_android.h index 2a8f6692..145a936d 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -56,6 +56,7 @@ class HardwareAndroid final : public HardwareInterface { bool AllowDowngrade() const override; bool GetFirstActiveOmahaPingSent() const override; bool SetFirstActiveOmahaPingSent() override; + void SetWarmReset(bool warm_reset) override; private: DISALLOW_COPY_AND_ASSIGN(HardwareAndroid); diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 8ef05b2e..a49375e6 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -333,4 +333,6 @@ bool HardwareChromeOS::SetFirstActiveOmahaPingSent() { return true; } +void HardwareChromeOS::SetWarmReset(bool warm_reset) {} + } // namespace chromeos_update_engine diff --git a/hardware_chromeos.h b/hardware_chromeos.h index 57be3b03..2bea9896 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -61,6 +61,7 @@ class HardwareChromeOS final : public HardwareInterface { bool AllowDowngrade() const override { return false; } bool GetFirstActiveOmahaPingSent() const override; bool SetFirstActiveOmahaPingSent() override; + void SetWarmReset(bool warm_reset) override; private: friend class HardwareChromeOSTest; diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 13676494..c0c3956a 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -342,6 +342,9 @@ void PostinstallRunnerAction::CompletePostinstall(ErrorCode error_code) { if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate() || !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) { error_code = ErrorCode::kPostinstallRunnerError; + } else { + // Schedules warm reset on next reboot, ignores the error. + hardware_->SetWarmReset(true); } } else { error_code = ErrorCode::kUpdatedButNotActive; diff --git a/update_attempter_android.cc b/update_attempter_android.cc index b76e05b5..bc97a111 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -351,6 +351,9 @@ bool UpdateAttempterAndroid::ResetStatus(brillo::ErrorPtr* error) { if (!boot_control_->MarkBootSuccessfulAsync(Bind([](bool successful) {}))) ret_value = false; + // Resets the warm reset property since we won't switch the slot. + hardware_->SetWarmReset(false); + if (!ret_value) { return LogAndSetError( error, FROM_HERE, "Failed to reset the status to "); From d04ca0c5cc9e4507301be355fd3bd86b871b05c4 Mon Sep 17 00:00:00 2001 From: Andrew Lassalle Date: Mon, 18 Nov 2019 11:33:57 -0800 Subject: [PATCH 156/624] update_engine: Fix delta_performer_fuzzer -Set the flag hash_checks_mandatory to true in the instance of InstallPlan in the fuzzer. This allows the delta performer to perfom aditional checks which prevent incosistencies between the manifest size in the payload and the one reported by Omaha. -Stop the fuzzer test if the Write function returns false. -Fix a few minor typos. 
BUG=chromium:1020621 TEST=cros_fuzz reproduce clusterfuzz-testcase-minimized-update_engine_delta_performer_fuzzer -4857426773671936 Change-Id: I43ca72a35d52d3b88453eb516ee05d64feadc764 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1919716 Auto-Submit: Andrew Lassalle Reviewed-by: Amin Hassani Tested-by: Andrew Lassalle Commit-Queue: Andrew Lassalle --- README.md | 18 +++++++++--------- payload_consumer/delta_performer_fuzzer.cc | 4 +++- update_metadata.proto | 2 +- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 0c769267..82642f72 100644 --- a/README.md +++ b/README.md @@ -187,7 +187,7 @@ Non-interactive updates are updates that are scheduled periodically by the update engine and happen in the background. Interactive updates, on the other hand, happen when a user specifically requests an update check (e.g. by clicking on “Check For Update” button in Chrome OS’s About page). Depending on the update -servers policies, interactive updates have higher priority than non-interactive +server's policies, interactive updates have higher priority than non-interactive updates (by carrying marker hints). They may decide to not provide an update if they have busy server load, etc. There are other internal differences between these two types of updates too. For example, interactive updates try to install @@ -237,12 +237,12 @@ system reboots. The latest active log is symlinked to The update payload generation is the process of converting a set of partitions/files into a format that is both understandable by the updater client -(especially if it's much older versions) and is securely verifiable. This +(especially if it's a much older version) and is securely verifiable. This process involves breaking the input partitions into smaller components and compressing them in order to help with network bandwidth when downloading the payloads. -For each generated payload, there is corresponding properties file which +For each generated payload, there is a corresponding properties file which contains the metadata information of the payload in JSON format. Normally the file is located in the same location as the generated payload and its file name is the same as the payload file name plus `.json` @@ -389,11 +389,11 @@ private keys. ## update_payload Scripts -[update_payload] contains a set of python scripts mostly to validate payload -generation and application. We normally test the update payloads using an actual -device (live tests). [`brillo_update_payload`] script can be used to generate -and test applying of a payload on a host device machine. These tests can be -viewed as dynamic tests without the need for an actual device. Other +[update_payload] contains a set of python scripts used mostly to validate +payload generation and application. We normally test the update payloads using +an actual device (live tests). [`brillo_update_payload`] script can be used to +generate and test applying of a payload on a host device machine. These tests +can be viewed as dynamic tests without the need for an actual device. Other `update_payload` scripts (like [`check_update_payload`]) can be used to statically check that a payload is in the correct state and its application works correctly. These scripts actually apply the payload statically without @@ -532,7 +532,7 @@ identify different update parameters like the updater server (Omaha) URL, the current channel, etc. 
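The same handful of compatibility patterns repeat throughout the diff: true floor division, bytes-aware helpers, and six.moves shims. A small self-contained illustration of those patterns, runnable under both python2 and python3 (the function names are made up for this sketch, not taken from the CL):

  from __future__ import absolute_import, division, print_function

  from six.moves import range


  def blocks_needed(size, block_size):
    # '/' would yield a float under python3; '//' floors on both versions.
    return (size + block_size - 1) // block_size


  def hex_dump(data):
    # bytearray() yields ints on both python2 and python3, so '%.2x' works
    # without calling ord().
    return ' '.join('%.2x' % c for c in bytearray(data))


  for off in range(0, 3):
    print(blocks_needed(off * 4096 + 1, 4096), hex_dump(b'\x00\x0a\x0c'))
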
However, to override any of these parameters, create the file `/mnt/stateful_partition/etc/lsb-release` with desired customized parameters. For example, this can be used to point to a developer version of the -update server and allow the update_engine to schedule a periodic update form +update server and allow the update_engine to schedule a periodic update from that specific server. If you have some changes in the protocol that communicates with Omaha, but you diff --git a/payload_consumer/delta_performer_fuzzer.cc b/payload_consumer/delta_performer_fuzzer.cc index 53b168aa..73082c4c 100644 --- a/payload_consumer/delta_performer_fuzzer.cc +++ b/payload_consumer/delta_performer_fuzzer.cc @@ -60,6 +60,7 @@ void FuzzDeltaPerformer(const uint8_t* data, size_t size) { .target_path = "/dev/null", .target_size = 4096, }}, + .hash_checks_mandatory = true, }; InstallPlan::Payload payload{ @@ -81,7 +82,8 @@ void FuzzDeltaPerformer(const uint8_t* data, size_t size) { do { auto chunk_size = data_provider.ConsumeIntegralInRange(0, 100); auto data = data_provider.ConsumeBytes(chunk_size); - performer.Write(data.data(), data.size()); + if (!performer.Write(data.data(), data.size())) + break; } while (data_provider.remaining_bytes() > 0); } diff --git a/update_metadata.proto b/update_metadata.proto index 40db6785..3d136cad 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -73,7 +73,7 @@ // new partition. // - ZERO: Write zeros to the destination dst_extents. // - DISCARD: Discard the destination dst_extents blocks on the physical medium. -// the data read from those block is undefined. +// the data read from those blocks is undefined. // - REPLACE_XZ: Replace the dst_extents with the contents of the attached // xz file after decompression. The xz file should only use crc32 or no crc at // all to be compatible with xz-embedded. From 165843ca10908d7bd79582829a5ee51b098685e6 Mon Sep 17 00:00:00 2001 From: Andrew Lassalle Date: Tue, 5 Nov 2019 13:30:34 -0800 Subject: [PATCH 157/624] update_payload: Port scripts to python3 Update the update_payload scripts to be compatible with python2 and python3. Python2 compatibility is needed since the repo is shared with Android. 
BUG=chromium:1011631 TEST=Executed aosp/system/update_engine/scripts/run_unittests and cros_generate_update_payload Cq-Depend: chromium:1904837, chromium:1911499 Change-Id: Ie450b80b5f7550051b38d320173ccc0c915f65e7 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1904310 Commit-Queue: Andrew Lassalle Tested-by: Andrew Lassalle Reviewed-by: Mike Frysinger Reviewed-by: Amin Hassani Auto-Submit: Andrew Lassalle --- scripts/blockdiff.py | 5 +- scripts/paycheck.py | 10 +- scripts/payload_info.py | 21 +- scripts/payload_info_unittest.py | 40 +++- scripts/update_device.py | 11 +- scripts/update_payload/__init__.py | 2 + scripts/update_payload/applier.py | 11 +- scripts/update_payload/checker.py | 21 +- scripts/update_payload/checker_unittest.py | 209 +++++++++++------- scripts/update_payload/common.py | 14 +- scripts/update_payload/format_utils.py | 8 +- .../update_payload/format_utils_unittest.py | 7 +- scripts/update_payload/histogram.py | 5 +- scripts/update_payload/histogram_unittest.py | 7 +- scripts/update_payload/payload.py | 4 +- scripts/update_payload/test_utils.py | 9 +- 16 files changed, 242 insertions(+), 142 deletions(-) diff --git a/scripts/blockdiff.py b/scripts/blockdiff.py index 5793def7..95893cf4 100755 --- a/scripts/blockdiff.py +++ b/scripts/blockdiff.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,6 +17,7 @@ """Block diff utility.""" +from __future__ import absolute_import from __future__ import print_function # pylint: disable=import-error @@ -46,7 +47,7 @@ def BlockDiff(block_size, file1, file2, name1, name2, max_length=-1): """ if max_length < 0: - max_length = sys.maxint + max_length = sys.maxsize diff_list = [] num_blocks = extent_start = extent_length = 0 while max_length or extent_length: diff --git a/scripts/paycheck.py b/scripts/paycheck.py index 875b00f7..35877505 100755 --- a/scripts/paycheck.py +++ b/scripts/paycheck.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,6 +17,7 @@ """Command-line tool for checking and applying Chrome OS update payloads.""" +from __future__ import absolute_import from __future__ import print_function # pylint: disable=import-error @@ -26,13 +27,14 @@ import sys import tempfile -from update_payload import common +from six.moves import zip from update_payload import error + lib_dir = os.path.join(os.path.dirname(__file__), 'lib') if os.path.exists(lib_dir) and os.path.isdir(lib_dir): sys.path.insert(1, lib_dir) -import update_payload +import update_payload # pylint: disable=wrong-import-position _TYPE_FULL = 'full' @@ -287,7 +289,7 @@ def main(argv): # files are created as temp files and will be deleted upon close(). 
for handle in file_handles: handle.close() - except error.PayloadError, e: + except error.PayloadError as e: sys.stderr.write('Error: %s\n' % e) return 1 diff --git a/scripts/payload_info.py b/scripts/payload_info.py index bb7f8a41..965bb76f 100755 --- a/scripts/payload_info.py +++ b/scripts/payload_info.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2015 The Android Open Source Project @@ -18,15 +18,17 @@ """payload_info: Show information about an update payload.""" +from __future__ import absolute_import from __future__ import print_function import argparse -import itertools import sys import textwrap +from six.moves import range import update_payload + MAJOR_PAYLOAD_VERSION_BRILLO = 2 def DisplayValue(key, value): @@ -40,12 +42,12 @@ def DisplayValue(key, value): def DisplayHexData(data, indent=0): """Print out binary data as a hex values.""" for off in range(0, len(data), 16): - chunk = data[off:off + 16] + chunk = bytearray(data[off:off + 16]) print(' ' * indent + - ' '.join('%.2x' % ord(c) for c in chunk) + + ' '.join('%.2x' % c for c in chunk) + ' ' * (16 - len(chunk)) + ' | ' + - ''.join(c if 32 <= ord(c) < 127 else '.' for c in chunk)) + ''.join(chr(c) if 32 <= c < 127 else '.' for c in chunk)) class PayloadCommand(object): @@ -144,7 +146,7 @@ def _DisplayExtents(extents, name): op_dict = update_payload.common.OpType.NAMES print('%s:' % name) - for op, op_count in itertools.izip(operations, itertools.count()): + for op_count, op in enumerate(operations): print(' %d: %s' % (op_count, op_dict[op.type])) if op.HasField('data_offset'): print(' Data offset: %s' % op.data_offset) @@ -178,8 +180,8 @@ def _GetStats(self, manifest): last_ext = curr_ext # Old and new partitions are read once during verification. 
- read_blocks += partition.old_partition_info.size / manifest.block_size - read_blocks += partition.new_partition_info.size / manifest.block_size + read_blocks += partition.old_partition_info.size // manifest.block_size + read_blocks += partition.new_partition_info.size // manifest.block_size stats = {'read_blocks': read_blocks, 'written_blocks': written_blocks, @@ -212,7 +214,7 @@ def Run(self): def main(): parser = argparse.ArgumentParser( description='Show information about an update payload.') - parser.add_argument('payload_file', type=file, + parser.add_argument('payload_file', type=argparse.FileType('rb'), help='The update payload file.') parser.add_argument('--list_ops', default=False, action='store_true', help='List the install operations and their extents.') @@ -224,5 +226,6 @@ def main(): PayloadCommand(args).Run() + if __name__ == '__main__': sys.exit(main()) diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py index bf9f60a1..07bb679d 100755 --- a/scripts/payload_info_unittest.py +++ b/scripts/payload_info_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2015 The Android Open Source Project # @@ -17,14 +17,19 @@ """Unit testing payload_info.py.""" +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import from __future__ import print_function -import StringIO import sys import unittest from contextlib import contextmanager +from six.moves import StringIO + import mock # pylint: disable=import-error import payload_info @@ -32,9 +37,11 @@ from update_payload import update_metadata_pb2 + class FakePayloadError(Exception): """A generic error when using the FakePayload.""" + class FakeOption(object): """Fake options object for testing.""" @@ -42,11 +49,12 @@ def __init__(self, **kwargs): self.list_ops = False self.stats = False self.signatures = False - for key, val in kwargs.iteritems(): + for key, val in kwargs.items(): setattr(self, key, val) if not hasattr(self, 'payload_file'): self.payload_file = None + class FakeOp(object): """Fake manifest operation for testing.""" @@ -54,23 +62,26 @@ def __init__(self, src_extents, dst_extents, op_type, **kwargs): self.src_extents = src_extents self.dst_extents = dst_extents self.type = op_type - for key, val in kwargs.iteritems(): + for key, val in kwargs.items(): setattr(self, key, val) def HasField(self, field): return hasattr(self, field) + class FakeExtent(object): """Fake Extent for testing.""" def __init__(self, start_block, num_blocks): self.start_block = start_block self.num_blocks = num_blocks + class FakePartitionInfo(object): """Fake PartitionInfo for testing.""" def __init__(self, size): self.size = size + class FakePartition(object): """Fake PartitionUpdate field for testing.""" @@ -80,6 +91,7 @@ def __init__(self, partition_name, operations, old_size, new_size): self.old_partition_info = FakePartitionInfo(old_size) self.new_partition_info = FakePartitionInfo(new_size) + class FakeManifest(object): """Fake manifest for testing.""" @@ -94,7 +106,7 @@ def __init__(self): ], 1 * 4096, 3 * 4096), FakePartition(update_payload.common.KERNEL, [FakeOp([FakeExtent(1, 1)], - [FakeExtent(x, x) for x in xrange(20)], + [FakeExtent(x, x) for x in range(20)], update_payload.common.OpType.SOURCE_COPY, src_length=4096) ], 2 * 4096, 4 * 4096), @@ -108,6 +120,7 @@ def HasField(self, field_name): """Fake HasField method based on the python members.""" return hasattr(self, field_name) and 
getattr(self, field_name) is not None + class FakeHeader(object): """Fake payload header for testing.""" @@ -120,6 +133,7 @@ def __init__(self, manifest_len, metadata_signature_len): def size(self): return 24 + class FakePayload(object): """Fake payload for testing.""" @@ -156,7 +170,7 @@ def ReadDataBlob(self, offset, length): def _AddSignatureToProto(proto, **kwargs): """Add a new Signature element to the passed proto.""" new_signature = proto.signatures.add() - for key, val in kwargs.iteritems(): + for key, val in kwargs.items(): setattr(new_signature, key, val) def AddPayloadSignature(self, **kwargs): @@ -174,6 +188,7 @@ def AddMetadataSignature(self, **kwargs): self._header.metadata_signature_len = len(blob) self._blobs[-len(blob)] = blob + class PayloadCommandTest(unittest.TestCase): """Test class for our PayloadCommand class.""" @@ -182,7 +197,7 @@ def OutputCapturer(self): """A tool for capturing the sys.stdout""" stdout = sys.stdout try: - sys.stdout = StringIO.StringIO() + sys.stdout = StringIO() yield sys.stdout finally: sys.stdout = stdout @@ -196,13 +211,13 @@ def TestCommand(self, payload_cmd, payload, expected_out): with mock.patch.object(update_payload, 'Payload', return_value=payload), \ self.OutputCapturer() as output: payload_cmd.Run() - self.assertEquals(output.getvalue(), expected_out) + self.assertEqual(output.getvalue(), expected_out) def testDisplayValue(self): """Verify that DisplayValue prints what we expect.""" with self.OutputCapturer() as output: payload_info.DisplayValue('key', 'value') - self.assertEquals(output.getvalue(), 'key: value\n') + self.assertEqual(output.getvalue(), 'key: value\n') def testRun(self): """Verify that Run parses and displays the payload like we expect.""" @@ -288,9 +303,9 @@ def testSignatures(self): FakeOption(action='show', signatures=True)) payload = FakePayload() payload.AddPayloadSignature(version=1, - data='12345678abcdefgh\x00\x01\x02\x03') - payload.AddPayloadSignature(data='I am a signature so access is yes.') - payload.AddMetadataSignature(data='\x00\x0a\x0c') + data=b'12345678abcdefgh\x00\x01\x02\x03') + payload.AddPayloadSignature(data=b'I am a signature so access is yes.') + payload.AddMetadataSignature(data=b'\x00\x0a\x0c') expected_out = """Payload version: 2 Manifest length: 222 Number of partitions: 2 @@ -314,5 +329,6 @@ def testSignatures(self): """ self.TestCommand(payload_cmd, payload, expected_out) + if __name__ == '__main__': unittest.main() diff --git a/scripts/update_device.py b/scripts/update_device.py index 5c19b89a..f970bd3e 100755 --- a/scripts/update_device.py +++ b/scripts/update_device.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2017 The Android Open Source Project # @@ -17,8 +17,9 @@ """Send an A/B update to an Android device over adb.""" +from __future__ import absolute_import + import argparse -import BaseHTTPServer import hashlib import logging import os @@ -29,6 +30,8 @@ import xml.etree.ElementTree import zipfile +from six.moves import BaseHTTPServer + import update_payload.payload @@ -41,6 +44,7 @@ # The port on the device that update_engine should connect to. DEVICE_PORT = 1234 + def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None): """Copy from a file object to another. 
@@ -130,7 +134,6 @@ def _parse_range(range_str, file_size): start_range = file_size - int(e) return start_range, end_range - def do_GET(self): # pylint: disable=invalid-name """Reply with the requested payload file.""" if self.path != '/payload': @@ -173,7 +176,6 @@ def do_GET(self): # pylint: disable=invalid-name f.seek(serving_start + start_range) CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range) - def do_POST(self): # pylint: disable=invalid-name """Reply with the omaha response xml.""" if self.path != '/update': @@ -442,5 +444,6 @@ def main(): return 0 + if __name__ == '__main__': sys.exit(main()) diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py index 8ee95e20..6e77678d 100644 --- a/scripts/update_payload/__init__.py +++ b/scripts/update_payload/__init__.py @@ -17,6 +17,8 @@ """Library for processing, verifying and applying Chrome OS update payloads.""" # Just raise the interface classes to the root namespace. +from __future__ import absolute_import + from update_payload.checker import CHECKS_TO_DISABLE from update_payload.error import PayloadError from update_payload.payload import Payload diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py index 511ed497..7830c002 100644 --- a/scripts/update_payload/applier.py +++ b/scripts/update_payload/applier.py @@ -24,6 +24,7 @@ """ +from __future__ import absolute_import from __future__ import print_function import array @@ -70,7 +71,7 @@ def _VerifySha256(file_obj, expected_hash, name, length=-1): """ hasher = hashlib.sha256() block_length = 1024 * 1024 - max_length = length if length >= 0 else sys.maxint + max_length = length if length >= 0 else sys.maxsize while max_length > 0: read_length = min(max_length, block_length) @@ -108,7 +109,7 @@ def _ReadExtents(file_obj, extents, block_size, max_length=-1): """ data = array.array('c') if max_length < 0: - max_length = sys.maxint + max_length = sys.maxsize for ex in extents: if max_length == 0: break @@ -176,7 +177,7 @@ def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1): arg = '' pad_off = pad_len = 0 if data_length < 0: - data_length = sys.maxint + data_length = sys.maxsize for ex, ex_name in common.ExtentIter(extents, base_name): if not data_length: raise PayloadError('%s: more extents than total data length' % ex_name) @@ -416,7 +417,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, "--dst_extents=%s" % out_extents_arg] subprocess.check_call(puffpatch_cmd) else: - raise PayloadError("Unknown operation %s", op.type) + raise PayloadError("Unknown operation %s" % op.type) # Pad with zeros past the total output length. if pad_len: @@ -451,7 +452,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, "--patch_file=%s" % patch_file_name] subprocess.check_call(puffpatch_cmd) else: - raise PayloadError("Unknown operation %s", op.type) + raise PayloadError("Unknown operation %s" % op.type) # Read output. with open(out_file_name, 'rb') as out_file: diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py index 4558872b..4c655160 100644 --- a/scripts/update_payload/checker.py +++ b/scripts/update_payload/checker.py @@ -24,6 +24,7 @@ checker.Run(...) 
""" +from __future__ import absolute_import from __future__ import print_function import array @@ -34,13 +35,14 @@ import os import subprocess +from six.moves import range + from update_payload import common from update_payload import error from update_payload import format_utils from update_payload import histogram from update_payload import update_metadata_pb2 - # # Constants. # @@ -71,6 +73,7 @@ 6: (_TYPE_DELTA,), } + # # Helper functions. # @@ -647,7 +650,7 @@ def _CheckManifest(self, report, part_sizes=None): 'Apparent full payload contains old_{kernel,rootfs}_info.') self.payload_type = _TYPE_DELTA - for part, (msg, part_report) in self.old_part_info.iteritems(): + for part, (msg, part_report) in self.old_part_info.items(): # Check: {size, hash} present in old_{kernel,rootfs}_info. field = 'old_%s_info' % part self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', @@ -668,7 +671,7 @@ def _CheckManifest(self, report, part_sizes=None): self.payload_type = _TYPE_FULL # Check: new_{kernel,rootfs}_info present; contains {size, hash}. - for part, (msg, part_report) in self.new_part_info.iteritems(): + for part, (msg, part_report) in self.new_part_info.items(): field = 'new_%s_info' % part self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', part_report, field) @@ -740,7 +743,7 @@ def _CheckExtents(self, extents, usable_size, block_counters, name): (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) # Record block usage. - for i in xrange(start_block, end_block): + for i in range(start_block, end_block): block_counters[i] += 1 total_num_blocks += num_blocks @@ -759,6 +762,11 @@ def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name): Raises: error.PayloadError if any check fails. """ + # Check: total_dst_blocks is not a floating point. + if isinstance(total_dst_blocks, float): + raise error.PayloadError('%s: contains invalid data type of ' + 'total_dst_blocks.' % op_name) + # Check: Does not contain src extents. if op.src_extents: raise error.PayloadError('%s: contains src_extents.' % op_name) @@ -975,7 +983,7 @@ def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters, def _SizeToNumBlocks(self, size): """Returns the number of blocks needed to contain a given byte size.""" - return (size + self.block_size - 1) / self.block_size + return (size + self.block_size - 1) // self.block_size def _AllocBlockCounters(self, total_size): """Returns a freshly initialized array of block counters. @@ -1054,7 +1062,7 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, op_num += 1 # Check: Type is valid. - if op.type not in op_counts.keys(): + if op.type not in op_counts: raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type)) op_counts[op.type] += 1 @@ -1127,7 +1135,6 @@ def _CheckSignatures(self, report, pubkey_file_name): raise error.PayloadError('It seems like the last operation is the ' 'signature blob. This is an invalid payload.') - # Compute the checksum of all data up to signature blob. 
# TODO(garnold) we're re-reading the whole data section into a string # just to compute the checksum; instead, we could do it incrementally as diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py index 48816539..993b785c 100755 --- a/scripts/update_payload/checker_unittest.py +++ b/scripts/update_payload/checker_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,30 +17,33 @@ """Unit testing checker.py.""" -from __future__ import print_function +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import import array import collections -import cStringIO import hashlib +import io import itertools import os import unittest -# pylint cannot find mox. -# pylint: disable=F0401 -import mox +from six.moves import zip + +import mock # pylint: disable=import-error from update_payload import checker from update_payload import common from update_payload import test_utils from update_payload import update_metadata_pb2 from update_payload.error import PayloadError -from update_payload.payload import Payload # Avoid name conflicts later. +from update_payload.payload import Payload # Avoid name conflicts later. def _OpTypeByName(op_name): - """Returns the type of an operation from itsname.""" + """Returns the type of an operation from its name.""" op_name_to_type = { 'REPLACE': common.OpType.REPLACE, 'REPLACE_BZ': common.OpType.REPLACE_BZ, @@ -63,7 +66,7 @@ def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None, if checker_init_dargs is None: checker_init_dargs = {} - payload_file = cStringIO.StringIO() + payload_file = io.BytesIO() payload_gen_write_to_file_func(payload_file, **payload_gen_dargs) payload_file.seek(0) payload = Payload(payload_file) @@ -73,7 +76,7 @@ def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None, def _GetPayloadCheckerWithData(payload_gen): """Returns a payload checker from a given payload generator.""" - payload_file = cStringIO.StringIO() + payload_file = io.BytesIO() payload_gen.WriteToFile(payload_file) payload_file.seek(0) payload = Payload(payload_file) @@ -87,7 +90,7 @@ def _GetPayloadCheckerWithData(payload_gen): # pylint: disable=W0212 # Don't bark about missing members of classes you cannot import. # pylint: disable=E1101 -class PayloadCheckerTest(mox.MoxTestBase): +class PayloadCheckerTest(unittest.TestCase): """Tests the PayloadChecker class. In addition to ordinary testFoo() methods, which are automatically invoked by @@ -100,11 +103,42 @@ class PayloadCheckerTest(mox.MoxTestBase): all such tests is done in AddAllParametricTests(). """ + def setUp(self): + """setUp function for unittest testcase""" + self.mock_checks = [] + + def tearDown(self): + """tearDown function for unittest testcase""" + # Verify that all mock functions were called. + for check in self.mock_checks: + check.mock_fn.assert_called_once_with(*check.exp_args, **check.exp_kwargs) + + class MockChecksAtTearDown(object): + """Mock data storage. + + This class stores the mock functions and its arguments to be checked at a + later point. 
+ """ + def __init__(self, mock_fn, *args, **kwargs): + self.mock_fn = mock_fn + self.exp_args = args + self.exp_kwargs = kwargs + + def addPostCheckForMockFunction(self, mock_fn, *args, **kwargs): + """Store a mock function and its arguments to self.mock_checks + + Args: + mock_fn: mock function object + args: expected positional arguments for the mock_fn + kwargs: expected named arguments for the mock_fn + """ + self.mock_checks.append(self.MockChecksAtTearDown(mock_fn, *args, **kwargs)) + def MockPayload(self): """Create a mock payload object, complete with a mock manifest.""" - payload = self.mox.CreateMock(Payload) + payload = mock.create_autospec(Payload) payload.is_init = True - payload.manifest = self.mox.CreateMock( + payload.manifest = mock.create_autospec( update_metadata_pb2.DeltaArchiveManifest) return payload @@ -173,19 +207,20 @@ def SetupAddElemTest(self, is_present, is_submsg, convert=str, subreport = 'fake subreport' # Create a mock message. - msg = self.mox.CreateMock(update_metadata_pb2._message.Message) - msg.HasField(name).AndReturn(is_present) + msg = mock.create_autospec(update_metadata_pb2._message.Message) + self.addPostCheckForMockFunction(msg.HasField, name) + msg.HasField.return_value = is_present setattr(msg, name, val) - # Create a mock report. - report = self.mox.CreateMock(checker._PayloadReport) + report = mock.create_autospec(checker._PayloadReport) if is_present: if is_submsg: - report.AddSubReport(name).AndReturn(subreport) + self.addPostCheckForMockFunction(report.AddSubReport, name) + report.AddSubReport.return_value = subreport else: - report.AddField(name, convert(val), linebreak=linebreak, indent=indent) + self.addPostCheckForMockFunction(report.AddField, name, convert(val), + linebreak=linebreak, indent=indent) - self.mox.ReplayAll() return (msg, report, subreport, name, val) def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert, @@ -211,9 +246,9 @@ def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert, else: ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args, **kwargs) - self.assertEquals(val if is_present else None, ret_val) - self.assertEquals(subreport if is_present and is_submsg else None, - ret_subreport) + self.assertEqual(val if is_present else None, ret_val) + self.assertEqual(subreport if is_present and is_submsg else None, + ret_subreport) def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak, indent): @@ -243,7 +278,7 @@ def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak, self.assertRaises(PayloadError, tested_func, *args, **kwargs) else: ret_val = tested_func(*args, **kwargs) - self.assertEquals(val if is_present else None, ret_val) + self.assertEqual(val if is_present else None, ret_val) def DoAddSubMsgTest(self, is_mandatory, is_present): """Parametrized testing of _Check{Mandatory,Optional}SubMsg(). @@ -267,8 +302,8 @@ def DoAddSubMsgTest(self, is_mandatory, is_present): self.assertRaises(PayloadError, tested_func, *args) else: ret_val, ret_subreport = tested_func(*args) - self.assertEquals(val if is_present else None, ret_val) - self.assertEquals(subreport if is_present else None, ret_subreport) + self.assertEqual(val if is_present else None, ret_val) + self.assertEqual(subreport if is_present else None, ret_subreport) def testCheckPresentIff(self): """Tests _CheckPresentIff().""" @@ -294,15 +329,14 @@ def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call, returned_signed_hash: The signed hash data retuned by openssl. 
expected_signed_hash: The signed hash data to compare against. """ - try: - # Stub out the subprocess invocation. - self.mox.StubOutWithMock(checker.PayloadChecker, '_Run') + # Stub out the subprocess invocation. + with mock.patch.object(checker.PayloadChecker, '_Run') \ + as mock_payload_checker: if expect_subprocess_call: - checker.PayloadChecker._Run( - mox.IsA(list), send_data=sig_data).AndReturn( - (sig_asn1_header + returned_signed_hash, None)) + mock_payload_checker([], send_data=sig_data) + mock_payload_checker.return_value = ( + sig_asn1_header + returned_signed_hash, None) - self.mox.ReplayAll() if expect_pass: self.assertIsNone(checker.PayloadChecker._CheckSha256Signature( sig_data, 'foo', expected_signed_hash, 'bar')) @@ -310,13 +344,11 @@ def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call, self.assertRaises(PayloadError, checker.PayloadChecker._CheckSha256Signature, sig_data, 'foo', expected_signed_hash, 'bar') - finally: - self.mox.UnsetStubs() def testCheckSha256Signature_Pass(self): """Tests _CheckSha256Signature(); pass case.""" sig_data = 'fake-signature'.ljust(256) - signed_hash = hashlib.sha256('fake-data').digest() + signed_hash = hashlib.sha256(b'fake-data').digest() self.DoCheckSha256SignatureTest(True, True, sig_data, common.SIG_ASN1_HEADER, signed_hash, signed_hash) @@ -324,7 +356,7 @@ def testCheckSha256Signature_Pass(self): def testCheckSha256Signature_FailBadSignature(self): """Tests _CheckSha256Signature(); fails due to malformed signature.""" sig_data = 'fake-signature' # Malformed (not 256 bytes in length). - signed_hash = hashlib.sha256('fake-data').digest() + signed_hash = hashlib.sha256(b'fake-data').digest() self.DoCheckSha256SignatureTest(False, False, sig_data, common.SIG_ASN1_HEADER, signed_hash, signed_hash) @@ -332,7 +364,7 @@ def testCheckSha256Signature_FailBadSignature(self): def testCheckSha256Signature_FailBadOutputLength(self): """Tests _CheckSha256Signature(); fails due to unexpected output length.""" sig_data = 'fake-signature'.ljust(256) - signed_hash = 'fake-hash' # Malformed (not 32 bytes in length). + signed_hash = b'fake-hash' # Malformed (not 32 bytes in length). self.DoCheckSha256SignatureTest(False, True, sig_data, common.SIG_ASN1_HEADER, signed_hash, signed_hash) @@ -340,16 +372,16 @@ def testCheckSha256Signature_FailBadOutputLength(self): def testCheckSha256Signature_FailBadAsnHeader(self): """Tests _CheckSha256Signature(); fails due to bad ASN1 header.""" sig_data = 'fake-signature'.ljust(256) - signed_hash = hashlib.sha256('fake-data').digest() - bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER)) + signed_hash = hashlib.sha256(b'fake-data').digest() + bad_asn1_header = b'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER)) self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header, signed_hash, signed_hash) def testCheckSha256Signature_FailBadHash(self): """Tests _CheckSha256Signature(); fails due to bad hash returned.""" sig_data = 'fake-signature'.ljust(256) - expected_signed_hash = hashlib.sha256('fake-data').digest() - returned_signed_hash = hashlib.sha256('bad-fake-data').digest() + expected_signed_hash = hashlib.sha256(b'fake-data').digest() + returned_signed_hash = hashlib.sha256(b'bad-fake-data').digest() self.DoCheckSha256SignatureTest(False, True, sig_data, common.SIG_ASN1_HEADER, expected_signed_hash, returned_signed_hash) @@ -455,23 +487,23 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, # Add old kernel/rootfs partition info, as required. 
if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki: oki_hash = (None if fail_bad_oki - else hashlib.sha256('fake-oki-content').digest()) + else hashlib.sha256(b'fake-oki-content').digest()) payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size, oki_hash) if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or fail_bad_ori): ori_hash = (None if fail_bad_ori - else hashlib.sha256('fake-ori-content').digest()) + else hashlib.sha256(b'fake-ori-content').digest()) payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size, ori_hash) # Add new kernel/rootfs partition info. payload_gen.SetPartInfo( common.KERNEL, True, new_kernel_fs_size, - None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest()) + None if fail_bad_nki else hashlib.sha256(b'fake-nki-content').digest()) payload_gen.SetPartInfo( common.ROOTFS, True, new_rootfs_fs_size, - None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest()) + None if fail_bad_nri else hashlib.sha256(b'fake-nri-content').digest()) # Set the minor version. payload_gen.SetMinorVersion(0) @@ -518,7 +550,7 @@ def testCheckExtents(self): # Passes w/ all real extents. extents = self.NewExtentList((0, 4), (8, 3), (1024, 16)) - self.assertEquals( + self.assertEqual( 23, payload_checker._CheckExtents(extents, (1024 + 16) * block_size, collections.defaultdict(int), 'foo')) @@ -553,34 +585,34 @@ def testCheckReplaceOperation(self): block_size = payload_checker.block_size data_length = 10000 - op = self.mox.CreateMock( - update_metadata_pb2.InstallOperation) + op = mock.create_autospec(update_metadata_pb2.InstallOperation) op.type = common.OpType.REPLACE # Pass. op.src_extents = [] self.assertIsNone( payload_checker._CheckReplaceOperation( - op, data_length, (data_length + block_size - 1) / block_size, + op, data_length, (data_length + block_size - 1) // block_size, 'foo')) # Fail, src extents founds. op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size, 'foo') + op, data_length, (data_length + block_size - 1) // block_size, 'foo') # Fail, missing data. op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, None, (data_length + block_size - 1) / block_size, 'foo') + op, None, (data_length + block_size - 1) // block_size, 'foo') # Fail, length / block number mismatch. op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo') + op, data_length, (data_length + block_size - 1) // block_size + 1, + 'foo') def testCheckReplaceBzOperation(self): """Tests _CheckReplaceOperation() where op.type == REPLACE_BZ.""" @@ -588,7 +620,7 @@ def testCheckReplaceBzOperation(self): block_size = payload_checker.block_size data_length = block_size * 3 - op = self.mox.CreateMock( + op = mock.create_autospec( update_metadata_pb2.InstallOperation) op.type = common.OpType.REPLACE_BZ @@ -596,23 +628,30 @@ def testCheckReplaceBzOperation(self): op.src_extents = [] self.assertIsNone( payload_checker._CheckReplaceOperation( - op, data_length, (data_length + block_size - 1) / block_size + 5, + op, data_length, (data_length + block_size - 1) // block_size + 5, 'foo')) # Fail, src extents founds. 
op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo') + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo') # Fail, missing data. op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, None, (data_length + block_size - 1) / block_size, 'foo') + op, None, (data_length + block_size - 1) // block_size, 'foo') # Fail, too few blocks to justify BZ. op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, total_dst_blocks is a floating point value. + op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, op, data_length, (data_length + block_size - 1) / block_size, 'foo') @@ -623,7 +662,7 @@ def testCheckReplaceXzOperation(self): block_size = payload_checker.block_size data_length = block_size * 3 - op = self.mox.CreateMock( + op = mock.create_autospec( update_metadata_pb2.InstallOperation) op.type = common.OpType.REPLACE_XZ @@ -631,23 +670,30 @@ def testCheckReplaceXzOperation(self): op.src_extents = [] self.assertIsNone( payload_checker._CheckReplaceOperation( - op, data_length, (data_length + block_size - 1) / block_size + 5, + op, data_length, (data_length + block_size - 1) // block_size + 5, 'foo')) # Fail, src extents founds. op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo') + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo') # Fail, missing data. op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, None, (data_length + block_size - 1) / block_size, 'foo') + op, None, (data_length + block_size - 1) // block_size, 'foo') # Fail, too few blocks to justify XZ. op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, total_dst_blocks is a floating point value. + op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, op, data_length, (data_length + block_size - 1) / block_size, 'foo') @@ -724,9 +770,9 @@ def DoCheckOperationTest(self, op_type_name, allow_unhashed, old_part_size = test_utils.MiB(4) new_part_size = test_utils.MiB(8) old_block_counters = array.array( - 'B', [0] * ((old_part_size + block_size - 1) / block_size)) + 'B', [0] * ((old_part_size + block_size - 1) // block_size)) new_block_counters = array.array( - 'B', [0] * ((new_part_size + block_size - 1) / block_size)) + 'B', [0] * ((new_part_size + block_size - 1) // block_size)) prev_data_offset = 1876 blob_hash_counts = collections.defaultdict(int) @@ -769,16 +815,14 @@ def DoCheckOperationTest(self, op_type_name, allow_unhashed, fake_data = 'fake-data'.ljust(op.data_length) if not allow_unhashed and not fail_data_hash: # Create a valid data blob hash. - op.data_sha256_hash = hashlib.sha256(fake_data).digest() - payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn( - fake_data) + op.data_sha256_hash = hashlib.sha256(fake_data.encode('utf-8')).digest() + payload.ReadDataBlob.return_value = fake_data.encode('utf-8') elif fail_data_hash: # Create an invalid data blob hash. 
op.data_sha256_hash = hashlib.sha256( - fake_data.replace(' ', '-')).digest() - payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn( - fake_data) + fake_data.replace(' ', '-').encode('utf-8')).digest() + payload.ReadDataBlob.return_value = fake_data.encode('utf-8') total_dst_blocks = 0 if not fail_missing_dst_extents: @@ -807,7 +851,6 @@ def DoCheckOperationTest(self, op_type_name, allow_unhashed, payload_checker.minor_version <= 3): op.dst_length = total_dst_blocks * block_size - self.mox.ReplayAll() should_fail = (fail_src_extents or fail_dst_extents or fail_mismatched_data_offset_length or fail_missing_dst_extents or fail_src_length or @@ -857,7 +900,8 @@ def DoCheckOperationsTest(self, fail_nonexhaustive_full_update): rootfs_data_length -= block_size payload_gen.AddOperation(common.ROOTFS, rootfs_op_type, - dst_extents=[(0, rootfs_data_length / block_size)], + dst_extents= + [(0, rootfs_data_length // block_size)], data_offset=0, data_length=rootfs_data_length) @@ -889,13 +933,13 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields, rootfs_part_size = test_utils.MiB(2) kernel_part_size = test_utils.KiB(16) payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size, - hashlib.sha256('fake-new-rootfs-content').digest()) + hashlib.sha256(b'fake-new-rootfs-content').digest()) payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size, - hashlib.sha256('fake-new-kernel-content').digest()) + hashlib.sha256(b'fake-new-kernel-content').digest()) payload_gen.SetMinorVersion(0) payload_gen.AddOperationWithData( common.ROOTFS, common.OpType.REPLACE, - dst_extents=[(0, rootfs_part_size / block_size)], + dst_extents=[(0, rootfs_part_size // block_size)], data_blob=os.urandom(rootfs_part_size)) do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or @@ -908,7 +952,7 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields, if fail_sig_missing_fields: sig_data = None else: - sig_data = test_utils.SignSha256('fake-payload-content', + sig_data = test_utils.SignSha256(b'fake-payload-content', test_utils._PRIVKEY_FILE_NAME) sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data) @@ -984,9 +1028,9 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, kernel_filesystem_size = test_utils.KiB(16) rootfs_filesystem_size = test_utils.MiB(2) payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size, - hashlib.sha256('fake-new-rootfs-content').digest()) + hashlib.sha256(b'fake-new-rootfs-content').digest()) payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size, - hashlib.sha256('fake-new-kernel-content').digest()) + hashlib.sha256(b'fake-new-kernel-content').digest()) payload_gen.SetMinorVersion(0) rootfs_part_size = 0 @@ -997,7 +1041,7 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, rootfs_op_size += block_size payload_gen.AddOperationWithData( common.ROOTFS, common.OpType.REPLACE, - dst_extents=[(0, rootfs_op_size / block_size)], + dst_extents=[(0, rootfs_op_size // block_size)], data_blob=os.urandom(rootfs_op_size)) kernel_part_size = 0 @@ -1008,7 +1052,7 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, kernel_op_size += block_size payload_gen.AddOperationWithData( common.KERNEL, common.OpType.REPLACE, - dst_extents=[(0, kernel_op_size / block_size)], + dst_extents=[(0, kernel_op_size // block_size)], data_blob=os.urandom(kernel_op_size)) # Generate payload (complete w/ signature) and create the test object. 
@@ -1054,6 +1098,7 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, else: self.assertIsNone(payload_checker.Run(**kwargs2)) + # This implements a generic API, hence the occasional unused args. # pylint: disable=W0613 def ValidateCheckOperationTest(op_type_name, allow_unhashed, @@ -1104,13 +1149,13 @@ def AddParametricTests(tested_method_name, arg_space, validate_func=None): (values) associated with them. validate_func: A function used for validating test argument combinations. """ - for value_tuple in itertools.product(*arg_space.itervalues()): - run_dargs = dict(zip(arg_space.iterkeys(), value_tuple)) + for value_tuple in itertools.product(*iter(arg_space.values())): + run_dargs = dict(zip(iter(arg_space.keys()), value_tuple)) if validate_func and not validate_func(**run_dargs): continue run_method_name = 'Do%sTest' % tested_method_name test_method_name = 'test%s' % tested_method_name - for arg_key, arg_val in run_dargs.iteritems(): + for arg_key, arg_val in run_dargs.items(): if arg_val or isinstance(arg_val, int): test_method_name += '__%s=%s' % (arg_key, arg_val) setattr(PayloadCheckerTest, test_method_name, diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py index dfb8181a..b934cf88 100644 --- a/scripts/update_payload/common.py +++ b/scripts/update_payload/common.py @@ -16,8 +16,11 @@ """Utilities for update payload processing.""" +from __future__ import absolute_import from __future__ import print_function +import base64 + from update_payload import update_metadata_pb2 from update_payload.error import PayloadError @@ -26,9 +29,9 @@ # Constants. # SIG_ASN1_HEADER = ( - '\x30\x31\x30\x0d\x06\x09\x60\x86' - '\x48\x01\x65\x03\x04\x02\x01\x05' - '\x00\x04\x20' + b'\x30\x31\x30\x0d\x06\x09\x60\x86' + b'\x48\x01\x65\x03\x04\x02\x01\x05' + b'\x00\x04\x20' ) BRILLO_MAJOR_PAYLOAD_VERSION = 2 @@ -43,6 +46,7 @@ # Tuple of (name in system, name in protobuf). CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs')) + # # Payload operation types. # @@ -138,7 +142,7 @@ def Read(file_obj, length, offset=None, hasher=None): try: data = file_obj.read(length) - except IOError, e: + except IOError as e: raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e)) if len(data) != length: @@ -164,7 +168,7 @@ def FormatExtent(ex, block_size=0): def FormatSha256(digest): """Returns a canonical string representation of a SHA256 digest.""" - return digest.encode('base64').strip() + return base64.b64encode(digest).decode('utf-8') # diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py index 6248ba9b..e73badf3 100644 --- a/scripts/update_payload/format_utils.py +++ b/scripts/update_payload/format_utils.py @@ -16,6 +16,8 @@ """Various formatting functions.""" +from __future__ import division + def NumToPercent(num, total, min_precision=1, max_precision=5): """Returns the percentage (string) of |num| out of |total|. 
@@ -50,7 +52,7 @@ def NumToPercent(num, total, min_precision=1, max_precision=5): precision = min(min_precision, max_precision) factor = 10 ** precision while precision <= max_precision: - percent = num * 100 * factor / total + percent = num * 100 * factor // total if percent: break factor *= 10 @@ -102,8 +104,8 @@ def BytesToHumanReadable(size, precision=1, decimal=False): magnitude = next_magnitude if exp != 0: - whole = size / magnitude - frac = (size % magnitude) * (10 ** precision) / magnitude + whole = size // magnitude + frac = (size % magnitude) * (10 ** precision) // magnitude while frac and not frac % 10: frac /= 10 return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1]) diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py index 42ea621c..4dcd6527 100755 --- a/scripts/update_payload/format_utils_unittest.py +++ b/scripts/update_payload/format_utils_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,6 +17,11 @@ """Unit tests for format_utils.py.""" +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + import unittest from update_payload import format_utils diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py index 1ac2ab5d..bad2dc37 100644 --- a/scripts/update_payload/histogram.py +++ b/scripts/update_payload/histogram.py @@ -16,6 +16,9 @@ """Histogram generation tools.""" +from __future__ import absolute_import +from __future__ import division + from collections import defaultdict from update_payload import format_utils @@ -110,7 +113,7 @@ def __str__(self): hist_bar = '|' for key, count in self.data: if self.total: - bar_len = count * self.scale / self.total + bar_len = count * self.scale // self.total hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale) line = '%s %s %s' % ( diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py index e757dd02..ccde2bb1 100755 --- a/scripts/update_payload/histogram_unittest.py +++ b/scripts/update_payload/histogram_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,6 +17,11 @@ """Unit tests for histogram.py.""" +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + import unittest from update_payload import format_utils diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py index 1ed5f99e..ea5ed308 100644 --- a/scripts/update_payload/payload.py +++ b/scripts/update_payload/payload.py @@ -16,6 +16,7 @@ """Tools for reading, verifying and applying Chrome OS update payloads.""" +from __future__ import absolute_import from __future__ import print_function import hashlib @@ -64,7 +65,7 @@ class _PayloadHeader(object): """Update payload header struct.""" # Header constants; sizes are in bytes. - _MAGIC = 'CrAU' + _MAGIC = b'CrAU' _VERSION_SIZE = 8 _MANIFEST_LEN_SIZE = 8 _METADATA_SIGNATURE_LEN_SIZE = 4 @@ -111,7 +112,6 @@ def ReadFromPayload(self, payload_file, hasher=None): payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True, hasher=hasher) - def __init__(self, payload_file, payload_file_offset=0): """Initialize the payload object. 
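
Several hunks above replace '/' with '//'. Under Python 3 (and under "from __future__ import division"), '/' is true division and yields a float, which would silently change block-count and percentage math that callers expect to be integral. A small worked check, not taken from the change itself:

    block_size = 4096
    size = 10000
    # True division ('/') returns a float under Python 3.
    assert (size + block_size - 1) / block_size == 3.441162109375
    # Floor division ('//') keeps the integer result the Python 2 code relied on.
    assert (size + block_size - 1) // block_size == 3
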
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py index 4f5fed03..e153669e 100644 --- a/scripts/update_payload/test_utils.py +++ b/scripts/update_payload/test_utils.py @@ -16,9 +16,10 @@ """Utilities for unit testing.""" +from __future__ import absolute_import from __future__ import print_function -import cStringIO +import io import hashlib import os import struct @@ -70,7 +71,7 @@ def _WriteInt(file_obj, size, is_unsigned, val): """ try: file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val)) - except IOError, e: + except IOError as e: raise payload.PayloadError('error writing to file (%s): %s' % (file_obj.name, e)) @@ -335,7 +336,7 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, if do_generate_sigs_data: # First, sign some arbitrary data to obtain the size of a signature blob. - fake_sig = SignSha256('fake-payload-data', privkey_file_name) + fake_sig = SignSha256(b'fake-payload-data', privkey_file_name) fake_sigs_gen = SignaturesGenerator() fake_sigs_gen.AddSig(1, fake_sig) sigs_len = len(fake_sigs_gen.ToBinary()) @@ -345,7 +346,7 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, if do_generate_sigs_data: # Once all payload fields are updated, dump and sign it. - temp_payload_file = cStringIO.StringIO() + temp_payload_file = io.BytesIO() self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs) sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name) sigs_gen = SignaturesGenerator() From 1f6bcab058b94446e25d9a55356d0398bee9aa60 Mon Sep 17 00:00:00 2001 From: Andrew Date: Thu, 21 Nov 2019 11:34:22 -0800 Subject: [PATCH 158/624] update_engine: Fix paycheck script The paycheck script was broken when migrating from python2 to python3. BUG=chromium:1027199 TEST=tryjob, unittests Change-Id: Ie40799138edbe6bdd49fb0bd5ab91ae6369e007f Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1929453 Tested-by: Amin Hassani Commit-Queue: Andrew Lassalle Reviewed-by: Amin Hassani Reviewed-by: Mike Frysinger --- scripts/paycheck.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/paycheck.py b/scripts/paycheck.py index 35877505..f4ccca2c 100755 --- a/scripts/paycheck.py +++ b/scripts/paycheck.py @@ -203,7 +203,7 @@ def main(argv): # Parse and validate arguments. args = ParseArguments(argv[1:]) - with open(args.payload) as payload_file: + with open(args.payload, 'rb') as payload_file: payload = update_payload.Payload(payload_file) try: # Initialize payload. @@ -227,7 +227,7 @@ def main(argv): part_sizes = (args.part_sizes and dict(zip(args.part_names, args.part_sizes))) - metadata_sig_file = args.meta_sig and open(args.meta_sig) + metadata_sig_file = args.meta_sig and open(args.meta_sig, 'rb') payload.Check( pubkey_file_name=args.key, metadata_sig_file=metadata_sig_file, @@ -258,7 +258,7 @@ def main(argv): file_handles = [] if args.out_dst_part_paths is not None: for name, path in zip(args.part_names, args.out_dst_part_paths): - handle = open(path, 'w+') + handle = open(path, 'wb+') file_handles.append(handle) out_dst_parts[name] = handle.name else: From 6955bcc4ffe4cc9d62a88186b9a7e75d095a7897 Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Tue, 19 Nov 2019 20:33:43 +0900 Subject: [PATCH 159/624] update_engine: Get rid of WatchFileDescriptor. The API is removed in the next libchrome uprev to r576297. This CL replaces the use by new API base::FileDescriptorWatcher. BUG=chromium:909719 TEST=Build locally. Ran cros_run_unit_tests. 
Change-Id: I318b35b2d00742955f6877c4e36624e4c672827b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1924097 Tested-by: Qijiang Fan Reviewed-by: Amin Hassani Commit-Queue: Qijiang Fan --- common/subprocess_unittest.cc | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc index 8dbaa0b2..19b24f45 100644 --- a/common/subprocess_unittest.cc +++ b/common/subprocess_unittest.cc @@ -45,6 +45,7 @@ using base::TimeDelta; using brillo::MessageLoop; using std::string; +using std::unique_ptr; using std::vector; namespace { @@ -73,6 +74,7 @@ class SubprocessTest : public ::testing::Test { brillo::BaseMessageLoop loop_{&base_loop_}; brillo::AsynchronousSignalHandler async_signal_handler_; Subprocess subprocess_; + unique_ptr watcher_; }; namespace { @@ -256,21 +258,23 @@ TEST_F(SubprocessTest, CancelTest) { int fifo_fd = HANDLE_EINTR(open(fifo_path.c_str(), O_RDONLY)); EXPECT_GE(fifo_fd, 0); - loop_.WatchFileDescriptor(FROM_HERE, - fifo_fd, - MessageLoop::WatchMode::kWatchRead, - false, - base::Bind( - [](int fifo_fd, uint32_t tag) { - char c; - EXPECT_EQ(1, - HANDLE_EINTR(read(fifo_fd, &c, 1))); - EXPECT_EQ('X', c); - LOG(INFO) << "Killing tag " << tag; - Subprocess::Get().KillExec(tag); - }, - fifo_fd, - tag)); + watcher_ = base::FileDescriptorWatcher::WatchReadable( + fifo_fd, + base::Bind( + [](unique_ptr* watcher, + int fifo_fd, + uint32_t tag) { + char c; + EXPECT_EQ(1, HANDLE_EINTR(read(fifo_fd, &c, 1))); + EXPECT_EQ('X', c); + LOG(INFO) << "Killing tag " << tag; + Subprocess::Get().KillExec(tag); + *watcher = nullptr; + }, + // watcher_ is no longer used outside the clousure. + base::Unretained(&watcher_), + fifo_fd, + tag)); // This test would leak a callback that runs when the child process exits // unless we wait for it to run. From 8a1de4b6b8e368d7bd2b92f015a83f49764f7b79 Mon Sep 17 00:00:00 2001 From: Andrew Date: Sat, 23 Nov 2019 20:32:35 -0800 Subject: [PATCH 160/624] update_payload: Fix array type in applier.py In python3, the array type 'c' is no longer supported. Type 'c' arrays are replaced by type 'b'(signed char) and 'B'(unsigned char). 'B' is supported by python 2 and python3. Replaced deprecate function 'buffer' by memoryview in python3. BUG=chromium:1027199 TEST=unittest, cros_generate_update_payload, tryjob Change-Id: Id77ba2d1aac0005d31516da4e6ac4617027ca345 Exempt-From-Owner-Approval: -release builders are on fire Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1932226 Tested-by: Nicolas Boichat Commit-Queue: Nicolas Boichat Reviewed-by: Mike Frysinger --- scripts/update_payload/applier.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py index 7830c002..7eaa07ac 100644 --- a/scripts/update_payload/applier.py +++ b/scripts/update_payload/applier.py @@ -52,7 +52,9 @@ from update_payload import common from update_payload.error import PayloadError - +# buffer is not supported in python3, but memoryview has the same functionality +if sys.version_info.major >= 3: + buffer = memoryview # pylint: disable=invalid-name, redefined-builtin # # Helper functions. # @@ -107,7 +109,7 @@ def _ReadExtents(file_obj, extents, block_size, max_length=-1): Returns: A character array containing the concatenated read data. 
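
As the commit message above notes, the 'c' typecode was removed from the array module in Python 3, while 'B' (unsigned char) is accepted by both runtimes. A minimal sketch of the replacement typecode, assuming Python 3 (which uses frombytes()/tobytes() rather than the Python 2 fromstring()/tostring()):

    import array

    data = array.array('B')  # unsigned bytes; valid on Python 2 and 3
    data.frombytes(b'abc')
    data.append(0x00)
    assert bytes(data) == b'abc\x00'
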
""" - data = array.array('c') + data = array.array('B') if max_length < 0: max_length = sys.maxsize for ex in extents: From 4b00ae16c992f7913bc026d3027b41d8b271f436 Mon Sep 17 00:00:00 2001 From: Andrew Date: Mon, 25 Nov 2019 09:37:27 -0800 Subject: [PATCH 161/624] update_payload: Remove 'buffer' in applier.py 'memoryview' replaces 'buffer' in python3, but not all the signatures are the same in both functions. The output of memoryview has to be converted to bytes, but buffer's doesn't. This difference could cause the script to fail when using python2. Set the type of multiple strings to byte. BUG=chromium:1027199 TEST=cros_generate_update_payload --image ~/trunk/src/build/images/eve/R80-12705.0.2019_11_23_1521-a1/chromiumos_image.bin --src_image ~/trunk/src/build/images/eve/R80-12705.0.2019_11_23_1521-a1/chromiumos_image.bin --output ~/delete/ttt2 --check TEST=cros_workon_make update_payload --test TEST=tryjob. Note: tryjob fails because tryjob runs without the updates to the sdk from this CL. see chromium:1028178 Change-Id: Ic05892d1e26f12e33a661b590039821329379042 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1933128 Tested-by: Andrew Lassalle Reviewed-by: Amin Hassani Reviewed-by: Mike Frysinger Auto-Submit: Andrew Lassalle --- scripts/update_payload/applier.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py index 7eaa07ac..ce1998f6 100644 --- a/scripts/update_payload/applier.py +++ b/scripts/update_payload/applier.py @@ -52,9 +52,6 @@ from update_payload import common from update_payload.error import PayloadError -# buffer is not supported in python3, but memoryview has the same functionality -if sys.version_info.major >= 3: - buffer = memoryview # pylint: disable=invalid-name, redefined-builtin # # Helper functions. # @@ -146,10 +143,8 @@ def _WriteExtents(file_obj, data, extents, block_size, base_name): if not data_length: raise PayloadError('%s: more write extents than data' % ex_name) write_length = min(data_length, ex.num_blocks * block_size) - file_obj.seek(ex.start_block * block_size) - data_view = buffer(data, data_offset, write_length) - file_obj.write(data_view) + file_obj.write(data[data_offset:(data_offset + write_length)]) data_offset += write_length data_length -= write_length @@ -283,7 +278,7 @@ def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): # Pad with zeros if necessary. if data_end > data_length: padding = data_end - data_length - out_data += '\0' * padding + out_data += b'\0' * padding self.payload.payload_file.seek(start_block * block_size) part_file.seek(start_block * block_size) @@ -314,7 +309,7 @@ def _ApplyZeroOperation(self, op, op_name, part_file): # pylint: disable=unused-variable for ex, ex_name in common.ExtentIter(op.dst_extents, base_name): part_file.seek(ex.start_block * block_size) - part_file.write('\0' * (ex.num_blocks * block_size)) + part_file.write(b'\0' * (ex.num_blocks * block_size)) def _ApplySourceCopyOperation(self, op, op_name, old_part_file, new_part_file): @@ -424,7 +419,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, # Pad with zeros past the total output length. if pad_len: new_part_file.seek(pad_off) - new_part_file.write('\0' * pad_len) + new_part_file.write(b'\0' * pad_len) else: # Gather input raw data and write to a temp file. 
input_part_file = old_part_file if old_part_file else new_part_file @@ -467,7 +462,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, # Write output back to partition, with padding. unaligned_out_len = len(out_data) % block_size if unaligned_out_len: - out_data += '\0' * (block_size - unaligned_out_len) + out_data += b'\0' * (block_size - unaligned_out_len) _WriteExtents(new_part_file, out_data, op.dst_extents, block_size, '%s.dst_extents' % op_name) From 3dfd8031a9d48a88bd0b16a03520082061d893ff Mon Sep 17 00:00:00 2001 From: Andrew Date: Tue, 26 Nov 2019 12:50:57 -0800 Subject: [PATCH 162/624] update_payload: Adapt subprocess.check_call to py3 In python3, file descriptors(fd) are not passed to child processes, so the call to 'puffin' fails because puffin cannot receive the file descriptors passed by applier.py. In python3, there is an option to set a fd inheritable so the subprocess can access the fd. BUG=chromium:1027199 TEST=cros_generate_update_payload --src_image ~/trunk/src/build/images/eve/R80-12705.0.2019_11_23_1521-a1/dlc/dummy-dlc/dummy-package/dlc.img --image ~/trunk/src/build/images/eve/R80-12713.0.2019_11_26_0806-a1/dlc/dummy-dlc/dummy-package/dlc.img --output ~/delete/ttt3 --check TEST=cros_generate_update_payload --src_image ~/trunk/src/build/images/eve/R80-12697.0.2019_11_21_1601-a1/chromiumos_test_image.bin --image ~/trunk/src/build/images/eve/R80-12713.0.2019_11_26_0806-a1/chromiumos_image.bin --check --output ~/delete/ttt5 Change-Id: I5d68d5a7ce0a128f2438b0d9f2e32167463661c2 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1938007 Tested-by: Andrew Lassalle Reviewed-by: Mike Frysinger Reviewed-by: Amin Hassani Commit-Queue: Mike Frysinger --- scripts/update_payload/applier.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py index ce1998f6..29ccb8e5 100644 --- a/scripts/update_payload/applier.py +++ b/scripts/update_payload/applier.py @@ -398,11 +398,19 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, # Diff from source partition. old_file_name = '/dev/fd/%d' % old_part_file.fileno() + # In python3, file descriptors(fd) are not passed to child processes by + # default. To pass the fds to the child processes, we need to set the flag + # 'inheritable' in the fds and make the subprocess calls with the argument + # close_fds set to False. + if sys.version_info.major >= 3: + os.set_inheritable(new_part_file.fileno(), True) + os.set_inheritable(old_part_file.fileno(), True) + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): # Invoke bspatch on partition file with extents args. bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name, patch_file_name, in_extents_arg, out_extents_arg] - subprocess.check_call(bspatch_cmd) + subprocess.check_call(bspatch_cmd, close_fds=False) elif op.type == common.OpType.PUFFDIFF: # Invoke puffpatch on partition file with extents args. 
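
The hunk above relies on two Python 3 rules spelled out in the commit message: file descriptors are created non-inheritable by default, and subprocess closes any extra descriptors unless close_fds=False. A self-contained sketch of the same mechanism, assuming Python 3 on Linux with /dev/fd and a cat binary available (illustrative, not part of the patch):

    import os
    import subprocess

    read_fd, write_fd = os.pipe()      # non-inheritable by default on Python 3
    os.set_inheritable(read_fd, True)
    with os.fdopen(write_fd, 'wb') as writer:
        writer.write(b'hello\n')
    # Without close_fds=False the child could not resolve /dev/fd/<read_fd>.
    subprocess.check_call(['cat', '/dev/fd/%d' % read_fd], close_fds=False)
    os.close(read_fd)
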
puffpatch_cmd = [self.puffpatch_path, @@ -412,7 +420,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, "--patch_file=%s" % patch_file_name, "--src_extents=%s" % in_extents_arg, "--dst_extents=%s" % out_extents_arg] - subprocess.check_call(puffpatch_cmd) + subprocess.check_call(puffpatch_cmd, close_fds=False) else: raise PayloadError("Unknown operation %s" % op.type) From 14980e2fbb885f966c5e531b83fee7a12d92b27c Mon Sep 17 00:00:00 2001 From: Alessio Balsini Date: Tue, 26 Nov 2019 11:46:06 +0000 Subject: [PATCH 163/624] SkipInstallOperation placeholders For some devices, some InstallOperations can be skipped during the update process. An example is the SOURCE_COPY operation with same source and destination locations for Virtual A/B devices. This patch extents the DynamicPartitionControl interface with the ShouldSkipOperation() method to query if given operation should be skipped or not. Bug: 141207436 Test: build Change-Id: I94dbf1d33d531944e04cb2ffcba274106e866d5d Signed-off-by: Alessio Balsini --- common/dynamic_partition_control_interface.h | 3 +++ common/dynamic_partition_control_stub.cc | 5 +++++ common/dynamic_partition_control_stub.h | 1 + dynamic_partition_control_android.cc | 5 +++++ dynamic_partition_control_android.h | 1 + 5 files changed, 15 insertions(+) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index b092f7a3..39daf75c 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -50,6 +50,9 @@ class DynamicPartitionControlInterface { // Return the feature flags of Virtual A/B on this device. virtual FeatureFlag GetVirtualAbFeatureFlag() = 0; + // Checks if the provided InstallOperation can be skipped on this device. + virtual bool ShouldSkipOperation(const InstallOperation& operation) = 0; + // Do necessary cleanups before destroying the object. 
virtual void Cleanup() = 0; diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 86f75aa0..06f6b3c2 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -32,6 +32,11 @@ FeatureFlag DynamicPartitionControlStub::GetVirtualAbFeatureFlag() { return FeatureFlag(FeatureFlag::Value::NONE); } +bool DynamicPartitionControlStub::ShouldSkipOperation( + const InstallOperation& operation) { + return false; +} + void DynamicPartitionControlStub::Cleanup() {} bool DynamicPartitionControlStub::PreparePartitionsForUpdate( diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index e7895dee..c62758bd 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -29,6 +29,7 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { public: FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; + bool ShouldSkipOperation(const InstallOperation& operation) override; void Cleanup() override; bool PreparePartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 464cdf13..c24aee90 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -104,6 +104,11 @@ FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() { return virtual_ab_; } +bool DynamicPartitionControlAndroid::ShouldSkipOperation( + const InstallOperation& operation) { + return false; +} + bool DynamicPartitionControlAndroid::MapPartitionInternal( const std::string& super_device, const std::string& target_partition_name, diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index c1837e07..2bfbcb15 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -35,6 +35,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { ~DynamicPartitionControlAndroid(); FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; + bool ShouldSkipOperation(const InstallOperation& operation) override; void Cleanup() override; bool PreparePartitionsForUpdate(uint32_t source_slot, From 2a3b4a22eb6925326e439f820fa91a3976afac10 Mon Sep 17 00:00:00 2001 From: Alessio Balsini Date: Mon, 25 Nov 2019 16:46:51 +0000 Subject: [PATCH 164/624] Skip identical SOURCE_COPY operations When Virtual A/B devices are updated, SOURCE_COPY operations that are copying data from source to destination at the same locations: - are useless; - introduce an overhead for overwritingin identical data; - increase the COW device size when using dm-snapshot. This patch analyzes SOURCE_COPY operations and skips them if applied to Virtual A/B devices and source and destination addresses are identical. 
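
The change below delegates the actual check to android::snapshot::SourceCopyOperationIsClone(). As a rough illustration of the idea (a simplified sketch, not the real implementation), a SOURCE_COPY is a no-op clone when its source and destination extents describe exactly the same blocks:

    def is_identity_copy(src_extents, dst_extents):
        """Each extent is modeled here as a (start_block, num_blocks) pair."""
        return list(src_extents) == list(dst_extents)

    assert is_identity_copy([(0, 1), (1, 5)], [(0, 1), (1, 5)])
    assert not is_identity_copy([(0, 1), (1, 5)], [(0, 1), (1, 5), (6, 5)])
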
Bug: 141207436 Test: DynamicPartitionControlAndroidTest:ShouldSkipOperationTest Depends-On: I146aeba1c8ede35f21cfef8e21d4af62274bda84 Change-Id: Ifec33abaf81b1d4cbd61533293735de68578c9c4 Signed-off-by: Alessio Balsini --- dynamic_partition_control_android.cc | 10 +++ dynamic_partition_control_android_unittest.cc | 82 +++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index c24aee90..4414e4b4 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -50,6 +50,7 @@ using android::fs_mgr::MetadataBuilder; using android::fs_mgr::Partition; using android::fs_mgr::PartitionOpener; using android::fs_mgr::SlotSuffixForSlotNumber; +using android::snapshot::SourceCopyOperationIsClone; namespace chromeos_update_engine { @@ -106,6 +107,15 @@ FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() { bool DynamicPartitionControlAndroid::ShouldSkipOperation( const InstallOperation& operation) { + switch (operation.type()) { + case InstallOperation::SOURCE_COPY: + return target_supports_snapshot_ && + GetVirtualAbFeatureFlag().IsEnabled() && + SourceCopyOperationIsClone(operation); + break; + default: + break; + } return false; } diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 10075ed4..fc3d38cb 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -124,6 +124,10 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { } void SetSlots(const TestParam& slots) { slots_ = slots; } + void SetSnapshotEnabled(bool enabled) { + dynamicControl().target_supports_snapshot_ = enabled; + } + struct Listener : public ::testing::MatchResultListener { explicit Listener(std::ostream* os) : MatchResultListener(os) {} }; @@ -616,4 +620,82 @@ TEST_F(DynamicPartitionControlAndroidTest, ApplyingToCurrentSlot) { << "Should not be able to apply to current slot."; } +TEST_F(DynamicPartitionControlAndroidTest, ShouldSkipOperationTest) { + InstallOperation iop; + Extent *se, *de; + + // Not a SOURCE_COPY operation, cannot skip. + iop.set_type(InstallOperation::REPLACE); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + + iop.set_type(InstallOperation::SOURCE_COPY); + + // By default GetVirtualAbFeatureFlag is disabled. Cannot skip operation. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + + // Enable GetVirtualAbFeatureFlag in the mock interface. + ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); + + // By default target_supports_snapshot_ is set to false. Cannot skip + // operation. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + + SetSnapshotEnabled(true); + + // Empty source and destination. Skip. + EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + + se = iop.add_src_extents(); + se->set_start_block(0); + se->set_num_blocks(1); + + // There is something in sources, but destinations are empty. Cannot skip. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + + InstallOperation iop2; + + de = iop2.add_dst_extents(); + de->set_start_block(0); + de->set_num_blocks(1); + + // There is something in destinations, but sources are empty. Cannot skip. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop2)); + + de = iop.add_dst_extents(); + de->set_start_block(0); + de->set_num_blocks(1); + + // Sources and destinations are identical. 
Skip. + EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + + se = iop.add_src_extents(); + se->set_start_block(1); + se->set_num_blocks(5); + + // There is something in source, but not in destination. Cannot skip. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + + de = iop.add_dst_extents(); + de->set_start_block(1); + de->set_num_blocks(5); + + // There is source and destination are equal. Skip. + EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + + de = iop.add_dst_extents(); + de->set_start_block(6); + de->set_num_blocks(5); + + // There is something extra in dest. Cannot skip. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + + se = iop.add_src_extents(); + se->set_start_block(6); + se->set_num_blocks(5); + + // Source and dest are identical again. Skip. + EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); +} + } // namespace chromeos_update_engine From 8d3843144aa92a77b6bcadea6e0158fa4962f907 Mon Sep 17 00:00:00 2001 From: Alessio Balsini Date: Tue, 26 Nov 2019 11:46:33 +0000 Subject: [PATCH 165/624] DeltaPerformer uses ShouldSkipOperation() Enable DeltaPerformer request DynamicPartitionControl if a SOURCE_COPY operation can be skip. A SOURCE_COPY operation can still fail if its hash does not correspond to the partition's. Bug: 141207436 Test: manual OTA application Change-Id: Ib3d8c9343acb4d3655781150a4cca57985f20387 Signed-off-by: Alessio Balsini --- payload_consumer/delta_performer.cc | 46 +++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index f9244a93..3d38c5bc 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1154,7 +1154,14 @@ bool DeltaPerformer::PerformSourceCopyOperation( TEST_AND_RETURN_FALSE(source_fd_ != nullptr); + // The device may optimize the SOURCE_COPY operation. + // Being this a device-specific optimization let DynamicPartitionController + // decide it the operation should be skipped. + const auto& partition_control = boot_control_->GetDynamicPartitionControl(); + bool should_skip = partition_control->ShouldSkipOperation(operation); + if (operation.has_src_sha256_hash()) { + bool read_ok; brillo::Blob source_hash; brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(), operation.src_sha256_hash().end()); @@ -1163,12 +1170,17 @@ bool DeltaPerformer::PerformSourceCopyOperation( // device doesn't match or there was an error reading the source partition. // Note that this code will also fall back if writing the target partition // fails. 
- bool read_ok = fd_utils::CopyAndHashExtents(source_fd_, - operation.src_extents(), - target_fd_, - operation.dst_extents(), - block_size_, - &source_hash); + if (should_skip) { + read_ok = fd_utils::ReadAndHashExtents( + source_fd_, operation.src_extents(), block_size_, &source_hash); + } else { + read_ok = fd_utils::CopyAndHashExtents(source_fd_, + operation.src_extents(), + target_fd_, + operation.dst_extents(), + block_size_, + &source_hash); + } if (read_ok && expected_source_hash == source_hash) return true; @@ -1185,12 +1197,18 @@ bool DeltaPerformer::PerformSourceCopyOperation( << base::HexEncode(expected_source_hash.data(), expected_source_hash.size()); - TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_, - operation.src_extents(), - target_fd_, - operation.dst_extents(), - block_size_, - &source_hash)); + if (should_skip) { + TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents( + source_ecc_fd_, operation.src_extents(), block_size_, &source_hash)); + } else { + TEST_AND_RETURN_FALSE( + fd_utils::CopyAndHashExtents(source_ecc_fd_, + operation.src_extents(), + target_fd_, + operation.dst_extents(), + block_size_, + &source_hash)); + } TEST_AND_RETURN_FALSE( ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)); // At this point reading from the the error corrected device worked, but @@ -1202,6 +1220,10 @@ bool DeltaPerformer::PerformSourceCopyOperation( // corrected device first since we can't verify the block in the raw device // at this point, but we fall back to the raw device since the error // corrected device can be shorter or not available. + + if (should_skip) + return true; + if (OpenCurrentECCPartition() && fd_utils::CopyAndHashExtents(source_ecc_fd_, operation.src_extents(), From 6eec995ac79c66741e665fce9a30613844d59384 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 4 Dec 2019 13:12:01 -0800 Subject: [PATCH 166/624] Only skip operation on snapshot partitions On Virtual A/B devices, don't skip SOURCE_COPY on static partitions. Test: update_engine_unittest Test: incremental update to self Change-Id: I5c93b501e09f50f559151eb77d83052373c90d0d --- common/dynamic_partition_control_interface.h | 8 +++-- common/dynamic_partition_control_stub.cc | 2 +- common/dynamic_partition_control_stub.h | 3 +- dynamic_partition_control_android.cc | 12 ++++++- dynamic_partition_control_android.h | 7 ++++- dynamic_partition_control_android_unittest.cc | 31 ++++++++++++------- mock_dynamic_partition_control.h | 5 +++ payload_consumer/delta_performer.cc | 4 ++- 8 files changed, 53 insertions(+), 19 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 39daf75c..c17bafbb 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -50,8 +50,12 @@ class DynamicPartitionControlInterface { // Return the feature flags of Virtual A/B on this device. virtual FeatureFlag GetVirtualAbFeatureFlag() = 0; - // Checks if the provided InstallOperation can be skipped on this device. - virtual bool ShouldSkipOperation(const InstallOperation& operation) = 0; + // Checks if |operation| can be skipped on the given partition. + // |partition_name| should not have the slot suffix; implementation of + // DynamicPartitionControlInterface checks partition at the target slot + // previously set with PreparePartitionsForUpdate(). 
+ virtual bool ShouldSkipOperation(const std::string& partition_name, + const InstallOperation& operation) = 0; // Do necessary cleanups before destroying the object. virtual void Cleanup() = 0; diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 06f6b3c2..bc792c8c 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -33,7 +33,7 @@ FeatureFlag DynamicPartitionControlStub::GetVirtualAbFeatureFlag() { } bool DynamicPartitionControlStub::ShouldSkipOperation( - const InstallOperation& operation) { + const std::string& partition_name, const InstallOperation& operation) { return false; } diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index c62758bd..1704f056 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -29,7 +29,8 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { public: FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; - bool ShouldSkipOperation(const InstallOperation& operation) override; + bool ShouldSkipOperation(const std::string& partition_name, + const InstallOperation& operation) override; void Cleanup() override; bool PreparePartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 4414e4b4..4ad02c74 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -106,11 +106,13 @@ FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() { } bool DynamicPartitionControlAndroid::ShouldSkipOperation( - const InstallOperation& operation) { + const std::string& partition_name, const InstallOperation& operation) { switch (operation.type()) { case InstallOperation::SOURCE_COPY: return target_supports_snapshot_ && GetVirtualAbFeatureFlag().IsEnabled() && + mapped_devices_.count(partition_name + + SlotSuffixForSlotNumber(target_slot_)) > 0 && SourceCopyOperationIsClone(operation); break; default: @@ -371,6 +373,9 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( uint32_t target_slot, const DeltaArchiveManifest& manifest, bool update) { + source_slot_ = source_slot; + target_slot_ = target_slot; + if (fs_mgr_overlayfs_is_setup()) { // Non DAP devices can use overlayfs as well. 
LOG(WARNING) @@ -675,4 +680,9 @@ DynamicPartitionControlAndroid::GetDynamicPartitionDevice( return DynamicPartitionDeviceStatus::ERROR; } +void DynamicPartitionControlAndroid::set_fake_mapped_devices( + const std::set& fake) { + mapped_devices_ = fake; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 2bfbcb15..13fbb1ae 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -35,7 +35,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { ~DynamicPartitionControlAndroid(); FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; - bool ShouldSkipOperation(const InstallOperation& operation) override; + bool ShouldSkipOperation(const std::string& partition_name, + const InstallOperation& operation) override; void Cleanup() override; bool PreparePartitionsForUpdate(uint32_t source_slot, @@ -122,6 +123,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // metadata) for a given slot. virtual std::string GetSuperPartitionName(uint32_t slot); + virtual void set_fake_mapped_devices(const std::set& fake); + private: friend class DynamicPartitionControlAndroidTest; @@ -182,6 +185,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // Whether the target partitions should be loaded as dynamic partitions. Set // by PreparePartitionsForUpdate() per each update. bool is_target_dynamic_ = false; + uint32_t source_slot_ = UINT32_MAX; + uint32_t target_slot_ = UINT32_MAX; DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid); }; diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index fc3d38cb..207a97e0 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -620,18 +620,22 @@ TEST_F(DynamicPartitionControlAndroidTest, ApplyingToCurrentSlot) { << "Should not be able to apply to current slot."; } -TEST_F(DynamicPartitionControlAndroidTest, ShouldSkipOperationTest) { +TEST_P(DynamicPartitionControlAndroidTestP, ShouldSkipOperationTest) { + ASSERT_TRUE(dynamicControl().PreparePartitionsForUpdate( + source(), target(), PartitionSizesToManifest({{"foo", 4_MiB}}), false)); + dynamicControl().set_fake_mapped_devices({T("foo")}); + InstallOperation iop; Extent *se, *de; // Not a SOURCE_COPY operation, cannot skip. iop.set_type(InstallOperation::REPLACE); - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop)); iop.set_type(InstallOperation::SOURCE_COPY); // By default GetVirtualAbFeatureFlag is disabled. Cannot skip operation. - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop)); // Enable GetVirtualAbFeatureFlag in the mock interface. ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag()) @@ -639,19 +643,19 @@ TEST_F(DynamicPartitionControlAndroidTest, ShouldSkipOperationTest) { // By default target_supports_snapshot_ is set to false. Cannot skip // operation. - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop)); SetSnapshotEnabled(true); // Empty source and destination. Skip. 
- EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_TRUE(dynamicControl().ShouldSkipOperation("foo", iop)); se = iop.add_src_extents(); se->set_start_block(0); se->set_num_blocks(1); // There is something in sources, but destinations are empty. Cannot skip. - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop)); InstallOperation iop2; @@ -660,42 +664,45 @@ TEST_F(DynamicPartitionControlAndroidTest, ShouldSkipOperationTest) { de->set_num_blocks(1); // There is something in destinations, but sources are empty. Cannot skip. - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop2)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop2)); de = iop.add_dst_extents(); de->set_start_block(0); de->set_num_blocks(1); // Sources and destinations are identical. Skip. - EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_TRUE(dynamicControl().ShouldSkipOperation("foo", iop)); se = iop.add_src_extents(); se->set_start_block(1); se->set_num_blocks(5); // There is something in source, but not in destination. Cannot skip. - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop)); de = iop.add_dst_extents(); de->set_start_block(1); de->set_num_blocks(5); // There is source and destination are equal. Skip. - EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_TRUE(dynamicControl().ShouldSkipOperation("foo", iop)); de = iop.add_dst_extents(); de->set_start_block(6); de->set_num_blocks(5); // There is something extra in dest. Cannot skip. - EXPECT_FALSE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("foo", iop)); se = iop.add_src_extents(); se->set_start_block(6); se->set_num_blocks(5); // Source and dest are identical again. Skip. - EXPECT_TRUE(dynamicControl().ShouldSkipOperation(iop)); + EXPECT_TRUE(dynamicControl().ShouldSkipOperation("foo", iop)); + + // Don't skip for static partitions. + EXPECT_FALSE(dynamicControl().ShouldSkipOperation("bar", iop)); } } // namespace chromeos_update_engine diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index db8e8344..09b825d9 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -69,6 +70,10 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD0(FinishUpdate, bool()); + + void set_fake_mapped_devices(const std::set& fake) override { + DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); + } }; } // namespace chromeos_update_engine diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 3d38c5bc..c49474ce 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1157,8 +1157,10 @@ bool DeltaPerformer::PerformSourceCopyOperation( // The device may optimize the SOURCE_COPY operation. // Being this a device-specific optimization let DynamicPartitionController // decide it the operation should be skipped. 
+ const PartitionUpdate& partition = partitions_[current_partition_]; const auto& partition_control = boot_control_->GetDynamicPartitionControl(); - bool should_skip = partition_control->ShouldSkipOperation(operation); + bool should_skip = partition_control->ShouldSkipOperation( + partition.partition_name(), operation); if (operation.has_src_sha256_hash()) { bool read_ok; From 635792beb0c02d23ce40385d2e45bcf76b3b2a53 Mon Sep 17 00:00:00 2001 From: Jeff Sharkey Date: Wed, 4 Dec 2019 09:54:47 -0700 Subject: [PATCH 167/624] Explicitly cast to avoid ambiguous method call. Bug: 143723019 Test: manual Exempt-From-Owner-Approval: trivial refactoring Change-Id: I2fea96b2d5343ec0de8555e531f4ff9b6edb6398 --- metrics_reporter_android.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc index 4165c143..454649c0 100644 --- a/metrics_reporter_android.cc +++ b/metrics_reporter_android.cc @@ -176,13 +176,13 @@ void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics( android::util::stats_write( android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED, - attempt_count, + static_cast(attempt_count), GetStatsdEnumValue(static_cast(payload_type)), - payload_size_mib, - total_bytes_downloaded, - download_overhead_percentage, - total_duration.InMinutes(), - reboot_count); + static_cast(payload_size_mib), + static_cast(total_bytes_downloaded), + static_cast(download_overhead_percentage), + static_cast(total_duration.InMinutes()), + static_cast(reboot_count)); } void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() { From 173e619eaec17f9703b0e55d1cfdd039311f809a Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Tue, 10 Dec 2019 10:56:01 -0800 Subject: [PATCH 168/624] Switch to use libdm to bind loop devices This is a speculative fix of the flaky unit test. There used to be a race condition when the test tries to find a free loop device and binds to it. Switch to libdm in android to address the race. In the local test, the device gets into a state where kernel fails to open the loop device returned by LOOP_CTL_GET_FREE. This cl tries to prevent the device from entering the erroneous state. Though it's not clear if the test itself put the device into such state. It's still worth trying if there'is less flakiness after this cl. 
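For illustration, a minimal sketch of the libdm-based binding this change switches to, assuming the android::dm::LoopControl API used in the diff below; the wrapper function and its name are illustrative and not part of update_engine:

  #include <fcntl.h>
  #include <unistd.h>

  #include <chrono>
  #include <string>

  #include <libdm/loop_control.h>

  // Bind |filename| to a free loop device and return the device path in
  // |loop_device|. Per this change, LoopControl handles the race between
  // finding a free loop device and attaching to it internally, instead of
  // the separate LOOP_CTL_GET_FREE + LOOP_SET_FD ioctls used before.
  bool AttachImageToLoopDevice(const std::string& filename,
                               std::string* loop_device) {
    int fd = open(filename.c_str(), O_RDWR | O_CLOEXEC);
    if (fd < 0)
      return false;
    android::dm::LoopControl loop_control;
    // The 5 second timeout mirrors the value used in the test change below.
    bool ok = loop_control.Attach(fd, std::chrono::seconds(5), loop_device);
    close(fd);
    return ok;
  }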
Bug: 145706147 Test: unit tests pass Change-Id: I3abbba2ef801d787c575696f5d0ce553c43545ca --- Android.bp | 1 + common/test_utils.cc | 96 +++++++++++++++++++++++++++----------------- 2 files changed, 61 insertions(+), 36 deletions(-) diff --git a/Android.bp b/Android.bp index 84186972..9d6ec7c7 100644 --- a/Android.bp +++ b/Android.bp @@ -652,6 +652,7 @@ cc_test { "libgmock", "libchrome_test_helpers", "libupdate_engine_android", + "libdm", ], header_libs: [ diff --git a/common/test_utils.cc b/common/test_utils.cc index 50b09624..bd69d039 100644 --- a/common/test_utils.cc +++ b/common/test_utils.cc @@ -37,6 +37,10 @@ #include #include +#ifdef __ANDROID__ +#include +#endif + #include "update_engine/common/error_code_utils.h" #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/file_writer.h" @@ -44,16 +48,7 @@ using std::set; using std::string; using std::vector; - -namespace { - -#ifdef __ANDROID__ -#define kLoopDevicePrefix "/dev/block/loop" -#else -#define kLoopDevicePrefix "/dev/loop" -#endif // __ANDROID__ - -} // namespace +using namespace std::chrono_literals; namespace chromeos_update_engine { @@ -112,17 +107,43 @@ bool WriteFileString(const string& path, const string& data) { return utils::WriteFile(path.c_str(), data.data(), data.size()); } -bool BindToUnusedLoopDevice(const string& filename, - bool writable, - string* out_lo_dev_name) { - CHECK(out_lo_dev_name); +bool SetLoopDeviceStatus(int loop_device_fd, + const std::string& filename, + int loop_number, + bool writable) { + struct loop_info64 device_info {}; + device_info.lo_offset = 0; + device_info.lo_sizelimit = 0; // 0 means whole file. + device_info.lo_flags = (writable ? 0 : LO_FLAGS_READ_ONLY); + device_info.lo_number = loop_number; + strncpy(reinterpret_cast(device_info.lo_file_name), + base::FilePath(filename).BaseName().value().c_str(), + LO_NAME_SIZE - 1); + device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0'; + TEST_AND_RETURN_FALSE_ERRNO( + ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0); + if (writable) { + // Make sure loop device isn't read only. + int ro = 0; + if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) { + PLOG(WARNING) << "Failed to mark loop device writable."; + } + } + + return true; +} + +bool BindToUnusedLoopDeviceLegacy(int data_fd, + const string& filename, + bool writable, + string* out_lo_dev_name) { // Get the next available loop-device. int control_fd = HANDLE_EINTR(open("/dev/loop-control", O_RDWR | O_LARGEFILE)); TEST_AND_RETURN_FALSE_ERRNO(control_fd >= 0); int loop_number = ioctl(control_fd, LOOP_CTL_GET_FREE); IGNORE_EINTR(close(control_fd)); - *out_lo_dev_name = kLoopDevicePrefix + std::to_string(loop_number); + *out_lo_dev_name = "/dev/loop" + std::to_string(loop_number); // Double check that the loop exists and is free. int loop_device_fd = @@ -146,32 +167,35 @@ bool BindToUnusedLoopDevice(const string& filename, return false; } - // Open our data file and assign it to the loop device. + // Assign the data fd to the loop device. + TEST_AND_RETURN_FALSE_ERRNO(ioctl(loop_device_fd, LOOP_SET_FD, data_fd) == 0); + return SetLoopDeviceStatus(loop_device_fd, filename, loop_number, writable); +} + +bool BindToUnusedLoopDevice(const string& filename, + bool writable, + string* out_lo_dev_name) { + CHECK(out_lo_dev_name); int data_fd = open(filename.c_str(), (writable ? 
O_RDWR : O_RDONLY) | O_LARGEFILE | O_CLOEXEC); TEST_AND_RETURN_FALSE_ERRNO(data_fd >= 0); ScopedFdCloser data_fd_closer(&data_fd); - TEST_AND_RETURN_FALSE_ERRNO(ioctl(loop_device_fd, LOOP_SET_FD, data_fd) == 0); - memset(&device_info, 0, sizeof(device_info)); - device_info.lo_offset = 0; - device_info.lo_sizelimit = 0; // 0 means whole file. - device_info.lo_flags = (writable ? 0 : LO_FLAGS_READ_ONLY); - device_info.lo_number = loop_number; - strncpy(reinterpret_cast(device_info.lo_file_name), - base::FilePath(filename).BaseName().value().c_str(), - LO_NAME_SIZE - 1); - device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0'; - TEST_AND_RETURN_FALSE_ERRNO( - ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0); - if (writable) { - // Make sure loop device isn't read only. - int ro = 0; - if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) { - PLOG(WARNING) << "Failed to mark loop device writable."; - } - } - return true; +#ifdef __ANDROID__ + // Use libdm to bind a free loop device. The library internally handles the + // race condition. + android::dm::LoopControl loop_control; + TEST_AND_RETURN_FALSE(loop_control.Attach(data_fd, 5s, out_lo_dev_name)); + int loop_device_fd = open(out_lo_dev_name->c_str(), O_RDWR | O_CLOEXEC); + ScopedFdCloser loop_fd_closer(&loop_device_fd); + int loop_number; + TEST_AND_RETURN_FALSE( + sscanf(out_lo_dev_name->c_str(), "/dev/block/loop%d", &loop_number) == 1); + return SetLoopDeviceStatus(loop_device_fd, filename, loop_number, writable); +#else + return BindToUnusedLoopDeviceLegacy( + data_fd, filename, writable, out_lo_dev_name); +#endif } bool UnbindLoopDevice(const string& lo_dev_name) { From f6c805a7dd67cf169fa63762de1a922b19c9f602 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 13 Dec 2019 11:50:43 -0800 Subject: [PATCH 169/624] Sync with CrOS error codes. Add to ErrorCode and metrics::DownloadErrorCode: - kInternalLibCurlError - kUnresolvedHostError - kUnresolvedHostRecovered Test: builds Change-Id: Ibe11c02137860aed583b00b7e05b089ef84edb03 --- common/error_code.h | 3 +++ common/error_code_utils.cc | 6 ++++++ metrics_constants.h | 9 +++++++++ metrics_utils.cc | 10 ++++++++++ payload_state.cc | 3 +++ update_manager/chromeos_policy.cc | 3 +++ 6 files changed, 34 insertions(+) diff --git a/common/error_code.h b/common/error_code.h index 252cc420..3dd74028 100644 --- a/common/error_code.h +++ b/common/error_code.h @@ -80,6 +80,9 @@ enum class ErrorCode : int { kRollbackNotPossible = 54, kFirstActiveOmahaPingSentPersistenceError = 55, kVerityCalculationError = 56, + kInternalLibCurlError = 57, + kUnresolvedHostError = 58, + kUnresolvedHostRecovered = 59, // VERY IMPORTANT! When adding new error codes: // diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index b0bbbd4a..5bcbaa40 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -161,6 +161,12 @@ string ErrorCodeToString(ErrorCode code) { return "ErrorCode::kFirstActiveOmahaPingSentPersistenceError"; case ErrorCode::kVerityCalculationError: return "ErrorCode::kVerityCalculationError"; + case ErrorCode::kInternalLibCurlError: + return "ErrorCode::kInternalLibCurlError"; + case ErrorCode::kUnresolvedHostError: + return "ErrorCode::kUnresolvedHostError"; + case ErrorCode::kUnresolvedHostRecovered: + return "ErrorCode::kUnresolvedHostRecovered"; // Don't add a default case to let the compiler warn about newly added // error codes which should be added here. 
} diff --git a/metrics_constants.h b/metrics_constants.h index eabb8fb0..137143ab 100644 --- a/metrics_constants.h +++ b/metrics_constants.h @@ -60,6 +60,15 @@ enum class DownloadErrorCode { // above block and before the kInputMalformed field. This // is to ensure that error codes are not reordered. + // This error is reported when libcurl returns CURLE_COULDNT_RESOLVE_HOST and + // calling res_init() can recover. + kUnresolvedHostRecovered = 97, + // This error is reported when libcurl returns CURLE_COULDNT_RESOLVE_HOST. + kUnresolvedHostError = 98, + // This error is reported when libcurl has an internal error that + // update_engine can't recover from. + kInternalLibCurlError = 99, + // This error code is used to convey that malformed input was given // to the utils::GetDownloadErrorCode() function. This should never // happen but if it does it's because of an internal update_engine diff --git a/metrics_utils.cc b/metrics_utils.cc index 070626a4..ca3b5c97 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -43,6 +43,9 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { return metrics::AttemptResult::kUpdateSucceededNotActive; case ErrorCode::kDownloadTransferError: + case ErrorCode::kInternalLibCurlError: + case ErrorCode::kUnresolvedHostError: + case ErrorCode::kUnresolvedHostRecovered: return metrics::AttemptResult::kPayloadDownloadError; case ErrorCode::kDownloadInvalidMetadataSize: @@ -168,6 +171,13 @@ metrics::DownloadErrorCode GetDownloadErrorCode(ErrorCode code) { case ErrorCode::kDownloadTransferError: return metrics::DownloadErrorCode::kDownloadError; + case ErrorCode::kInternalLibCurlError: + return metrics::DownloadErrorCode::kInternalLibCurlError; + case ErrorCode::kUnresolvedHostError: + return metrics::DownloadErrorCode::kUnresolvedHostError; + case ErrorCode::kUnresolvedHostRecovered: + return metrics::DownloadErrorCode::kUnresolvedHostRecovered; + // All of these error codes are not related to downloading so break // out so we can warn and return InputMalformed. case ErrorCode::kSuccess: diff --git a/payload_state.cc b/payload_state.cc index a6c36201..355552ec 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -365,6 +365,9 @@ void PayloadState::UpdateFailed(ErrorCode error) { case ErrorCode::kNoUpdate: case ErrorCode::kRollbackNotPossible: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: + case ErrorCode::kInternalLibCurlError: + case ErrorCode::kUnresolvedHostError: + case ErrorCode::kUnresolvedHostRecovered: LOG(INFO) << "Not incrementing URL index or failure count for this error"; break; diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index bdb88f8e..8056f06f 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -149,6 +149,9 @@ bool HandleErrorCode(ErrorCode err_code, int* url_num_error_p) { case ErrorCode::kNoUpdate: case ErrorCode::kRollbackNotPossible: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: + case ErrorCode::kInternalLibCurlError: + case ErrorCode::kUnresolvedHostError: + case ErrorCode::kUnresolvedHostRecovered: LOG(INFO) << "Not changing URL index or failure count due to error " << chromeos_update_engine::utils::ErrorCodeToString(err_code) << " (" << static_cast(err_code) << ")"; From 46e37d4b7d50bc7b2023fd0a791aa4d57b8a4643 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 13 Dec 2019 12:01:42 -0800 Subject: [PATCH 170/624] Add ErrorCode::kNotEnoughSpace. Virtual A/B now requires space in userdata, which may not be enough during OTA. 
A kNotEnoughSpace error code is added to indicate such case. Bug: 138808328 Test: builds Change-Id: I6bf7964911c8eb3b9df9b8752018925a99876d0f --- common/error_code.h | 1 + common/error_code_utils.cc | 2 ++ metrics_utils.cc | 2 ++ payload_state.cc | 1 + update_manager/chromeos_policy.cc | 1 + 5 files changed, 7 insertions(+) diff --git a/common/error_code.h b/common/error_code.h index 3dd74028..1edbba3a 100644 --- a/common/error_code.h +++ b/common/error_code.h @@ -83,6 +83,7 @@ enum class ErrorCode : int { kInternalLibCurlError = 57, kUnresolvedHostError = 58, kUnresolvedHostRecovered = 59, + kNotEnoughSpace = 60, // VERY IMPORTANT! When adding new error codes: // diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index 5bcbaa40..645adc59 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -167,6 +167,8 @@ string ErrorCodeToString(ErrorCode code) { return "ErrorCode::kUnresolvedHostError"; case ErrorCode::kUnresolvedHostRecovered: return "ErrorCode::kUnresolvedHostRecovered"; + case ErrorCode::kNotEnoughSpace: + return "ErrorCode::kNotEnoughSpace"; // Don't add a default case to let the compiler warn about newly added // error codes which should be added here. } diff --git a/metrics_utils.cc b/metrics_utils.cc index ca3b5c97..1f70874e 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -72,6 +72,7 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { case ErrorCode::kFilesystemCopierError: case ErrorCode::kFilesystemVerifierError: case ErrorCode::kVerityCalculationError: + case ErrorCode::kNotEnoughSpace: return metrics::AttemptResult::kOperationExecutionError; case ErrorCode::kDownloadMetadataSignatureMismatch: @@ -236,6 +237,7 @@ metrics::DownloadErrorCode GetDownloadErrorCode(ErrorCode code) { case ErrorCode::kRollbackNotPossible: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: case ErrorCode::kVerityCalculationError: + case ErrorCode::kNotEnoughSpace: break; // Special flags. These can't happen (we mask them out above) but diff --git a/payload_state.cc b/payload_state.cc index 355552ec..23ff3e2f 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -368,6 +368,7 @@ void PayloadState::UpdateFailed(ErrorCode error) { case ErrorCode::kInternalLibCurlError: case ErrorCode::kUnresolvedHostError: case ErrorCode::kUnresolvedHostRecovered: + case ErrorCode::kNotEnoughSpace: LOG(INFO) << "Not incrementing URL index or failure count for this error"; break; diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 8056f06f..22e50945 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -152,6 +152,7 @@ bool HandleErrorCode(ErrorCode err_code, int* url_num_error_p) { case ErrorCode::kInternalLibCurlError: case ErrorCode::kUnresolvedHostError: case ErrorCode::kUnresolvedHostRecovered: + case ErrorCode::kNotEnoughSpace: LOG(INFO) << "Not changing URL index or failure count due to error " << chromeos_update_engine::utils::ErrorCodeToString(err_code) << " (" << static_cast(err_code) << ")"; From bd47d620e753e5150fd79a58ed79c23640057470 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 13 Dec 2019 14:59:58 -0800 Subject: [PATCH 171/624] Refactor UpdateAttmpeterAndroid. Factor out code for future use. 
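The main pieces factored out are the key=value header parsing and the payload-id derivation used by ApplyPayload(). A simplified sketch of the parsing helper is below (error reporting through brillo::ErrorPtr is omitted, and the function name is illustrative):

  #include <map>
  #include <string>
  #include <vector>

  #include <brillo/strings/string_utils.h>

  // Split "key=value" header strings into a map, rejecting malformed entries
  // and repeated keys, as the factored-out ParseKeyValuePairHeaders() does.
  bool ParseHeaders(const std::vector<std::string>& key_value_pairs,
                    std::map<std::string, std::string>* headers) {
    for (const std::string& pair : key_value_pairs) {
      std::string key, value;
      if (!brillo::string_utils::SplitAtFirst(pair, "=", &key, &value, false))
        return false;  // No '=' separator: malformed header.
      if (!headers->emplace(key, value).second)
        return false;  // Repeated key.
    }
    return true;
  }

The payload id is then derived from the FILE_HASH and METADATA_HASH headers when present; an empty id means the payload cannot be resumed.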
Test: update_engine_unittests Change-Id: I8071ba84a1dc66ed72faaf63eea1fb5bb814ab1d --- update_attempter_android.cc | 82 +++++++++++++++++++++++++++---------- update_attempter_android.h | 10 +++++ 2 files changed, 70 insertions(+), 22 deletions(-) diff --git a/update_attempter_android.cc b/update_attempter_android.cc index bc97a111..d7756792 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -98,6 +98,34 @@ bool GetHeaderAsBool(const string& header, bool default_value) { return default_value; } +bool ParseKeyValuePairHeaders(const vector& key_value_pair_headers, + std::map* headers, + brillo::ErrorPtr* error) { + for (const string& key_value_pair : key_value_pair_headers) { + string key; + string value; + if (!brillo::string_utils::SplitAtFirst( + key_value_pair, "=", &key, &value, false)) { + return LogAndSetError( + error, FROM_HERE, "Passed invalid header: " + key_value_pair); + } + if (!headers->emplace(key, value).second) + return LogAndSetError(error, FROM_HERE, "Passed repeated key: " + key); + } + return true; +} + +// Unique identifier for the payload. An empty string means that the payload +// can't be resumed. +string GetPayloadId(const std::map& headers) { + return (headers.count(kPayloadPropertyFileHash) + ? headers.at(kPayloadPropertyFileHash) + : "") + + (headers.count(kPayloadPropertyMetadataHash) + ? headers.at(kPayloadPropertyMetadataHash) + : ""); +} + } // namespace UpdateAttempterAndroid::UpdateAttempterAndroid( @@ -149,22 +177,11 @@ bool UpdateAttempterAndroid::ApplyPayload( DCHECK(status_ == UpdateStatus::IDLE); std::map headers; - for (const string& key_value_pair : key_value_pair_headers) { - string key; - string value; - if (!brillo::string_utils::SplitAtFirst( - key_value_pair, "=", &key, &value, false)) { - return LogAndSetError( - error, FROM_HERE, "Passed invalid header: " + key_value_pair); - } - if (!headers.emplace(key, value).second) - return LogAndSetError(error, FROM_HERE, "Passed repeated key: " + key); + if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) { + return false; } - // Unique identifier for the payload. An empty string means that the payload - // can't be resumed. - string payload_id = (headers[kPayloadPropertyFileHash] + - headers[kPayloadPropertyMetadataHash]); + string payload_id = GetPayloadId(headers); // Setup the InstallPlan based on the request. install_plan_ = InstallPlan(); @@ -207,8 +224,8 @@ bool UpdateAttempterAndroid::ApplyPayload( LOG(WARNING) << "Unable to save the update check response hash."; } } - install_plan_.source_slot = boot_control_->GetCurrentSlot(); - install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0; + install_plan_.source_slot = GetCurrentSlot(); + install_plan_.target_slot = GetTargetSlot(); install_plan_.powerwash_required = GetHeaderAsBool(headers[kPayloadPropertyPowerwash], false); @@ -342,7 +359,7 @@ bool UpdateAttempterAndroid::ResetStatus(brillo::ErrorPtr* error) { ClearMetricsPrefs(); // Update the boot flags so the current slot has higher priority. 
- if (!boot_control_->SetActiveBootSlot(boot_control_->GetCurrentSlot())) + if (!boot_control_->SetActiveBootSlot(GetCurrentSlot())) ret_value = false; // Mark the current slot as successful again, since marking it as active @@ -372,8 +389,10 @@ bool UpdateAttempterAndroid::ResetStatus(brillo::ErrorPtr* error) { } } -bool UpdateAttempterAndroid::VerifyPayloadApplicable( - const std::string& metadata_filename, brillo::ErrorPtr* error) { +bool UpdateAttempterAndroid::VerifyPayloadParseManifest( + const std::string& metadata_filename, + DeltaArchiveManifest* manifest, + brillo::ErrorPtr* error) { FileDescriptorPtr fd(new EintrSafeFileDescriptor); if (!fd->Open(metadata_filename.c_str(), O_RDONLY)) { return LogAndSetError( @@ -431,12 +450,23 @@ bool UpdateAttempterAndroid::VerifyPayloadApplicable( "Failed to validate metadata signature: " + utils::ErrorCodeToString(errorcode)); } - DeltaArchiveManifest manifest; - if (!payload_metadata.GetManifest(metadata, &manifest)) { + if (!payload_metadata.GetManifest(metadata, manifest)) { return LogAndSetError(error, FROM_HERE, "Failed to parse manifest."); } - BootControlInterface::Slot current_slot = boot_control_->GetCurrentSlot(); + return true; +} + +bool UpdateAttempterAndroid::VerifyPayloadApplicable( + const std::string& metadata_filename, brillo::ErrorPtr* error) { + DeltaArchiveManifest manifest; + TEST_AND_RETURN_FALSE( + VerifyPayloadParseManifest(metadata_filename, &manifest, error)); + + FileDescriptorPtr fd(new EintrSafeFileDescriptor); + ErrorCode errorcode; + + BootControlInterface::Slot current_slot = GetCurrentSlot(); for (const PartitionUpdate& partition : manifest.partitions()) { if (!partition.has_old_partition_info()) continue; @@ -863,4 +893,12 @@ void UpdateAttempterAndroid::ClearMetricsPrefs() { prefs_->Delete(kPrefsUpdateBootTimestampStart); } +BootControlInterface::Slot UpdateAttempterAndroid::GetCurrentSlot() const { + return boot_control_->GetCurrentSlot(); +} + +BootControlInterface::Slot UpdateAttempterAndroid::GetTargetSlot() const { + return GetCurrentSlot() == 0 ? 1 : 0; +} + } // namespace chromeos_update_engine diff --git a/update_attempter_android.h b/update_attempter_android.h index 7e1949d6..44b66d35 100644 --- a/update_attempter_android.h +++ b/update_attempter_android.h @@ -162,6 +162,16 @@ class UpdateAttempterAndroid // |kPrefsUpdateBootTimestampStart| void ClearMetricsPrefs(); + // Return source and target slots for update. + BootControlInterface::Slot GetCurrentSlot() const; + BootControlInterface::Slot GetTargetSlot() const; + + // Helper of public VerifyPayloadApplicable. Return the parsed manifest in + // |manifest|. + static bool VerifyPayloadParseManifest(const std::string& metadata_filename, + DeltaArchiveManifest* manifest, + brillo::ErrorPtr* error); + DaemonStateInterface* daemon_state_; // DaemonStateAndroid pointers. From eec29279a8869100458472b8803804f33c01f9d2 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 13 Dec 2019 15:02:37 -0800 Subject: [PATCH 172/624] Refactor BinderUpdateEngineAndroidService. Factor out common code. 
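In particular, applyPayload() and applyPayloadFd() both converted their binder String16 headers to std::string with the same loop; that conversion moves into a single helper. Roughly, mirroring the ToVecString() helper in the diff below (header paths are the usual libutils ones):

  #include <string>
  #include <vector>

  #include <utils/String16.h>
  #include <utils/String8.h>

  // Convert binder String16 arguments to UTF-8 std::strings once, instead of
  // repeating the conversion loop at every call site.
  std::vector<std::string> ToVecString(
      const std::vector<android::String16>& in) {
    std::vector<std::string> out;
    out.reserve(in.size());
    for (const auto& e : in)
      out.emplace_back(android::String8{e}.string());
    return out;
  }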
Test: update_engine_unittests Change-Id: I476e6f1cbe44e55a1a5b02b1ab136380c5ab2ae6 --- binder_service_android.cc | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/binder_service_android.cc b/binder_service_android.cc index 88bc1f2d..9d040d61 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -34,6 +34,16 @@ Status ErrorPtrToStatus(const brillo::ErrorPtr& error) { return Status::fromServiceSpecificError( 1, android::String8{error->GetMessage().c_str()}); } + +vector ToVecString(const vector& inp) { + vector out; + out.reserve(inp.size()); + for (const auto& e : inp) { + out.emplace_back(android::String8{e}.string()); + } + return out; +} + } // namespace namespace chromeos_update_engine { @@ -99,11 +109,7 @@ Status BinderUpdateEngineAndroidService::applyPayload( int64_t payload_size, const vector& header_kv_pairs) { const string payload_url{android::String8{url}.string()}; - vector str_headers; - str_headers.reserve(header_kv_pairs.size()); - for (const auto& header : header_kv_pairs) { - str_headers.emplace_back(android::String8{header}.string()); - } + vector str_headers = ToVecString(header_kv_pairs); brillo::ErrorPtr error; if (!service_delegate_->ApplyPayload( @@ -118,11 +124,7 @@ Status BinderUpdateEngineAndroidService::applyPayloadFd( int64_t payload_offset, int64_t payload_size, const vector& header_kv_pairs) { - vector str_headers; - str_headers.reserve(header_kv_pairs.size()); - for (const auto& header : header_kv_pairs) { - str_headers.emplace_back(android::String8{header}.string()); - } + vector str_headers = ToVecString(header_kv_pairs); brillo::ErrorPtr error; if (!service_delegate_->ApplyPayload( From 766135abfb530609614295c90899fd6be335c687 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 13 Dec 2019 11:29:32 -0800 Subject: [PATCH 173/624] update_engine: Be more descriptive on the logs on get status failure. It happens once in a while that the update-engine service is down or it is applying a heavy operation and won't be able to respond to the DBus mesasge. But this is confusing because we can't really figure out what went wrong. Add a more descriptive comment so people would know better how to diagnose the problem. BUG=chromium:1032384 TEST=None Change-Id: I04c59a74f70bf34ff305756c1b222f716ba39ee7 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1966513 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim --- update_engine_client.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/update_engine_client.cc b/update_engine_client.cc index e78eccfa..33425924 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -156,7 +156,13 @@ bool UpdateEngineClient::ShowStatus() { if (--retry_count == 0) { return false; } - LOG(WARNING) << "Will try " << retry_count << " more times!"; + LOG(WARNING) + << "Failed to get the update_engine status. This can happen when the" + " update_engine is busy doing a heavy operation or if the" + " update-engine service is down. If it doesn't resolve, a restart of" + " the update-engine service is needed." 
+ " Will try " + << retry_count << " more times!"; base::PlatformThread::Sleep( base::TimeDelta::FromSeconds(kShowStatusRetryIntervalInSeconds)); } From 60f3a238b23e0cbaa76b7a0a5d7d99a5d8ff8035 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Wed, 11 Dec 2019 13:53:39 -0800 Subject: [PATCH 174/624] During binding, check the status when calling callback functions In a separate bug, we encounter a selinux denial when the update_engine domain tries to call function from gmscore domain (through the callback). This results in an infinite progress bar in the UI, and adds confusion to testers and users. Even though the callback function is defined as oneway in the aidl, I find it returns status with the error message upon selinux denial: Status(-129, EX_TRANSACTION_FAILED): 'FAILED_TRANSACTION: ' So, we should at least check this status and return false in the bind(). We also need to unbind the callback in such case. So the caller can bind() again later without storing two callbacks in update_engine. Bug: 146073270 Bug: 145340049 Test: check the error message and return value upon selinux denial Change-Id: I7aeb80637704907090974192deaa17ba3eadc822 --- binder_service_android.cc | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/binder_service_android.cc b/binder_service_android.cc index 9d040d61..994dcfae 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -70,6 +70,20 @@ void BinderUpdateEngineAndroidService::SendPayloadApplicationComplete( Status BinderUpdateEngineAndroidService::bind( const android::sp& callback, bool* return_value) { + // Send an status update on connection (except when no update sent so far). + // Even though the status update is oneway, it still returns an erroneous + // status in case of a selinux denial. We should at least check this status + // and fails the binding. + if (last_status_ != -1) { + auto status = callback->onStatusUpdate(last_status_, last_progress_); + if (!status.isOk()) { + LOG(ERROR) << "Failed to call onStatusUpdate() from callback: " + << status.toString8(); + *return_value = false; + return Status::ok(); + } + } + callbacks_.emplace_back(callback); const android::sp& callback_binder = @@ -82,12 +96,6 @@ Status BinderUpdateEngineAndroidService::bind( base::Unretained(this), base::Unretained(callback_binder.get()))); - // Send an status update on connection (except when no update sent so far), - // since the status update is oneway and we don't need to wait for the - // response. - if (last_status_ != -1) - callback->onStatusUpdate(last_status_, last_progress_); - *return_value = true; return Status::ok(); } From e7ce821d3d4699fc01d8d49dded9ae5d853012bf Mon Sep 17 00:00:00 2001 From: David Anderson Date: Mon, 16 Dec 2019 20:13:19 -0800 Subject: [PATCH 175/624] Use libfs_mgr_binder when linking to binder-enabled libsnapshot. 
Bug: 134949511 Test: builds Change-Id: I6fe9788ac69b9c9ffe67a65c877c3e4beaf7a56e --- Android.bp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/Android.bp b/Android.bp index 9d6ec7c7..e457cd1b 100644 --- a/Android.bp +++ b/Android.bp @@ -195,12 +195,14 @@ cc_defaults { defaults: ["update_metadata-protos_exports"], static_libs: [ + "libcutils", + "libfs_mgr_binder", + "libgsi", "libsnapshot", "update_metadata-protos", ], shared_libs: [ "libbootloader_message", - "libfs_mgr", "libhidlbase", "liblp", "libutils", @@ -209,8 +211,14 @@ cc_defaults { ], target: { recovery: { - static_libs: ["libsnapshot_nobinder"], - exclude_static_libs: ["libsnapshot"], + static_libs: [ + "libfs_mgr", + "libsnapshot_nobinder", + ], + exclude_static_libs: [ + "libfs_mgr_binder", + "libsnapshot", + ], }, }, } From 6f7e29f0c91f733ee515b82baea4e85a67440648 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 13 Dec 2019 14:41:06 -0800 Subject: [PATCH 176/624] Add stubs for UpdateEngine.AllocateSpaceForPayload This API preallocates space for a Virtual A/B update. Right now, it also returns an error (which becomes a ServiceSpecificException in Java) when space is insufficient. This will be fixed in a follow up CL. Test: pass Bug: 138808058 Change-Id: I587615ba765acb5a52c1918d6a4acc57a95d75f7 --- binder_bindings/android/os/IUpdateEngine.aidl | 13 +++++++++++++ binder_service_android.cc | 19 +++++++++++++++++++ binder_service_android.h | 4 ++++ service_delegate_android_interface.h | 13 +++++++++++++ update_attempter_android.cc | 9 +++++++++ update_attempter_android.h | 4 ++++ 6 files changed, 62 insertions(+) diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl index 13050795..8a5ec717 100644 --- a/binder_bindings/android/os/IUpdateEngine.aidl +++ b/binder_bindings/android/os/IUpdateEngine.aidl @@ -45,4 +45,17 @@ interface IUpdateEngine { void resetStatus(); /** @hide */ boolean verifyPayloadApplicable(in String metadataFilename); + /** + * Allocate space on userdata partition. + * + * @return 0 indicates allocation is successful. + * Non-zero indicates space is insufficient. The returned value is the + * total required space (in bytes) on userdata partition. + * + * @throws ServiceSpecificException for other errors. 
+ * + * @hide + */ + long allocateSpaceForPayload(in String metadataFilename, + in String[] headerKeyValuePairs); } diff --git a/binder_service_android.cc b/binder_service_android.cc index 994dcfae..214801bb 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -199,4 +199,23 @@ bool BinderUpdateEngineAndroidService::UnbindCallback(const IBinder* callback) { return true; } +Status BinderUpdateEngineAndroidService::allocateSpaceForPayload( + const android::String16& metadata_filename, + const vector& header_kv_pairs, + int64_t* return_value) { + const std::string payload_metadata{ + android::String8{metadata_filename}.string()}; + vector str_headers = ToVecString(header_kv_pairs); + LOG(INFO) << "Received a request of allocating space for " << payload_metadata + << "."; + brillo::ErrorPtr error; + *return_value = + static_cast(service_delegate_->AllocateSpaceForPayload( + payload_metadata, str_headers, &error)); + if (error != nullptr) + return ErrorPtrToStatus(error); + + return Status::ok(); +} + } // namespace chromeos_update_engine diff --git a/binder_service_android.h b/binder_service_android.h index 0dda93bd..52070758 100644 --- a/binder_service_android.h +++ b/binder_service_android.h @@ -70,6 +70,10 @@ class BinderUpdateEngineAndroidService : public android::os::BnUpdateEngine, android::binder::Status resetStatus() override; android::binder::Status verifyPayloadApplicable( const android::String16& metadata_filename, bool* return_value) override; + android::binder::Status allocateSpaceForPayload( + const android::String16& metadata_filename, + const std::vector& header_kv_pairs, + int64_t* return_value) override; private: // Remove the passed |callback| from the list of registered callbacks. Called diff --git a/service_delegate_android_interface.h b/service_delegate_android_interface.h index 6bd75b62..7f0169e5 100644 --- a/service_delegate_android_interface.h +++ b/service_delegate_android_interface.h @@ -83,6 +83,19 @@ class ServiceDelegateAndroidInterface { virtual bool VerifyPayloadApplicable(const std::string& metadata_filename, brillo::ErrorPtr* error) = 0; + // Allocates space for a payload. + // Returns 0 if space is successfully preallocated. + // Return non-zero if not enough space is not available; returned value is + // the total space required (in bytes) to be free on the device for this + // update to be applied, and |error| is unset. + // In case of error, returns 0, and sets |error| accordingly. + // + // This function may block for several minutes in the worst case. + virtual uint64_t AllocateSpaceForPayload( + const std::string& metadata_filename, + const std::vector& key_value_pair_headers, + brillo::ErrorPtr* error) = 0; + protected: ServiceDelegateAndroidInterface() = default; }; diff --git a/update_attempter_android.cc b/update_attempter_android.cc index d7756792..59cdbb8e 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -901,4 +901,13 @@ BootControlInterface::Slot UpdateAttempterAndroid::GetTargetSlot() const { return GetCurrentSlot() == 0 ? 
1 : 0; } +uint64_t UpdateAttempterAndroid::AllocateSpaceForPayload( + const std::string& metadata_filename, + const vector& key_value_pair_headers, + brillo::ErrorPtr* error) { + // TODO(elsk): implement b/138808058 + LogAndSetError(error, FROM_HERE, "Not implemented."); + return 0; +} + } // namespace chromeos_update_engine diff --git a/update_attempter_android.h b/update_attempter_android.h index 44b66d35..309adffc 100644 --- a/update_attempter_android.h +++ b/update_attempter_android.h @@ -77,6 +77,10 @@ class UpdateAttempterAndroid bool ResetStatus(brillo::ErrorPtr* error) override; bool VerifyPayloadApplicable(const std::string& metadata_filename, brillo::ErrorPtr* error) override; + uint64_t AllocateSpaceForPayload( + const std::string& metadata_filename, + const std::vector& key_value_pair_headers, + brillo::ErrorPtr* error) override; // ActionProcessorDelegate methods: void ProcessingDone(const ActionProcessor* processor, From 7727990e0d465f1c8e18283023ec382b1012b2be Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 17 Dec 2019 16:38:21 -0800 Subject: [PATCH 177/624] Add ErrorCode::kDeviceCorrupted. This error code indicates the device is corrupted in a way that it can no longer receive any future updates. Specifically, if a device encounters a merge error, there is no way we can recover that device. Test: pass Bug: 138808328 Change-Id: Idbd9d1bc5bc02bbba157f17f31b2c5ace839243c --- common/error_code.h | 1 + common/error_code_utils.cc | 2 ++ metrics_utils.cc | 2 ++ payload_state.cc | 1 + update_manager/chromeos_policy.cc | 1 + 5 files changed, 7 insertions(+) diff --git a/common/error_code.h b/common/error_code.h index 1edbba3a..e473a05f 100644 --- a/common/error_code.h +++ b/common/error_code.h @@ -84,6 +84,7 @@ enum class ErrorCode : int { kUnresolvedHostError = 58, kUnresolvedHostRecovered = 59, kNotEnoughSpace = 60, + kDeviceCorrupted = 61, // VERY IMPORTANT! When adding new error codes: // diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index 645adc59..3fbf0feb 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -169,6 +169,8 @@ string ErrorCodeToString(ErrorCode code) { return "ErrorCode::kUnresolvedHostRecovered"; case ErrorCode::kNotEnoughSpace: return "ErrorCode::kNotEnoughSpace"; + case ErrorCode::kDeviceCorrupted: + return "ErrorCode::kDeviceCorrupted"; // Don't add a default case to let the compiler warn about newly added // error codes which should be added here. } diff --git a/metrics_utils.cc b/metrics_utils.cc index 1f70874e..9abc3ef7 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -73,6 +73,7 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { case ErrorCode::kFilesystemVerifierError: case ErrorCode::kVerityCalculationError: case ErrorCode::kNotEnoughSpace: + case ErrorCode::kDeviceCorrupted: return metrics::AttemptResult::kOperationExecutionError; case ErrorCode::kDownloadMetadataSignatureMismatch: @@ -238,6 +239,7 @@ metrics::DownloadErrorCode GetDownloadErrorCode(ErrorCode code) { case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: case ErrorCode::kVerityCalculationError: case ErrorCode::kNotEnoughSpace: + case ErrorCode::kDeviceCorrupted: break; // Special flags. 
These can't happen (we mask them out above) but diff --git a/payload_state.cc b/payload_state.cc index 23ff3e2f..3ba63914 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -369,6 +369,7 @@ void PayloadState::UpdateFailed(ErrorCode error) { case ErrorCode::kUnresolvedHostError: case ErrorCode::kUnresolvedHostRecovered: case ErrorCode::kNotEnoughSpace: + case ErrorCode::kDeviceCorrupted: LOG(INFO) << "Not incrementing URL index or failure count for this error"; break; diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 22e50945..1fa86368 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -153,6 +153,7 @@ bool HandleErrorCode(ErrorCode err_code, int* url_num_error_p) { case ErrorCode::kUnresolvedHostError: case ErrorCode::kUnresolvedHostRecovered: case ErrorCode::kNotEnoughSpace: + case ErrorCode::kDeviceCorrupted: LOG(INFO) << "Not changing URL index or failure count due to error " << chromeos_update_engine::utils::ErrorCodeToString(err_code) << " (" << static_cast(err_code) << ")"; From 2236ea0fcf58f6bb14d0c79b6ea91cac181dc2ba Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 13 Dec 2019 16:11:22 -0800 Subject: [PATCH 178/624] Add stubs for UpdateEngine.CleanupSuccessfulUpdate This API waits for merge to finish after rebooting to the new Virtual A/B build. Test: pass Bug: 138808328 Change-Id: I9422f0ffb1876b71ab45e994fc2d5c76d9ec3a75 --- binder_bindings/android/os/IUpdateEngine.aidl | 8 ++++++++ binder_service_android.cc | 8 ++++++++ binder_service_android.h | 2 ++ service_delegate_android_interface.h | 7 +++++++ update_attempter_android.cc | 7 +++++++ update_attempter_android.h | 1 + 6 files changed, 33 insertions(+) diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl index 8a5ec717..bbb86a3f 100644 --- a/binder_bindings/android/os/IUpdateEngine.aidl +++ b/binder_bindings/android/os/IUpdateEngine.aidl @@ -58,4 +58,12 @@ interface IUpdateEngine { */ long allocateSpaceForPayload(in String metadataFilename, in String[] headerKeyValuePairs); + /** @hide + * + * Wait for merge to finish, and clean up necessary files. + * + * @return SUCCESS if successful. ERROR if transient errors (e.g. merged but + * needs reboot). DEVICE_CORRUPTED for permanent errors. + */ + int cleanupSuccessfulUpdate(); } diff --git a/binder_service_android.cc b/binder_service_android.cc index 214801bb..c376f4ea 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -214,7 +214,15 @@ Status BinderUpdateEngineAndroidService::allocateSpaceForPayload( payload_metadata, str_headers, &error)); if (error != nullptr) return ErrorPtrToStatus(error); + return Status::ok(); +} +Status BinderUpdateEngineAndroidService::cleanupSuccessfulUpdate( + int32_t* return_value) { + brillo::ErrorPtr error; + *return_value = service_delegate_->CleanupSuccessfulUpdate(&error); + if (error != nullptr) + return ErrorPtrToStatus(error); return Status::ok(); } diff --git a/binder_service_android.h b/binder_service_android.h index 52070758..1c38d2be 100644 --- a/binder_service_android.h +++ b/binder_service_android.h @@ -74,6 +74,8 @@ class BinderUpdateEngineAndroidService : public android::os::BnUpdateEngine, const android::String16& metadata_filename, const std::vector& header_kv_pairs, int64_t* return_value) override; + android::binder::Status cleanupSuccessfulUpdate( + int32_t* return_value) override; private: // Remove the passed |callback| from the list of registered callbacks. 
Called diff --git a/service_delegate_android_interface.h b/service_delegate_android_interface.h index 7f0169e5..a12f1e8c 100644 --- a/service_delegate_android_interface.h +++ b/service_delegate_android_interface.h @@ -96,6 +96,13 @@ class ServiceDelegateAndroidInterface { const std::vector& key_value_pair_headers, brillo::ErrorPtr* error) = 0; + // Wait for merge to complete, then clean up merge after an update has been + // successful. + // + // This function returns immediately if no merge is needed, but may block + // for a long time (up to several minutes) in the worst case. + virtual int32_t CleanupSuccessfulUpdate(brillo::ErrorPtr* error) = 0; + protected: ServiceDelegateAndroidInterface() = default; }; diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 59cdbb8e..263498b5 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -910,4 +910,11 @@ uint64_t UpdateAttempterAndroid::AllocateSpaceForPayload( return 0; } +int32_t UpdateAttempterAndroid::CleanupSuccessfulUpdate( + brillo::ErrorPtr* error) { + // TODO(elsk): implement b/138808328 + LogAndSetError(error, FROM_HERE, "Not implemented."); + return static_cast(ErrorCode::kError); +} + } // namespace chromeos_update_engine diff --git a/update_attempter_android.h b/update_attempter_android.h index 309adffc..c301e645 100644 --- a/update_attempter_android.h +++ b/update_attempter_android.h @@ -81,6 +81,7 @@ class UpdateAttempterAndroid const std::string& metadata_filename, const std::vector& key_value_pair_headers, brillo::ErrorPtr* error) override; + int32_t CleanupSuccessfulUpdate(brillo::ErrorPtr* error) override; // ActionProcessorDelegate methods: void ProcessingDone(const ActionProcessor* processor, From 3a7dc26624a526b3efc278877002c69d4d55b86f Mon Sep 17 00:00:00 2001 From: Andrew Date: Thu, 19 Dec 2019 11:38:08 -0800 Subject: [PATCH 179/624] update_engine: Check metadata and signature sizes Check that the size of the metadata size and signature sizes are smaller that the payload size. Without this check, the delta performer writes X number of bytes to the buffer before validating these values, and an attacker could provide a huge value which will make update_engine crash. BUG=chromium:1027166 TEST=fuzzer, unittest, install/unistall DLC on DUT TEST=test_that -b $BOARD $IP autoupdate_EOL Change-Id: Iad3a314efacbb1005fac37dd383a3f8852008f4b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1976079 Commit-Queue: Andrew Lassalle Tested-by: Andrew Lassalle Reviewed-by: Amin Hassani Auto-Submit: Andrew Lassalle --- payload_consumer/delta_performer.cc | 15 ++++ .../delta_performer_integration_test.cc | 3 +- payload_consumer/delta_performer_unittest.cc | 90 +++++++++++++++++-- payload_consumer/payload_metadata.cc | 2 + 4 files changed, 104 insertions(+), 6 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index ee5f38cc..3263ff7d 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -458,6 +458,21 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata( return MetadataParseResult::kError; } } + + // Check that the |metadata signature size_| and |metadata_size_| are not + // very big numbers. This is necessary since |update_engine| needs to write + // these values into the buffer before being able to use them, and if an + // attacker sets these values to a very big number, the buffer will overflow + // and |update_engine| will crash. 
A simple way of solving this is to check + // that the size of both values is smaller than the payload itself. + if (metadata_size_ + metadata_signature_size_ > payload_->size) { + LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")" + << " or metadata signature(" << metadata_signature_size_ << ")" + << " is greater than the size of the payload" + << "(" << payload_->size << ")"; + *error = ErrorCode::kDownloadInvalidMetadataSize; + return MetadataParseResult::kError; + } } // Now that we have validated the metadata size, we should wait for the full diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 5f557392..f1a492b5 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -713,7 +713,8 @@ static void ApplyDeltaFile(bool full_kernel, // Update the A image in place. InstallPlan* install_plan = &state->install_plan; install_plan->hash_checks_mandatory = hash_checks_mandatory; - install_plan->payloads = {{.metadata_size = state->metadata_size, + install_plan->payloads = {{.size = state->delta.size(), + .metadata_size = state->metadata_size, .type = (full_kernel && full_rootfs) ? InstallPayloadType::kFull : InstallPayloadType::kDelta}}; diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 0671ecae..47cb0e78 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -43,6 +43,7 @@ #include "update_engine/payload_consumer/fake_file_descriptor.h" #include "update_engine/payload_consumer/mock_download_action.h" #include "update_engine/payload_consumer/payload_constants.h" +#include "update_engine/payload_consumer/payload_metadata.h" #include "update_engine/payload_generator/bzip.h" #include "update_engine/payload_generator/extent_ranges.h" #include "update_engine/payload_generator/payload_file.h" @@ -272,6 +273,7 @@ class DeltaPerformerTest : public ::testing::Test { test_utils::ScopedTempFile new_part("Partition-XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data)); + payload_.size = payload_data.size(); // We installed the operations only in the rootfs partition, but the // delta performer needs to access all the partitions. fake_boot_control_.SetPartitionDevice( @@ -308,6 +310,7 @@ class DeltaPerformerTest : public ::testing::Test { EXPECT_TRUE(performer_.Write(&version, 8)); payload_.metadata_size = expected_metadata_size; + payload_.size = actual_metadata_size + 1; ErrorCode error_code; // When filling in size in manifest, exclude the size of the 24-byte header. uint64_t size_in_manifest = htobe64(actual_metadata_size - 24); @@ -325,7 +328,7 @@ class DeltaPerformerTest : public ::testing::Test { EXPECT_LT(performer_.Close(), 0); } - // Generates a valid delta file but tests the delta performer by suppling + // Generates a valid delta file but tests the delta performer by supplying // different metadata signatures as per metadata_signature_test flag and // sees if the result of the parsing are as per hash_checks_mandatory flag. 
void DoMetadataSignatureTest(MetadataSignatureTest metadata_signature_test, @@ -338,6 +341,7 @@ class DeltaPerformerTest : public ::testing::Test { kBrilloMajorPayloadVersion, kFullPayloadMinorVersion); + payload_.size = payload.size(); LOG(INFO) << "Payload size: " << payload.size(); install_plan_.hash_checks_mandatory = hash_checks_mandatory; @@ -866,15 +870,27 @@ TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) { EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic))); uint64_t major_version = htobe64(kBrilloMajorPayloadVersion); - EXPECT_TRUE(performer_.Write(&major_version, 8)); + EXPECT_TRUE( + performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize)); uint64_t manifest_size = rand_r(&seed) % 256; + uint32_t metadata_signature_size = rand_r(&seed) % 256; + + // The payload size has to be bigger than the |metadata_size| and + // |metadata_signature_size| + payload_.size = PayloadMetadata::kDeltaManifestSizeOffset + + PayloadMetadata::kDeltaManifestSizeSize + + PayloadMetadata::kDeltaMetadataSignatureSizeSize + + manifest_size + metadata_signature_size + 1; + uint64_t manifest_size_be = htobe64(manifest_size); - EXPECT_TRUE(performer_.Write(&manifest_size_be, 8)); + EXPECT_TRUE(performer_.Write(&manifest_size_be, + PayloadMetadata::kDeltaManifestSizeSize)); - uint32_t metadata_signature_size = rand_r(&seed) % 256; uint32_t metadata_signature_size_be = htobe32(metadata_signature_size); - EXPECT_TRUE(performer_.Write(&metadata_signature_size_be, 4)); + EXPECT_TRUE( + performer_.Write(&metadata_signature_size_be, + PayloadMetadata::kDeltaMetadataSignatureSizeSize)); EXPECT_LT(performer_.Close(), 0); @@ -884,11 +900,75 @@ TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) { EXPECT_EQ(metadata_signature_size, performer_.metadata_signature_size_); } +TEST_F(DeltaPerformerTest, BrilloMetadataSizeNOKTest) { + unsigned int seed = time(nullptr); + EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic))); + + uint64_t major_version = htobe64(kBrilloMajorPayloadVersion); + EXPECT_TRUE( + performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize)); + + uint64_t manifest_size = UINT64_MAX - 600; // Subtract to avoid wrap around. 
+ uint64_t manifest_offset = PayloadMetadata::kDeltaManifestSizeOffset + + PayloadMetadata::kDeltaManifestSizeSize + + PayloadMetadata::kDeltaMetadataSignatureSizeSize; + payload_.metadata_size = manifest_offset + manifest_size; + uint32_t metadata_signature_size = rand_r(&seed) % 256; + + // The payload size is greater than the payload header but smaller than + // |metadata_signature_size| + |metadata_size| + payload_.size = manifest_offset + metadata_signature_size + 1; + + uint64_t manifest_size_be = htobe64(manifest_size); + EXPECT_TRUE(performer_.Write(&manifest_size_be, + PayloadMetadata::kDeltaManifestSizeSize)); + uint32_t metadata_signature_size_be = htobe32(metadata_signature_size); + + ErrorCode error; + EXPECT_FALSE( + performer_.Write(&metadata_signature_size_be, + PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1, + &error)); + + EXPECT_EQ(ErrorCode::kDownloadInvalidMetadataSize, error); +} + +TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeNOKTest) { + unsigned int seed = time(nullptr); + EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic))); + + uint64_t major_version = htobe64(kBrilloMajorPayloadVersion); + EXPECT_TRUE( + performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize)); + + uint64_t manifest_size = rand_r(&seed) % 256; + // Subtract from UINT32_MAX to avoid wrap around. + uint32_t metadata_signature_size = UINT32_MAX - 600; + + // The payload size is greater than |manifest_size| but smaller than + // |metadata_signature_size| + payload_.size = manifest_size + 1; + + uint64_t manifest_size_be = htobe64(manifest_size); + EXPECT_TRUE(performer_.Write(&manifest_size_be, + PayloadMetadata::kDeltaManifestSizeSize)); + + uint32_t metadata_signature_size_be = htobe32(metadata_signature_size); + ErrorCode error; + EXPECT_FALSE( + performer_.Write(&metadata_signature_size_be, + PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1, + &error)); + + EXPECT_EQ(ErrorCode::kDownloadInvalidMetadataSize, error); +} + TEST_F(DeltaPerformerTest, BrilloParsePayloadMetadataTest) { brillo::Blob payload_data = GeneratePayload( {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion); install_plan_.hash_checks_mandatory = true; performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath)); + payload_.size = payload_data.size(); ErrorCode error; EXPECT_EQ(MetadataParseResult::kSuccess, performer_.ParsePayloadMetadata(payload_data, &error)); diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index 4d8ee7b4..b83001a2 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -92,6 +92,7 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( metadata_size_ = manifest_offset + manifest_size_; if (metadata_size_ < manifest_size_) { // Overflow detected. + LOG(ERROR) << "Overflow detected on manifest size."; *error = ErrorCode::kDownloadInvalidMetadataSize; return MetadataParseResult::kError; } @@ -108,6 +109,7 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( if (metadata_size_ + metadata_signature_size_ < metadata_size_) { // Overflow detected. 
+ LOG(ERROR) << "Overflow detected on metadata and signature size."; *error = ErrorCode::kDownloadInvalidMetadataSize; return MetadataParseResult::kError; } From 8da11e21b47d6085438c42bed39eb95315463500 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 23 Dec 2019 11:26:17 -0800 Subject: [PATCH 180/624] update_engine: Fix Install Plan Dump() misleading prints The new way of Dump()'ing the payload urls will be per payload. This will cause less confusion while reading the InstallPlan Dump() when payload urls vary per payload package. Now the log will also show payload urls that are include multiple urls for InstallPlan Dump()'s. Previous: [0106/095336.470400:INFO:install_plan.cc(83)] InstallPlan: new_update, version: 12804.0.0, source_slot: INVALID, target_slot: A, url: fi le:///usr/local/dlc/test1-dlc/test-package/dlcservice_test-dlc.payload, payload: (size: 797, metadata_size: 0, metadata signature: , has h: 9E117706017CC676BB5B9EABDFC7B9A9EF1C650D72F6B411031A4FD1161A9878, payload type: full), payload: (size: 797, metadata_size: 0, metadat a signature: , hash: 47D86E6F790048EBE45597B709BED5A724760160222926F1C9D88F045AE70737, payload type: full), hash_checks_mandatory: false , powerwash_required: false, switch_slot_on_reboot: true, run_post_install: true, is_rollback: false, write_verity: true New: [0106/130245.144640:INFO:install_plan.cc(90)] Install Plan: new_update, version: 12804.0.0, source_slot: IN VALID, target_slot: A, initial url: file:///usr/local /dlc/test1-dlc/test-package/dlcservice_test-dlc.paylo ad, payload: (urls: (file:///usr/local/dlc/test1-dlc/ test-package/dlcservice_test-dlc.payload), size: 797, metadata_size: 0, metadata signature: , hash: 9E1177 06017CC676BB5B9EABDFC7B9A9EF1C650D72F6B411031A4FD1161 A9878, payload type: full), payload: (urls: (file:/// usr/local/dlc/test2-dlc/test-package/dlcservice_test- dlc.payload), size: 797, metadata_size: 0, metadata s ignature: , hash: 47D86E6F790048EBE45597B709BED5A7247 60160222926F1C9D88F045AE70737, payload type: full), h ash_checks_mandatory: false, powerwash_required: fals e, switch_slot_on_reboot: true, run_post_install: tru e, is_rollback: false, write_verity: true BUG=chromium:1036850 TEST=# invoke Omaha respone with varying payload urls Change-Id: I970b1297d8a6b18f378d07583119246f10378d3e Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1980276 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Andrew Lassalle Commit-Queue: Jae Hoon Kim --- omaha_response_handler_action.cc | 3 ++- payload_consumer/install_plan.cc | 13 ++++++++++--- payload_consumer/install_plan.h | 4 +++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index 5741a2bd..c25b76f5 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -97,7 +97,8 @@ void OmahaResponseHandlerAction::PerformAction() { return; } install_plan_.payloads.push_back( - {.size = package.size, + {.payload_urls = package.payload_urls, + .size = package.size, .metadata_size = package.metadata_size, .metadata_signature = package.metadata_signature, .hash = raw_hash, diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc index f52cd2df..4638fbe0 100644 --- a/payload_consumer/install_plan.cc +++ b/payload_consumer/install_plan.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "update_engine/common/utils.h" @@ -28,6 +29,11 @@ using std::string; namespace 
chromeos_update_engine { +string PayloadUrlsToString( + const decltype(InstallPlan::Payload::payload_urls)& payload_urls) { + return "(" + base::JoinString(payload_urls, ",") + ")"; +} + string InstallPayloadTypeToString(InstallPayloadType type) { switch (type) { case InstallPayloadType::kUnknown: @@ -65,8 +71,9 @@ void InstallPlan::Dump() const { string payloads_str; for (const auto& payload : payloads) { payloads_str += base::StringPrintf( - ", payload: (size: %" PRIu64 ", metadata_size: %" PRIu64 + ", payload: (urls: %s, size: %" PRIu64 ", metadata_size: %" PRIu64 ", metadata signature: %s, hash: %s, payload type: %s)", + PayloadUrlsToString(payload.payload_urls).c_str(), payload.size, payload.metadata_size, payload.metadata_signature.c_str(), @@ -84,8 +91,8 @@ void InstallPlan::Dump() const { << version_str << ", source_slot: " << BootControlInterface::SlotName(source_slot) << ", target_slot: " << BootControlInterface::SlotName(target_slot) - << ", url: " << download_url << payloads_str << partitions_str - << ", hash_checks_mandatory: " + << ", initial url: " << download_url << payloads_str + << partitions_str << ", hash_checks_mandatory: " << utils::ToString(hash_checks_mandatory) << ", powerwash_required: " << utils::ToString(powerwash_required) << ", switch_slot_on_reboot: " diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index 17cefd84..7a95ab43 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -58,6 +58,7 @@ struct InstallPlan { std::string system_version; struct Payload { + std::vector payload_urls; // URLs to download the payload uint64_t size = 0; // size of the payload uint64_t metadata_size = 0; // size of the metadata std::string metadata_signature; // signature of the metadata in base64 @@ -69,7 +70,8 @@ struct InstallPlan { bool already_applied = false; bool operator==(const Payload& that) const { - return size == that.size && metadata_size == that.metadata_size && + return payload_urls == that.payload_urls && size == that.size && + metadata_size == that.metadata_size && metadata_signature == that.metadata_signature && hash == that.hash && type == that.type && already_applied == that.already_applied; From 3bd9b6fc159042d782c657d422b73636ed9b2fba Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Thu, 26 Dec 2019 22:02:04 +0900 Subject: [PATCH 181/624] update_engine: dbus mock update for libchrome uprev - OnConnectedCallback will be a OnceCallback, and no longer mockable. * chromeos-dbus-bindings will generate mocks like DoRegisterXXXSignalHandler with OnConnectedCallback* instead of OnceCallback, with a wrapper function RegisterXXXSignalhandler to take move-only OnConnectedCallback and pass the pointer to the mock. 
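A minimal sketch of the wrapper pattern described above (assuming gmock and libchrome/dbus headers are available; the proxy interface and the "Status" signal name are hypothetical stand-ins, not part of this change):

  #include <cstdint>
  #include <base/callback.h>
  #include <dbus/object_proxy.h>
  #include <gmock/gmock.h>

  // Hypothetical interface, standing in for what chromeos-dbus-bindings
  // generates for a proxy with a "Status" signal.
  class FooProxyInterface {
   public:
    virtual ~FooProxyInterface() = default;
    virtual void RegisterStatusSignalHandler(
        const base::Callback<void(int32_t)>& signal_callback,
        dbus::ObjectProxy::OnConnectedCallback on_connected) = 0;
  };

  class MockFooProxy : public FooProxyInterface {
   public:
    // gmock expectations copy their arguments, which a move-only OnceCallback
    // forbids, so the generated mock method takes a pointer instead...
    MOCK_METHOD2(DoRegisterStatusSignalHandler,
                 void(const base::Callback<void(int32_t)>&,
                      dbus::ObjectProxy::OnConnectedCallback*));
    // ...and the real signature forwards the move-only callback by address.
    void RegisterStatusSignalHandler(
        const base::Callback<void(int32_t)>& signal_callback,
        dbus::ObjectProxy::OnConnectedCallback on_connected) override {
      DoRegisterStatusSignalHandler(signal_callback, &on_connected);
    }
  };

A test that grabs the pointer consumes the callback exactly once, e.g. std::move(*on_connected).Run(interface, signal, true), which is what the GrabCallbacks helper below does.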
BUG=chromium:909719 TEST=unittest of update_engine Cq-Depend: chromium:1980448 Change-Id: I9678c6eadb26e5bacfbca2d70281deee71cfb346 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1980379 Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Hidehiko Abe Tested-by: Qijiang Fan Commit-Queue: Qijiang Fan --- dbus_test_utils.h | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/dbus_test_utils.h b/dbus_test_utils.h index b3748cea..72fd4e01 100644 --- a/dbus_test_utils.h +++ b/dbus_test_utils.h @@ -17,8 +17,10 @@ #ifndef UPDATE_ENGINE_DBUS_TEST_UTILS_H_ #define UPDATE_ENGINE_DBUS_TEST_UTILS_H_ +#include #include #include +#include #include #include @@ -27,13 +29,13 @@ namespace chromeos_update_engine { namespace dbus_test_utils { -#define MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER( \ - mock_signal_handler, mock_proxy, signal) \ - do { \ - EXPECT_CALL((mock_proxy), \ - Register##signal##SignalHandler(::testing::_, ::testing::_)) \ - .WillOnce(::chromeos_update_engine::dbus_test_utils::GrabCallbacks( \ - &(mock_signal_handler))); \ +#define MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER( \ + mock_signal_handler, mock_proxy, signal) \ + do { \ + EXPECT_CALL((mock_proxy), \ + DoRegister##signal##SignalHandler(::testing::_, ::testing::_)) \ + .WillOnce(::chromeos_update_engine::dbus_test_utils::GrabCallbacks( \ + &(mock_signal_handler))); \ } while (false) template @@ -52,10 +54,10 @@ class MockSignalHandler { void GrabCallbacks( const base::Callback& signal_callback, - dbus::ObjectProxy::OnConnectedCallback on_connected_callback) { + dbus::ObjectProxy::OnConnectedCallback* on_connected_callback) { signal_callback_.reset(new base::Callback(signal_callback)); - on_connected_callback_.reset( - new dbus::ObjectProxy::OnConnectedCallback(on_connected_callback)); + on_connected_callback_.reset(new dbus::ObjectProxy::OnConnectedCallback( + std::move(*on_connected_callback))); // Notify from the main loop that the callback was connected. callback_connected_task_ = brillo::MessageLoop::current()->PostTask( FROM_HERE, @@ -66,7 +68,7 @@ class MockSignalHandler { private: void OnCallbackConnected() { callback_connected_task_ = brillo::MessageLoop::kTaskIdNull; - on_connected_callback_->Run("", "", true); + std::move(*on_connected_callback_).Run("", "", true); } brillo::MessageLoop::TaskId callback_connected_task_{ From 37d15371cfc48ec573137907066f9cb9e7951bf9 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 8 Jan 2020 18:11:26 -0800 Subject: [PATCH 182/624] update_engine: Barebone Omaha Request for DLC(s) No need to maintain certain attributes for DLC update/install request to Omaha. 
The list of uneccessary attributes for DLC AppIDs: - lang - fw_version - ec_version - requisition BUG=chromium:1039898 TEST=FEATURES=test emerge-$B update_engine Change-Id: I40efc3435d4c359470464f2dc6e32470cc629938 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1992192 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- omaha_request_builder_xml.cc | 21 +++++++---- omaha_request_builder_xml.h | 4 +++ omaha_request_builder_xml_unittest.cc | 50 +++++++++++++++++++++++++++ 3 files changed, 68 insertions(+), 7 deletions(-) diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 95fb1837..823894e5 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -309,14 +309,18 @@ string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { product_components_args + fingerprint_arg + buildtype_arg + - "lang=\"" + XmlEncodeWithDefault(params_->app_lang(), "en-US") + "\" " + "board=\"" + XmlEncodeWithDefault(params_->os_board()) + "\" " + "hardware_class=\"" + XmlEncodeWithDefault(params_->hwid()) + "\" " + - "delta_okay=\"" + delta_okay_str + "\" " + "delta_okay=\"" + delta_okay_str + "\" " + + install_date_in_days_str + + + // DLC excluded for installs and updates. + (app_data.is_dlc ? "" : + "lang=\"" + XmlEncodeWithDefault(params_->app_lang(), "en-US") + "\" " + "fw_version=\"" + XmlEncodeWithDefault(params_->fw_version()) + "\" " + "ec_version=\"" + XmlEncodeWithDefault(params_->ec_version()) + "\" " + - install_date_in_days_str + - requisition_arg + + requisition_arg) + + ">\n" + app_body + " \n"; @@ -363,12 +367,14 @@ string OmahaRequestBuilderXml::GetApps() const { .version = params_->app_version(), .product_components = params_->product_components(), // Skips updatecheck for platform app in case of an install operation. - .skip_update = params_->is_install()}; + .skip_update = params_->is_install(), + .is_dlc = false}; app_xml += GetApp(product_app); if (!params_->system_app_id().empty()) { OmahaAppData system_app = {.id = params_->system_app_id(), .version = params_->system_version(), - .skip_update = false}; + .skip_update = false, + .is_dlc = false}; app_xml += GetApp(system_app); } // Create APP ID according to |dlc_module_id| (sticking the current AppID to @@ -377,7 +383,8 @@ string OmahaRequestBuilderXml::GetApps() const { OmahaAppData dlc_module_app = { .id = params_->GetAppId() + "_" + dlc_module_id, .version = params_->app_version(), - .skip_update = false}; + .skip_update = false, + .is_dlc = true}; app_xml += GetApp(dlc_module_app); } return app_xml; diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h index 495ddd7c..488be8ac 100644 --- a/omaha_request_builder_xml.h +++ b/omaha_request_builder_xml.h @@ -85,6 +85,7 @@ struct OmahaAppData { std::string version; std::string product_components; bool skip_update; + bool is_dlc; }; // Encodes XML entities in a given string. Input must be ASCII-7 valid. If @@ -139,6 +140,9 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { std::string GetRequest() const override; private: + FRIEND_TEST(OmahaRequestBuilderXmlTest, PlatformGetAppTest); + FRIEND_TEST(OmahaRequestBuilderXmlTest, DlcGetAppTest); + // Returns an XML that corresponds to the entire node of the Omaha // request based on the member variables. 
std::string GetOs() const; diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 4375bed3..ecab0e03 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -80,6 +80,56 @@ TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) { EXPECT_EQ("", XmlEncodeWithDefault("\xc2", "")); } +TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + omaha_request_params.set_device_requisition("device requisition"); + OmahaRequestBuilderXml omaha_request{nullptr, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + OmahaAppData dlc_module_app = {.id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + .version = "", + .skip_update = false, + .is_dlc = false}; + + // Verify that the attributes that shouldn't be missing for Platform AppID are + // in fact present in the . + const string app = omaha_request.GetApp(dlc_module_app); + EXPECT_NE(string::npos, app.find("lang=")); + EXPECT_NE(string::npos, app.find("fw_version=")); + EXPECT_NE(string::npos, app.find("ec_version=")); + EXPECT_NE(string::npos, app.find("requisition=")); +} + +TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + omaha_request_params.set_device_requisition("device requisition"); + OmahaRequestBuilderXml omaha_request{nullptr, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + OmahaAppData dlc_module_app = { + .id = "_dlc_id", .version = "", .skip_update = false, .is_dlc = true}; + + // Verify that the attributes that should be missing for DLC AppIDs are in + // fact not present in the . + const string app = omaha_request.GetApp(dlc_module_app); + EXPECT_EQ(string::npos, app.find("lang=")); + EXPECT_EQ(string::npos, app.find("fw_version=")); + EXPECT_EQ(string::npos, app.find("ec_version=")); + EXPECT_EQ(string::npos, app.find("requisition=")); +} + TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { OmahaEvent omaha_event; OmahaRequestParams omaha_request_params{&fake_system_state_}; From 5fc00a2d517f7920ec7af9fb0da0bce329d9c9eb Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 8 Jan 2020 20:15:42 -0800 Subject: [PATCH 183/624] update_engine: Query Omaha with correct version/delta_okay during DLC Installations When DLC(s) are being installed, the Omaha request to check should pass a version of "0.0.0.0" instead of the same version as the platform and the delta_okay should always be false. During a DLC installation, the "" is also skipped for the platform and tests are included for those checks. This means that dlcservice should be extra cautious in not passing in DLC(s) that are already installed as update_engine can potentially overwrite the file that's already in use/installed+mounted. 
Example Omaha request for DLC installations: BUG=chromium:1039898 TEST=FEATURES=test emerge-$B update_engine Change-Id: Ibc1a29449e9244f38deb661d400d3fc569e7478f Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1992194 Commit-Queue: Jae Hoon Kim Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani --- omaha_request_builder_xml.cc | 16 +++--- omaha_request_builder_xml.h | 1 + omaha_request_builder_xml_unittest.cc | 83 +++++++++++++++++++++++++-- 3 files changed, 89 insertions(+), 11 deletions(-) diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 823894e5..8439b422 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -37,6 +37,7 @@ using std::string; namespace chromeos_update_engine { const int kNeverPinged = -1; +const char kNoVersion[] = "0.0.0.0"; bool XmlEncode(const string& input, string* output) { if (std::find_if(input.begin(), input.end(), [](const char c) { @@ -131,7 +132,7 @@ string OmahaRequestBuilderXml::GetAppBody(bool skip_updatecheck) const { // sent for this new version with a previous updatecheck. string prev_version; if (!prefs_->GetString(kPrefsPreviousVersion, &prev_version)) { - prev_version = "0.0.0.0"; + prev_version = kNoVersion; } // We only store a non-empty previous version value after a successful // update in the previous boot. After reporting it back to the server, @@ -142,7 +143,7 @@ string OmahaRequestBuilderXml::GetAppBody(bool skip_updatecheck) const { "previousversion=\"%s\">\n", OmahaEvent::kTypeRebootedAfterUpdate, OmahaEvent::kResultSuccess, - XmlEncodeWithDefault(prev_version, "0.0.0.0").c_str()); + XmlEncodeWithDefault(prev_version, kNoVersion).c_str()); LOG_IF(WARNING, !prefs_->SetString(kPrefsPreviousVersion, "")) << "Unable to reset the previous version."; } @@ -219,11 +220,11 @@ string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { if (params_->ShouldPowerwash()) { LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash " << "on downgrading to the version in the more stable channel"; - app_versions = "version=\"0.0.0.0\" from_version=\"" + - XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; + app_versions = "version=\"" + string(kNoVersion) + "\" from_version=\"" + + XmlEncodeWithDefault(app_data.version, kNoVersion) + "\" "; } else { app_versions = "version=\"" + - XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" "; + XmlEncodeWithDefault(app_data.version, kNoVersion) + "\" "; } string download_channel = params_->download_channel(); @@ -234,7 +235,8 @@ string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { XmlEncodeWithDefault(params_->current_channel()) + "\" "; } - string delta_okay_str = params_->delta_okay() ? "true" : "false"; + string delta_okay_str = + params_->delta_okay() && !params_->is_install() ? "true" : "false"; // If install_date_days is not set (e.g. its value is -1 ), don't // include the attribute. @@ -382,7 +384,7 @@ string OmahaRequestBuilderXml::GetApps() const { for (const auto& dlc_module_id : params_->dlc_module_ids()) { OmahaAppData dlc_module_app = { .id = params_->GetAppId() + "_" + dlc_module_id, - .version = params_->app_version(), + .version = params_->is_install() ? 
kNoVersion : params_->app_version(), .skip_update = false, .is_dlc = true}; app_xml += GetApp(dlc_module_app); diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h index 488be8ac..d7a81d34 100644 --- a/omaha_request_builder_xml.h +++ b/omaha_request_builder_xml.h @@ -39,6 +39,7 @@ namespace chromeos_update_engine { extern const int kNeverPinged; +extern const char kNoVersion[]; // This struct encapsulates the Omaha event information. For a // complete list of defined event types and results, see diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index ecab0e03..8cf74733 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -44,6 +44,13 @@ static string FindAttributeKeyValueInXml(const string& xml, return ""; return xml.substr(val_start_pos + key_with_quotes.size(), val_size); } +// Helper to find the count of substring in a string. +static size_t CountSubstringInString(const string& str, const string& substr) { + size_t count = 0, pos = 0; + while ((pos = str.find(substr, pos ? pos + 1 : 0)) != string::npos) + ++count; + return count; +} } // namespace class OmahaRequestBuilderXmlTest : public ::testing::Test { @@ -131,9 +138,8 @@ TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { } TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { - OmahaEvent omaha_event; OmahaRequestParams omaha_request_params{&fake_system_state_}; - OmahaRequestBuilderXml omaha_request{&omaha_event, + OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, false, false, @@ -153,9 +159,8 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { const string gen_session_id = base::GenerateGUID(); - OmahaEvent omaha_event; OmahaRequestParams omaha_request_params{&fake_system_state_}; - OmahaRequestBuilderXml omaha_request{&omaha_event, + OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, false, false, @@ -175,4 +180,74 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { EXPECT_EQ(gen_session_id, session_id); } +TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestBuilderXml omaha_request{nullptr, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + const string request_xml = omaha_request.GetRequest(); + EXPECT_EQ(1, CountSubstringInString(request_xml, " dlcs = {"dlc_1", "dlc_2"}; + omaha_request_params.set_dlc_module_ids(dlcs); + omaha_request_params.set_is_install(true); + OmahaRequestBuilderXml omaha_request{nullptr, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + const string request_xml = omaha_request.GetRequest(); + EXPECT_EQ(2, CountSubstringInString(request_xml, " size_t { + return request_xml.find(" Date: Fri, 1 Nov 2019 17:11:22 -0700 Subject: [PATCH 184/624] Allow update_engine to skip post-install The postinstall steps take long time to finish, even though most of them are optional. Therefore, reuse the flag run_post_install in InstallPlan to allow skipping optional postinstalls. 
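A condensed, self-contained sketch of the rule this change introduces, using stand-in structs rather than the real InstallPlan (the actual code is in the diff that follows):

  #include <vector>

  // Illustrative stand-ins for the relevant InstallPlan fields.
  struct Partition {
    bool run_postinstall = true;
    bool postinstall_optional = false;
  };

  struct Plan {
    bool run_post_install = true;  // cleared when RUN_POST_INSTALL=0 is passed
    std::vector<Partition> partitions;
  };

  // With RUN_POST_INSTALL=0 only partitions whose postinstall is marked
  // optional are skipped; a mandatory postinstall still runs, and its failure
  // still fails the update (the new unit test checks both cases).
  void ApplySkipRule(Plan* plan) {
    for (auto& part : plan->partitions) {
      if (!plan->run_post_install && part.postinstall_optional)
        part.run_postinstall = false;
    }
  }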
Bug: 136185424 Test: cancel the update during post-install, apply again with the header "RUN_POST_INSTALL=0", check the optional post-installs are skipped Change-Id: Ic5ab89b079dfd547714fd3d1664e044900f9eebe --- common/constants.cc | 3 +- payload_consumer/postinstall_runner_action.cc | 14 ++++---- .../postinstall_runner_action_unittest.cc | 34 +++++++++++++++++-- update_attempter_android.cc | 16 ++------- 4 files changed, 42 insertions(+), 25 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index 310f1b2b..5bfb2b6f 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -119,8 +119,7 @@ const char kPayloadPropertyNetworkId[] = "NETWORK_ID"; // Set "SWITCH_SLOT_ON_REBOOT=0" to skip marking the updated partitions active. // The default is 1 (always switch slot if update succeeded). const char kPayloadPropertySwitchSlotOnReboot[] = "SWITCH_SLOT_ON_REBOOT"; -// Set "RUN_POST_INSTALL=0" to skip running post install, this will only be -// honored if we're resuming an update and post install has already succeeded. +// Set "RUN_POST_INSTALL=0" to skip running optional post install. // The default is 1 (always run post install). const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL"; diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index c0c3956a..0f484936 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -70,10 +70,17 @@ void PostinstallRunnerAction::PerformAction() { partition_weight_.resize(install_plan_.partitions.size()); total_weight_ = 0; for (size_t i = 0; i < install_plan_.partitions.size(); ++i) { + auto& partition = install_plan_.partitions[i]; + if (!install_plan_.run_post_install && partition.postinstall_optional) { + partition.run_postinstall = false; + LOG(INFO) << "Skipping optional post-install for partition " + << partition.name << " according to install plan."; + } + // TODO(deymo): This code sets the weight to all the postinstall commands, // but we could remember how long they took in the past and use those // values. 
- partition_weight_[i] = install_plan_.partitions[i].run_postinstall; + partition_weight_[i] = partition.run_postinstall; total_weight_ += partition_weight_[i]; } accumulated_weight_ = 0; @@ -83,11 +90,6 @@ void PostinstallRunnerAction::PerformAction() { } void PostinstallRunnerAction::PerformPartitionPostinstall() { - if (!install_plan_.run_post_install) { - LOG(INFO) << "Skipping post-install according to install plan."; - return CompletePostinstall(ErrorCode::kSuccess); - } - if (install_plan_.download_url.empty()) { LOG(INFO) << "Skipping post-install during rollback"; return CompletePostinstall(ErrorCode::kSuccess); diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc index caee5e27..e9313f1a 100644 --- a/payload_consumer/postinstall_runner_action_unittest.cc +++ b/payload_consumer/postinstall_runner_action_unittest.cc @@ -102,6 +102,8 @@ class PostinstallRunnerActionTest : public ::testing::Test { bool powerwash_required, bool is_rollback); + void RunPostinstallActionWithInstallPlan(const InstallPlan& install_plan); + public: void ResumeRunningAction() { ASSERT_NE(nullptr, postinstall_action_); @@ -171,9 +173,6 @@ void PostinstallRunnerActionTest::RunPostinstallAction( const string& postinstall_program, bool powerwash_required, bool is_rollback) { - ActionProcessor processor; - processor_ = &processor; - auto feeder_action = std::make_unique>(); InstallPlan::Partition part; part.name = "part"; part.target_path = device_path; @@ -184,6 +183,14 @@ void PostinstallRunnerActionTest::RunPostinstallAction( install_plan.download_url = "http://127.0.0.1:8080/update"; install_plan.powerwash_required = powerwash_required; install_plan.is_rollback = is_rollback; + RunPostinstallActionWithInstallPlan(install_plan); +} + +void PostinstallRunnerActionTest::RunPostinstallActionWithInstallPlan( + const chromeos_update_engine::InstallPlan& install_plan) { + ActionProcessor processor; + processor_ = &processor; + auto feeder_action = std::make_unique>(); feeder_action->set_obj(install_plan); auto runner_action = std::make_unique( &fake_boot_control_, &fake_hardware_); @@ -305,6 +312,27 @@ TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) { EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled()); } +TEST_F(PostinstallRunnerActionTest, RunAsRootSkipOptionalPostinstallTest) { + InstallPlan::Partition part; + part.name = "part"; + part.target_path = "/dev/null"; + part.run_postinstall = true; + part.postinstall_path = kPostinstallDefaultScript; + part.postinstall_optional = true; + InstallPlan install_plan; + install_plan.partitions = {part}; + install_plan.download_url = "http://127.0.0.1:8080/update"; + + // Optional postinstalls will be skipped, and the postinstall action succeeds. + RunPostinstallActionWithInstallPlan(install_plan); + EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_); + + part.postinstall_optional = false; + install_plan.partitions = {part}; + RunPostinstallActionWithInstallPlan(install_plan); + EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_); +} + // Check that the failures from the postinstall script cause the action to // fail. 
TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) { diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 263498b5..cc891e7c 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -233,20 +233,8 @@ bool UpdateAttempterAndroid::ApplyPayload( install_plan_.switch_slot_on_reboot = GetHeaderAsBool(headers[kPayloadPropertySwitchSlotOnReboot], true); - install_plan_.run_post_install = true; - // Optionally skip post install if and only if: - // a) we're resuming - // b) post install has already succeeded before - // c) RUN_POST_INSTALL is set to 0. - if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) { - bool post_install_succeeded = false; - if (prefs_->GetBoolean(kPrefsPostInstallSucceeded, - &post_install_succeeded) && - post_install_succeeded) { - install_plan_.run_post_install = - GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true); - } - } + install_plan_.run_post_install = + GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true); // Skip writing verity if we're resuming and verity has already been written. install_plan_.write_verity = true; From 0b664d1964f88d865643b7e157842bd9ee4eed0e Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 13 Jan 2020 18:06:54 -0800 Subject: [PATCH 185/624] DynamicPartitionControl: document FinishUpdate Test: none Change-Id: I05d38e913671b120bc804a867cc82a442e8dd379 --- common/dynamic_partition_control_interface.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index c17bafbb..c171fb5d 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -69,6 +69,8 @@ class DynamicPartitionControlInterface { const DeltaArchiveManifest& manifest, bool update) = 0; + // After writing to new partitions, before rebooting into the new slot, call + // this function to indicate writes to new partitions are done. virtual bool FinishUpdate() = 0; }; From f033ecb6329c9bab43c91b1a2af812d3d2dd7daa Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 7 Jan 2020 18:13:56 -0800 Subject: [PATCH 186/624] DynamicPartitionControl: Add required_size to Prepare Add out parameter required_size to PreparePartitionsForUpdate to indicate the total size required on /userdata in order to apply the update. Bug: 138808058 Test: update_engine_unittests Change-Id: I2768d13671e212fd24a1a22811b50c9738834459 --- common/dynamic_partition_control_interface.h | 7 +++++- common/dynamic_partition_control_stub.cc | 3 ++- common/dynamic_partition_control_stub.h | 3 ++- dynamic_partition_control_android.cc | 23 ++++++++++++++----- dynamic_partition_control_android.h | 6 +++-- dynamic_partition_control_android_unittest.cc | 17 ++++++++++---- mock_dynamic_partition_control.h | 5 ++-- payload_consumer/delta_performer.cc | 3 ++- 8 files changed, 49 insertions(+), 18 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index c171fb5d..19bb5231 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -64,10 +64,15 @@ class DynamicPartitionControlInterface { // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the // device would be mapped in an inconsistent way. // If |update| is set, create snapshots and writes super partition metadata. 
+ // If |required_size| is not null and call fails due to insufficient space, + // |required_size| will be set to total free space required on userdata + // partition to apply the update. Otherwise (call succeeds, or fails + // due to other errors), |required_size| is set to zero. virtual bool PreparePartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest, - bool update) = 0; + bool update, + uint64_t* required_size) = 0; // After writing to new partitions, before rebooting into the new slot, call // this function to indicate writes to new partitions are done. diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index bc792c8c..974cd1b4 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -43,7 +43,8 @@ bool DynamicPartitionControlStub::PreparePartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest, - bool update) { + bool update, + uint64_t* required_size) { return true; } diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 1704f056..09990a75 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -35,7 +35,8 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { bool PreparePartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest, - bool update) override; + bool update, + uint64_t* required_size) override; bool FinishUpdate() override; }; diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 4ad02c74..bb854800 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -50,6 +50,7 @@ using android::fs_mgr::MetadataBuilder; using android::fs_mgr::Partition; using android::fs_mgr::PartitionOpener; using android::fs_mgr::SlotSuffixForSlotNumber; +using android::snapshot::SnapshotManager; using android::snapshot::SourceCopyOperationIsClone; namespace chromeos_update_engine { @@ -92,7 +93,7 @@ DynamicPartitionControlAndroid::DynamicPartitionControlAndroid() GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions)), virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) { if (GetVirtualAbFeatureFlag().IsEnabled()) { - snapshot_ = android::snapshot::SnapshotManager::New(); + snapshot_ = SnapshotManager::New(); CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager."; } } @@ -372,9 +373,13 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest, - bool update) { + bool update, + uint64_t* required_size) { source_slot_ = source_slot; target_slot_ = target_slot; + if (required_size != nullptr) { + *required_size = 0; + } if (fs_mgr_overlayfs_is_setup()) { // Non DAP devices can use overlayfs as well. @@ -420,7 +425,7 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( // - If !target_supports_snapshot_, explicitly CancelUpdate(). 
if (target_supports_snapshot_) { return PrepareSnapshotPartitionsForUpdate( - source_slot, target_slot, manifest); + source_slot, target_slot, manifest, required_size); } if (!snapshot_->CancelUpdate()) { LOG(ERROR) << "Cannot cancel previous update."; @@ -473,13 +478,19 @@ bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, - const DeltaArchiveManifest& manifest) { + const DeltaArchiveManifest& manifest, + uint64_t* required_size) { if (!snapshot_->BeginUpdate()) { LOG(ERROR) << "Cannot begin new update."; return false; } - if (!snapshot_->CreateUpdateSnapshots(manifest)) { - LOG(ERROR) << "Cannot create update snapshots."; + auto ret = snapshot_->CreateUpdateSnapshots(manifest); + if (!ret) { + LOG(ERROR) << "Cannot create update snapshots: " << ret.string(); + if (required_size != nullptr && + ret.error_code() == SnapshotManager::Return::ErrorCode::NO_SPACE) { + *required_size = ret.required_size(); + } return false; } return true; diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 13fbb1ae..a79f41ac 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -42,7 +42,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { bool PreparePartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, const DeltaArchiveManifest& manifest, - bool update) override; + bool update, + uint64_t* required_size) override; bool FinishUpdate() override; // Return the device for partition |partition_name| at slot |slot|. @@ -151,7 +152,8 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // Virtual A/B update. bool PrepareSnapshotPartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, - const DeltaArchiveManifest& manifest); + const DeltaArchiveManifest& manifest, + uint64_t* required_size); enum class DynamicPartitionDeviceStatus { SUCCESS, diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 207a97e0..3e8375c1 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -120,7 +120,11 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { } bool PreparePartitionsForUpdate(const PartitionSizes& partition_sizes) { return dynamicControl().PreparePartitionsForUpdate( - source(), target(), PartitionSizesToManifest(partition_sizes), true); + source(), + target(), + PartitionSizesToManifest(partition_sizes), + true, + nullptr); } void SetSlots(const TestParam& slots) { slots_ = slots; } @@ -317,7 +321,7 @@ TEST_P(DynamicPartitionControlAndroidTestP, // DynamicPartitionControlAndroidTest::PreparePartitionsForUpdate(), since we // don't want any default group in the PartitionMetadata. EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate( - source(), target(), {}, true)); + source(), target(), {}, true, nullptr)); // Should use dynamic source partitions. EXPECT_CALL(dynamicControl(), GetState(S("system"))) @@ -371,7 +375,8 @@ TEST_P(DynamicPartitionControlAndroidTestP, source(), target(), PartitionSizesToManifest({{"system", 2_GiB}, {"vendor", 1_GiB}}), - false)); + false, + nullptr)); // Dynamic partition "system". 
EXPECT_CALL(dynamicControl(), GetState(S("system"))) @@ -622,7 +627,11 @@ TEST_F(DynamicPartitionControlAndroidTest, ApplyingToCurrentSlot) { TEST_P(DynamicPartitionControlAndroidTestP, ShouldSkipOperationTest) { ASSERT_TRUE(dynamicControl().PreparePartitionsForUpdate( - source(), target(), PartitionSizesToManifest({{"foo", 4_MiB}}), false)); + source(), + target(), + PartitionSizesToManifest({{"foo", 4_MiB}}), + false, + nullptr)); dynamicControl().set_fake_mapped_devices({T("foo")}); InstallOperation iop; diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 09b825d9..ffabac77 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -38,8 +38,9 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { std::string*)); MOCK_METHOD0(Cleanup, void()); MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); - MOCK_METHOD4(PreparePartitionsForUpdate, - bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool)); + MOCK_METHOD5( + PreparePartitionsForUpdate, + bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD0(FinishUpdate, bool()); }; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index c49474ce..8cec0768 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -951,7 +951,8 @@ bool DeltaPerformer::PreparePartitionsForUpdate() { boot_control_->GetCurrentSlot(), install_plan_->target_slot, manifest_, - !metadata_updated)) { + !metadata_updated, + nullptr /* required_size */)) { LOG(ERROR) << "Unable to initialize partition metadata for slot " << BootControlInterface::SlotName(install_plan_->target_slot); return false; From b9d6357b01e91033e76d44d7a4237ebc39a2f1f6 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 9 Jan 2020 17:50:46 -0800 Subject: [PATCH 187/624] DeltaPerformer: add static PreparePartitionsForUpdate Expose a static PreparePartitionsForUpdate for implementation of AllocateSpaceForPayload. - If this function is called multiple times with the same 'update_check_response_hash', calls after the first call has no effect. - If this function is called again with a different 'update_check_response_hash', space is re-allocated. - DeltaPerformer::ResetUpdateProgress deletes the stored hash and cause the next PreparePartitionsForUpdate to always re-allocate space. - DeltaPerformer::ParseManifestPartitions now set error code to kNotEnoughSpace when appropriate. 
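A self-contained sketch of the bookkeeping described above (a std::map stands in for PrefsInterface, and the key mirrors kPrefsDynamicPartitionMetadataUpdated, which now stores a payload hash instead of a boolean; the function name is illustrative, not the production signature):

  #include <map>
  #include <string>

  using Prefs = std::map<std::string, std::string>;

  // Returns true on success; *reallocated tells the caller whether partition
  // metadata was (re)written, i.e. whether this was treated as a new payload.
  bool PrepareForPayload(Prefs* prefs,
                         const std::string& payload_hash,
                         bool* reallocated) {
    const std::string key = "dynamic-partition-metadata-updated";
    const std::string last_hash = (*prefs)[key];
    // Same non-empty hash as the previous call: partitions were already
    // prepared, so do not touch the disk again.
    const bool is_resume = !payload_hash.empty() && last_hash == payload_hash;
    *reallocated = !is_resume;
    if (!is_resume)
      (*prefs)[key] = payload_hash;  // new payload: prepare again, remember it
    return true;
  }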
Test: apply an OTA manually Bug: 138808058 Change-Id: I6fb60016088a3133af3fc961196f63e7d079ae93 --- payload_consumer/delta_performer.cc | 68 +++++++++++++++---- payload_consumer/delta_performer.h | 21 +++++- .../delta_performer_integration_test.cc | 2 +- 3 files changed, 75 insertions(+), 16 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 8cec0768..bb7c98c4 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -925,8 +925,13 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { } if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) { - if (!PreparePartitionsForUpdate()) { - *error = ErrorCode::kInstallDeviceOpenError; + uint64_t required_size = 0; + if (!PreparePartitionsForUpdate(&required_size)) { + if (required_size > 0) { + *error = ErrorCode::kNotEnoughSpace; + } else { + *error = ErrorCode::kInstallDeviceOpenError; + } return false; } } @@ -944,21 +949,56 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { return true; } -bool DeltaPerformer::PreparePartitionsForUpdate() { - bool metadata_updated = false; - prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated); - if (!boot_control_->GetDynamicPartitionControl()->PreparePartitionsForUpdate( - boot_control_->GetCurrentSlot(), - install_plan_->target_slot, - manifest_, - !metadata_updated, - nullptr /* required_size */)) { +bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) { + // Call static PreparePartitionsForUpdate with hash from + // kPrefsUpdateCheckResponseHash to ensure hash of payload that space is + // preallocated for is the same as the hash of payload being applied. + string update_check_response_hash; + ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash, + &update_check_response_hash)); + return PreparePartitionsForUpdate(prefs_, + boot_control_, + install_plan_->target_slot, + manifest_, + update_check_response_hash, + required_size); +} + +bool DeltaPerformer::PreparePartitionsForUpdate( + PrefsInterface* prefs, + BootControlInterface* boot_control, + BootControlInterface::Slot target_slot, + const DeltaArchiveManifest& manifest, + const std::string& update_check_response_hash, + uint64_t* required_size) { + string last_hash; + ignore_result( + prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash)); + + bool is_resume = !update_check_response_hash.empty() && + last_hash == update_check_response_hash; + + if (is_resume) { + LOG(INFO) << "Using previously prepared partitions for update. hash = " + << last_hash; + } else { + LOG(INFO) << "Preparing partitions for new update. 
last hash = " + << last_hash << ", new hash = " << update_check_response_hash; + } + + if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate( + boot_control->GetCurrentSlot(), + target_slot, + manifest, + !is_resume /* should update */, + required_size)) { LOG(ERROR) << "Unable to initialize partition metadata for slot " - << BootControlInterface::SlotName(install_plan_->target_slot); + << BootControlInterface::SlotName(target_slot); return false; } - TEST_AND_RETURN_FALSE( - prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true)); + + TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated, + update_check_response_hash)); LOG(INFO) << "PreparePartitionsForUpdate done."; return true; diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 4c64dfa9..6dbd3b84 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -179,6 +179,24 @@ class DeltaPerformer : public FileWriter { const FileDescriptorPtr source_fd, ErrorCode* error); + // Initialize partitions and allocate required space for an update with the + // given |manifest|. |update_check_response_hash| is used to check if the + // previous call to this function corresponds to the same payload. + // - Same payload: not make any persistent modifications (not write to disk) + // - Different payload: make persistent modifications (write to disk) + // In both cases, in-memory flags are updated. This function must be called + // on the payload at least once (to update in-memory flags) before writing + // (applying) the payload. + // If error due to insufficient space, |required_size| is set to the required + // size on the device to apply the payload. + static bool PreparePartitionsForUpdate( + PrefsInterface* prefs, + BootControlInterface* boot_control, + BootControlInterface::Slot target_slot, + const DeltaArchiveManifest& manifest, + const std::string& update_check_response_hash, + uint64_t* required_size); + private: friend class DeltaPerformerTest; friend class DeltaPerformerIntegrationTest; @@ -289,7 +307,8 @@ class DeltaPerformer : public FileWriter { // After install_plan_ is filled with partition names and sizes, initialize // metadata of partitions and map necessary devices before opening devices. - bool PreparePartitionsForUpdate(); + // Also see comment for the static PreparePartitionsForUpdate(). + bool PreparePartitionsForUpdate(uint64_t* required_size); // Update Engine preference store. 
PrefsInterface* prefs_; diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index a2ad77b9..4797137b 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -728,7 +728,7 @@ static void ApplyDeltaFile(bool full_kernel, .WillRepeatedly(Return(true)); EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _)) .WillRepeatedly(Return(true)); - EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _)) + EXPECT_CALL(prefs, SetString(kPrefsDynamicPartitionMetadataUpdated, _)) .WillRepeatedly(Return(true)); if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) { EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _)) From 6561303cc3020e1049e1fd67f999d19097e72ede Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 9 Jan 2020 17:52:13 -0800 Subject: [PATCH 188/624] Implement UpdateAttempterAndroid::AllocateSpaceForPayload - If insufficient space, error is not set, and required size is returned. - If successful, error is not set, and 0 is returned. - If other failures, error is set, and 0 is returned. Test: apply OTA Bug: 138808058 Change-Id: If0a8834afb841ce714379556836fe99c5e3da8ac --- update_attempter_android.cc | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/update_attempter_android.cc b/update_attempter_android.cc index cc891e7c..3292dd55 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -893,8 +893,34 @@ uint64_t UpdateAttempterAndroid::AllocateSpaceForPayload( const std::string& metadata_filename, const vector& key_value_pair_headers, brillo::ErrorPtr* error) { - // TODO(elsk): implement b/138808058 - LogAndSetError(error, FROM_HERE, "Not implemented."); + DeltaArchiveManifest manifest; + if (!VerifyPayloadParseManifest(metadata_filename, &manifest, error)) { + return 0; + } + std::map headers; + if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) { + return 0; + } + + string payload_id = GetPayloadId(headers); + uint64_t required_size = 0; + if (!DeltaPerformer::PreparePartitionsForUpdate(prefs_, + boot_control_, + GetTargetSlot(), + manifest, + payload_id, + &required_size)) { + if (required_size == 0) { + LogAndSetError(error, FROM_HERE, "Failed to allocate space for payload."); + return 0; + } else { + LOG(ERROR) << "Insufficient space for payload: " << required_size + << " bytes"; + return required_size; + } + } + + LOG(INFO) << "Successfully allocated space for payload."; return 0; } From 55c2bfe0f7ed671b8e1b59b38e1e485a1e937886 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 13 Jan 2020 17:01:19 -0800 Subject: [PATCH 189/624] DeltaPerformer: Don't destroy previously allocated space After allocateSpaceForPayload() is called, applyPayload() with the same hash should not destroy the allocated files then re-allocate. Fix it so that DeltaPerformer::ResetUpdateProgress skip resetting the hash in this case. 
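A toy version of the reset behavior after this change (pref key strings here are illustrative stand-ins, not the real constants): a plain full reset still forgets the prepared-partitions hash, but ApplyPayload() now asks to keep it, so space reserved by an earlier allocateSpaceForPayload() call for the same payload id is not thrown away and re-created.

  #include <map>
  #include <string>

  using Prefs = std::map<std::string, std::string>;

  void ResetProgress(Prefs* prefs, bool skip_dynamic_partition_metadata) {
    prefs->erase("update-state-next-operation");
    prefs->erase("post-install-succeeded");
    // Only a full, non-skipping reset drops the recorded hash, forcing the
    // next PreparePartitionsForUpdate() to re-allocate space.
    if (!skip_dynamic_partition_metadata)
      prefs->erase("dynamic-partition-metadata-updated");
  }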
Bug: 138808058 Test: update_engine_client --allocate, then --apply Change-Id: Ibc2a7449a6953a707d1c4f23ee11d572f498457c --- payload_consumer/delta_performer.cc | 11 +++++++++-- payload_consumer/delta_performer.h | 11 ++++++++--- update_attempter_android.cc | 9 ++++++++- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index bb7c98c4..d8e58b4b 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1959,7 +1959,10 @@ bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs, return true; } -bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) { +bool DeltaPerformer::ResetUpdateProgress( + PrefsInterface* prefs, + bool quick, + bool skip_dynamic_partititon_metadata_updated) { TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation, kUpdateStateOperationInvalid)); if (!quick) { @@ -1973,7 +1976,11 @@ bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) { prefs->SetInt64(kPrefsResumedUpdateFailures, 0); prefs->Delete(kPrefsPostInstallSucceeded); prefs->Delete(kPrefsVerityWritten); - prefs->Delete(kPrefsDynamicPartitionMetadataUpdated); + + if (!skip_dynamic_partititon_metadata_updated) { + LOG(INFO) << "Resetting recorded hash for prepared partitions."; + prefs->Delete(kPrefsDynamicPartitionMetadataUpdated); + } } return true; } diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 6dbd3b84..01fcc5c9 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -143,9 +143,14 @@ class DeltaPerformer : public FileWriter { // Resets the persistent update progress state to indicate that an update // can't be resumed. Performs a quick update-in-progress reset if |quick| is - // true, otherwise resets all progress-related update state. Returns true on - // success, false otherwise. - static bool ResetUpdateProgress(PrefsInterface* prefs, bool quick); + // true, otherwise resets all progress-related update state. + // If |skip_dynamic_partititon_metadata_updated| is true, do not reset + // dynamic-partition-metadata-updated. + // Returns true on success, false otherwise. + static bool ResetUpdateProgress( + PrefsInterface* prefs, + bool quick, + bool skip_dynamic_partititon_metadata_updated = false); // Attempts to parse the update metadata starting from the beginning of // |payload|. On success, returns kMetadataParseSuccess. Returns diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 3292dd55..034b4ea2 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -217,7 +217,14 @@ bool UpdateAttempterAndroid::ApplyPayload( install_plan_.is_resume = !payload_id.empty() && DeltaPerformer::CanResumeUpdate(prefs_, payload_id); if (!install_plan_.is_resume) { - if (!DeltaPerformer::ResetUpdateProgress(prefs_, false)) { + // No need to reset dynamic_partititon_metadata_updated. If previous calls + // to AllocateSpaceForPayload uses the same payload_id, reuse preallocated + // space. Otherwise, DeltaPerformer re-allocates space when the payload is + // applied. 
+ if (!DeltaPerformer::ResetUpdateProgress( + prefs_, + false /* quick */, + true /* skip_dynamic_partititon_metadata_updated */)) { LOG(WARNING) << "Unable to reset the update progress."; } if (!prefs_->SetString(kPrefsUpdateCheckResponseHash, payload_id)) { From 82cd9d3aad1ea0fac770c20a1479c73496df4cab Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 10 Jan 2020 14:40:25 -0800 Subject: [PATCH 190/624] update_engine_client_android: Add --allocate option Test: update_engine_client --allocate --header=... --metadata=... Test: run it twice with the same hash in header; the second time does nothing. Test: run it twice with different hash; the second time re-allocates Test: apply OTA after allocation. Bug: 138808058 Change-Id: I3b125bd7e3e340f2f46dd3c58f5b9128ba70e045 --- update_engine_client_android.cc | 44 +++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc index 6863799f..7766f793 100644 --- a/update_engine_client_android.cc +++ b/update_engine_client_android.cc @@ -72,6 +72,8 @@ class UpdateEngineClientAndroid : public brillo::Daemon { // Called whenever the UpdateEngine daemon dies. void UpdateEngineServiceDied(); + static std::vector ParseHeaders(const std::string& arg); + // Copy of argc and argv passed to main(). int argc_; char** argv_; @@ -123,15 +125,16 @@ int UpdateEngineClientAndroid::OnInit() { DEFINE_string(headers, "", "A list of key-value pairs, one element of the list per line. " - "Used when --update is passed."); + "Used when --update or --allocate is passed."); DEFINE_bool(verify, false, "Given payload metadata, verify if the payload is applicable."); + DEFINE_bool(allocate, false, "Given payload metadata, allocate space."); DEFINE_string(metadata, "/data/ota_package/metadata", "The path to the update payload metadata. " - "Used when --verify is passed."); + "Used when --verify or --allocate is passed."); DEFINE_bool(suspend, false, "Suspend an ongoing update and exit."); DEFINE_bool(resume, false, "Resume a suspended update."); @@ -200,6 +203,25 @@ int UpdateEngineClientAndroid::OnInit() { return ExitWhenIdle(status); } + if (FLAGS_allocate) { + auto headers = ParseHeaders(FLAGS_headers); + int64_t ret = 0; + Status status = service_->allocateSpaceForPayload( + android::String16{FLAGS_metadata.data(), FLAGS_metadata.size()}, + headers, + &ret); + if (status.isOk()) { + if (ret == 0) { + LOG(INFO) << "Successfully allocated space for payload."; + } else { + LOG(INFO) << "Insufficient space; required " << ret << " bytes."; + } + } else { + LOG(INFO) << "Allocation failed."; + } + return ExitWhenIdle(status); + } + if (FLAGS_follow) { // Register a callback object with the service. 
callback_ = new UECallback(this); @@ -212,12 +234,7 @@ int UpdateEngineClientAndroid::OnInit() { } if (FLAGS_update) { - std::vector headers = base::SplitString( - FLAGS_headers, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); - std::vector and_headers; - for (const auto& header : headers) { - and_headers.push_back(android::String16{header.data(), header.size()}); - } + auto and_headers = ParseHeaders(FLAGS_headers); Status status = service_->applyPayload( android::String16{FLAGS_payload.data(), FLAGS_payload.size()}, FLAGS_offset, @@ -261,6 +278,17 @@ void UpdateEngineClientAndroid::UpdateEngineServiceDied() { QuitWithExitCode(1); } +std::vector UpdateEngineClientAndroid::ParseHeaders( + const std::string& arg) { + std::vector headers = base::SplitString( + arg, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); + std::vector and_headers; + for (const auto& header : headers) { + and_headers.push_back(android::String16{header.data(), header.size()}); + } + return and_headers; +} + } // namespace internal } // namespace chromeos_update_engine From b3ca6de42efe07f41541634c1e65fe5ed67f8e30 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Wed, 15 Jan 2020 16:16:02 -0700 Subject: [PATCH 191/624] drop COMMIT-QUEUE.ini Nothing reads this file anymore. BUG=chromium:1025955 TEST=CQ passes Exempt-From-Owner-Approval: cleanup removing unused file Change-Id: I75173f2bfc1d9ba652223216cf893d6a86b27467 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2003634 Tested-by: Ross Zwisler Auto-Submit: Ross Zwisler Reviewed-by: Sean Abraham Commit-Queue: Sean Abraham --- COMMIT-QUEUE.ini | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 COMMIT-QUEUE.ini diff --git a/COMMIT-QUEUE.ini b/COMMIT-QUEUE.ini deleted file mode 100644 index ed99b9ff..00000000 --- a/COMMIT-QUEUE.ini +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2017 The Chromium OS Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# Per-project Commit Queue settings. -# Documentation: http://goo.gl/5J7oND - -[GENERAL] - -# Moblab testing is needed because of the udpate_payloads ebuild. -pre-cq-configs: default guado_moblab-no-vmtest-pre-cq From 75cc9f2ce877ff03d167f15fc791ef5718ab0530 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Fri, 2 Aug 2019 14:53:38 -0700 Subject: [PATCH 192/624] Report metrics to statsd from update engine Call the proper logging functions after an update attempt or a successful update. This is part of the effort for the new metrics mechanism migration. 
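[Editor's note] For orientation, a minimal standalone sketch of the enum-offset scheme this change uses when writing to statsd. The 10000 offset and GetStatsdEnumValue() appear in the metrics_reporter_android.cc hunk below; the concrete enum value 11 for UPDATE_CANCELED is assumed here purely for illustration.

#include <cstdint>
#include <iostream>

namespace {
// statsd atoms reserve 0 for an unknown state, so every update_engine enum
// value is shifted by a fixed offset before being reported.
constexpr int32_t kMetricsReporterEnumOffset = 10000;

int32_t GetStatsdEnumValue(int32_t value) {
  return kMetricsReporterEnumOffset + value;
}
}  // namespace

int main() {
  std::cout << GetStatsdEnumValue(0) << std::endl;   // ErrorCode::SUCCESS -> 10000
  std::cout << GetStatsdEnumValue(11) << std::endl;  // e.g. UPDATE_CANCELED -> 10011
  return 0;
}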
Bug: 137682371 Test: run statsd_testdrive and check events Change-Id: I1174ff37d049172a8a6b14d47aa40c54f26be183 Merged-In: I1174ff37d049172a8a6b14d47aa40c54f26be183 (cherry picked from commit a215b59432218842c99b244107b15d4894944577) --- Android.bp | 1 + metrics_reporter_android.cc | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/Android.bp b/Android.bp index e457cd1b..693abf8c 100644 --- a/Android.bp +++ b/Android.bp @@ -266,6 +266,7 @@ cc_defaults { "liblog", "libmetricslogger", "libssl", + "libstatslog", "libutils", ], } diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc index 9165f0d5..4165c143 100644 --- a/metrics_reporter_android.cc +++ b/metrics_reporter_android.cc @@ -16,10 +16,14 @@ #include "update_engine/metrics_reporter_android.h" +#include + #include #include +#include #include +#include #include "update_engine/common/constants.h" @@ -28,6 +32,16 @@ void LogHistogram(const std::string& metrics, int value) { android::metricslogger::LogHistogram(metrics, value); LOG(INFO) << "uploading " << value << " to histogram for metric " << metrics; } + +// A number offset adds on top of the enum value. e.g. ErrorCode::SUCCESS will +// be reported as 10000, and AttemptResult::UPDATE_CANCELED will be reported as +// 10011. The keeps the ordering of update engine's enum definition when statsd +// atoms reserve the value 0 for unknown state. +constexpr auto kMetricsReporterEnumOffset = 10000; + +int32_t GetStatsdEnumValue(int32_t value) { + return kMetricsReporterEnumOffset + value; +} } // namespace namespace chromeos_update_engine { @@ -100,6 +114,17 @@ void MetricsReporterAndroid::ReportUpdateAttemptMetrics( static_cast(attempt_result)); LogHistogram(metrics::kMetricsUpdateEngineAttemptErrorCode, static_cast(error_code)); + + android::util::stats_write( + android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED, + attempt_number, + GetStatsdEnumValue(static_cast(payload_type)), + duration.InMinutes(), + duration_uptime.InMinutes(), + payload_size_mib, + GetStatsdEnumValue(static_cast(attempt_result)), + GetStatsdEnumValue(static_cast(error_code)), + android::base::GetProperty("ro.build.fingerprint", "").c_str()); } void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics( @@ -148,6 +173,16 @@ void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics( total_duration.InMinutes()); LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateRebootCount, reboot_count); + + android::util::stats_write( + android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED, + attempt_count, + GetStatsdEnumValue(static_cast(payload_type)), + payload_size_mib, + total_bytes_downloaded, + download_overhead_percentage, + total_duration.InMinutes(), + reboot_count); } void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() { From 62300250fe91896f1e31a6a62f4990d296398460 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Wed, 15 Jan 2020 20:57:45 -0800 Subject: [PATCH 193/624] Remove libmetricslogger dependency from update_engine As we migrate the update_engine's metrics away from Tron, stop reporting them using libmetricslogger. 
Bug: 147776349 Test: unittests pass Change-Id: Ide355d0125ab33f93b721b0c5abdcb139f339fb1 --- Android.bp | 1 - metrics_reporter_android.cc | 87 +++---------------------------------- 2 files changed, 6 insertions(+), 82 deletions(-) diff --git a/Android.bp b/Android.bp index 693abf8c..01ea4fa4 100644 --- a/Android.bp +++ b/Android.bp @@ -264,7 +264,6 @@ cc_defaults { "libcurl", "libcutils", "liblog", - "libmetricslogger", "libssl", "libstatslog", "libutils", diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc index 4165c143..9cef43cd 100644 --- a/metrics_reporter_android.cc +++ b/metrics_reporter_android.cc @@ -22,20 +22,14 @@ #include #include -#include #include #include "update_engine/common/constants.h" namespace { -void LogHistogram(const std::string& metrics, int value) { - android::metricslogger::LogHistogram(metrics, value); - LOG(INFO) << "uploading " << value << " to histogram for metric " << metrics; -} - // A number offset adds on top of the enum value. e.g. ErrorCode::SUCCESS will // be reported as 10000, and AttemptResult::UPDATE_CANCELED will be reported as -// 10011. The keeps the ordering of update engine's enum definition when statsd +// 10011. This keeps the ordering of update engine's enum definition when statsd // atoms reserve the value 0 for unknown state. constexpr auto kMetricsReporterEnumOffset = 10000; @@ -48,41 +42,6 @@ namespace chromeos_update_engine { namespace metrics { -// The histograms are defined in: -// depot/google3/analysis/uma/configs/clearcut/TRON/histograms.xml -constexpr char kMetricsUpdateEngineAttemptNumber[] = - "ota_update_engine_attempt_number"; -constexpr char kMetricsUpdateEngineAttemptResult[] = - "ota_update_engine_attempt_result"; -constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] = - "ota_update_engine_attempt_fixed_duration_boottime_in_minutes"; -constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] = - "ota_update_engine_attempt_duration_monotonic_in_minutes"; -constexpr char kMetricsUpdateEngineAttemptErrorCode[] = - "ota_update_engine_attempt_error_code"; -constexpr char kMetricsUpdateEngineAttemptPayloadSizeMiB[] = - "ota_update_engine_attempt_payload_size_mib"; -constexpr char kMetricsUpdateEngineAttemptPayloadType[] = - "ota_update_engine_attempt_payload_type"; -constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] = - "ota_update_engine_attempt_fixed_current_bytes_downloaded_mib"; - -constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] = - "ota_update_engine_successful_update_attempt_count"; -constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] = - "ota_update_engine_successful_update_fixed_total_duration_in_minutes"; -constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] = - "ota_update_engine_successful_update_payload_size_mib"; -constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] = - "ota_update_engine_successful_update_payload_type"; -constexpr char kMetricsUpdateEngineSuccessfulUpdateRebootCount[] = - "ota_update_engine_successful_update_reboot_count"; -constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB[] = - "ota_update_engine_successful_update_total_bytes_downloaded_mib"; -constexpr char - kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage[] = - "ota_update_engine_successful_update_download_overhead_percentage"; - std::unique_ptr CreateMetricsReporter() { return std::make_unique(); } @@ -98,23 +57,7 @@ void MetricsReporterAndroid::ReportUpdateAttemptMetrics( int64_t 
payload_size, metrics::AttemptResult attempt_result, ErrorCode error_code) { - LogHistogram(metrics::kMetricsUpdateEngineAttemptNumber, attempt_number); - LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadType, - static_cast(payload_type)); - LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationInMinutes, - duration.InMinutes()); - LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationUptimeInMinutes, - duration_uptime.InMinutes()); - int64_t payload_size_mib = payload_size / kNumBytesInOneMiB; - LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadSizeMiB, - payload_size_mib); - - LogHistogram(metrics::kMetricsUpdateEngineAttemptResult, - static_cast(attempt_result)); - LogHistogram(metrics::kMetricsUpdateEngineAttemptErrorCode, - static_cast(error_code)); - android::util::stats_write( android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED, attempt_number, @@ -133,8 +76,9 @@ void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics( DownloadSource /* download_source */, metrics::DownloadErrorCode /* payload_download_error_code */, metrics::ConnectionType /* connection_type */) { - LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB, - payload_bytes_downloaded / kNumBytesInOneMiB); + // TODO(xunchang) add statsd reporting + LOG(INFO) << "Current update attempt downloads " + << payload_bytes_downloaded / kNumBytesInOneMiB << " bytes data"; } void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics( @@ -148,31 +92,11 @@ void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics( base::TimeDelta /* total_duration_uptime */, int reboot_count, int /* url_switch_count */) { - LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount, - attempt_count); - LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadType, - static_cast(payload_type)); - int64_t payload_size_mib = payload_size / kNumBytesInOneMiB; - LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB, - payload_size_mib); - int64_t total_bytes_downloaded = 0; for (size_t i = 0; i < kNumDownloadSources; i++) { total_bytes_downloaded += num_bytes_downloaded[i] / kNumBytesInOneMiB; } - LogHistogram( - metrics::kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB, - total_bytes_downloaded); - LogHistogram( - metrics::kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage, - download_overhead_percentage); - - LogHistogram( - metrics::kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes, - total_duration.InMinutes()); - LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateRebootCount, - reboot_count); android::util::stats_write( android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED, @@ -188,7 +112,8 @@ void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics( void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() { int attempt_result = static_cast(metrics::AttemptResult::kAbnormalTermination); - LogHistogram(metrics::kMetricsUpdateEngineAttemptResult, attempt_result); + // TODO(xunchang) add statsd reporting + LOG(INFO) << "Abnormally terminated update attempt result " << attempt_result; } }; // namespace chromeos_update_engine From 0850bcab2bc7fe14de3b16fe5b6603d586a8597a Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 16 Jan 2020 15:14:07 -0800 Subject: [PATCH 194/624] SnapshotManager::Return -> Return Class is moved out of SnapshotManager. 
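[Editor's note] A self-contained illustration of the call-site pattern touched by this rename in dynamic_partition_control_android.cc: the error code is compared against Return::ErrorCode::NO_SPACE and, when space is the problem, the required size is surfaced to the caller. Return here is a simplified stand-in, not the real android::snapshot type.

#include <cstdint>
#include <iostream>

struct Return {  // stand-in for android::snapshot::Return
  enum class ErrorCode { SUCCESS, NO_SPACE, ERROR };
  ErrorCode code = ErrorCode::SUCCESS;
  uint64_t required = 0;
  bool is_ok() const { return code == ErrorCode::SUCCESS; }
  ErrorCode error_code() const { return code; }
  uint64_t required_size() const { return required; }
};

bool PrepareSnapshots(const Return& ret, uint64_t* required_size) {
  if (!ret.is_ok()) {
    if (required_size != nullptr &&
        ret.error_code() == Return::ErrorCode::NO_SPACE) {
      *required_size = ret.required_size();  // let the caller free up space
    }
    return false;
  }
  return true;
}

int main() {
  uint64_t needed = 0;
  Return no_space{Return::ErrorCode::NO_SPACE, 123456789};
  std::cout << PrepareSnapshots(no_space, &needed) << " " << needed << std::endl;
  return 0;
}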
Test: builds Change-Id: I956d3cf839300822b3c88d7eada985e6379886b9 --- dynamic_partition_control_android.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index bb854800..bd07165c 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -50,6 +50,7 @@ using android::fs_mgr::MetadataBuilder; using android::fs_mgr::Partition; using android::fs_mgr::PartitionOpener; using android::fs_mgr::SlotSuffixForSlotNumber; +using android::snapshot::Return; using android::snapshot::SnapshotManager; using android::snapshot::SourceCopyOperationIsClone; @@ -488,7 +489,7 @@ bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate( if (!ret) { LOG(ERROR) << "Cannot create update snapshots: " << ret.string(); if (required_size != nullptr && - ret.error_code() == SnapshotManager::Return::ErrorCode::NO_SPACE) { + ret.error_code() == Return::ErrorCode::NO_SPACE) { *required_size = ret.required_size(); } return false; From 2257ee178b66938bb8b166342727c429297acae2 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 13 Jan 2020 18:33:00 -0800 Subject: [PATCH 195/624] Add DynamicPartitionControlInterface::CleanupSuccessfulUpdate This is a wrapper over SnapshotManager::WaitForMerge. It waits until the previous update is merged, then return. Bug: 138808328 Test: manual with update_engine_client Change-Id: If44854810f37dd959ffdf3f62f26528867a71fc8 --- common/dynamic_partition_control_interface.h | 9 +++++++++ common/dynamic_partition_control_stub.cc | 4 ++++ common/dynamic_partition_control_stub.h | 1 + dynamic_partition_control_android.cc | 16 ++++++++++++++++ dynamic_partition_control_android.h | 1 + mock_dynamic_partition_control.h | 1 + 6 files changed, 32 insertions(+) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 19bb5231..48cd9be1 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -22,6 +22,7 @@ #include #include +#include "update_engine/common/error_code.h" #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { @@ -77,6 +78,14 @@ class DynamicPartitionControlInterface { // After writing to new partitions, before rebooting into the new slot, call // this function to indicate writes to new partitions are done. virtual bool FinishUpdate() = 0; + + // Before applying the next update, call this function to clean up previous + // update files. This function blocks until delta files are merged into + // current OS partitions and finished cleaning up. + // - If successful, return kSuccess. + // - If any error, but caller should retry after reboot, return kError. + // - If any irrecoverable failures, return kDeviceCorrupted. 
+ virtual ErrorCode CleanupSuccessfulUpdate() = 0; }; } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 974cd1b4..cc36c5c9 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -52,4 +52,8 @@ bool DynamicPartitionControlStub::FinishUpdate() { return true; } +ErrorCode DynamicPartitionControlStub::CleanupSuccessfulUpdate() { + return ErrorCode::kError; +} + } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 09990a75..02575a13 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -39,6 +39,7 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { uint64_t* required_size) override; bool FinishUpdate() override; + ErrorCode CleanupSuccessfulUpdate() override; }; } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index bd07165c..881ff111 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -53,6 +53,7 @@ using android::fs_mgr::SlotSuffixForSlotNumber; using android::snapshot::Return; using android::snapshot::SnapshotManager; using android::snapshot::SourceCopyOperationIsClone; +using android::snapshot::UpdateState; namespace chromeos_update_engine { @@ -697,4 +698,19 @@ void DynamicPartitionControlAndroid::set_fake_mapped_devices( mapped_devices_ = fake; } +ErrorCode DynamicPartitionControlAndroid::CleanupSuccessfulUpdate() { + // Already reboot into new boot. Clean up. + if (!GetVirtualAbFeatureFlag().IsEnabled()) { + return ErrorCode::kSuccess; + } + auto ret = snapshot_->WaitForMerge(); + if (ret.is_ok()) { + return ErrorCode::kSuccess; + } + if (ret.error_code() == Return::ErrorCode::NEEDS_REBOOT) { + return ErrorCode::kError; + } + return ErrorCode::kDeviceCorrupted; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index a79f41ac..ba23e7c6 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -45,6 +45,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { bool update, uint64_t* required_size) override; bool FinishUpdate() override; + ErrorCode CleanupSuccessfulUpdate() override; // Return the device for partition |partition_name| at slot |slot|. // |current_slot| should be set to the current active slot. diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index ffabac77..1237d763 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -43,6 +43,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD0(FinishUpdate, bool()); + MOCK_METHOD0(CleanupSuccessfulUpdate, ErrorCode()); }; class MockDynamicPartitionControlAndroid From 4f611563370df96912a7ea018291c5d7b10d5548 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 15 Jan 2020 23:41:33 -0800 Subject: [PATCH 196/624] Implement UpdateAttempterAndroid::CleanupSuccessfulUpdate Calls DynamicPartitionControlInterface::CleanupSuccessfulUpdate. 
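[Editor's note] The contract sketched below mirrors the description added to dynamic_partition_control_interface.h above: wait for the previous update to merge, then map the outcome to an ErrorCode. Types and enum values are simplified stand-ins for illustration, not the real update_engine definitions.

#include <iostream>

enum class ErrorCode { kSuccess, kError, kDeviceCorrupted };  // illustrative values
enum class MergeResult { kMerged, kNeedsReboot, kFailed };    // stand-in for libsnapshot

// Stand-in for DynamicPartitionControlAndroid::CleanupSuccessfulUpdate().
ErrorCode CleanupSuccessfulUpdate(MergeResult merge_result) {
  switch (merge_result) {
    case MergeResult::kMerged:
      return ErrorCode::kSuccess;          // merge finished, clean-up complete
    case MergeResult::kNeedsReboot:
      return ErrorCode::kError;            // caller may retry after a reboot
    default:
      return ErrorCode::kDeviceCorrupted;  // irrecoverable failure
  }
}

int main() {
  // The binder layer forwards the resulting enum to the client as an int32_t.
  std::cout << static_cast<int>(CleanupSuccessfulUpdate(MergeResult::kMerged))
            << std::endl;
  return 0;
}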
Test: manual with update_engine_client Bug: 138808328 Change-Id: Ia18d52dc354a5a0f9b29cfb908fbec77616c05e7 --- update_attempter_android.cc | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 034b4ea2..680bbafb 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -933,9 +933,15 @@ uint64_t UpdateAttempterAndroid::AllocateSpaceForPayload( int32_t UpdateAttempterAndroid::CleanupSuccessfulUpdate( brillo::ErrorPtr* error) { - // TODO(elsk): implement b/138808328 - LogAndSetError(error, FROM_HERE, "Not implemented."); - return static_cast(ErrorCode::kError); + ErrorCode error_code = + boot_control_->GetDynamicPartitionControl()->CleanupSuccessfulUpdate(); + if (error_code == ErrorCode::kSuccess) { + LOG(INFO) << "Previous update is merged and cleaned up successfully."; + } else { + LOG(ERROR) << "CleanupSuccessfulUpdate failed with " + << utils::ErrorCodeToString(error_code); + } + return static_cast(error_code); } } // namespace chromeos_update_engine From e5a8f23df27bbcf5c6bd3b764819d765a632b18e Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 16 Jan 2020 09:37:16 -0800 Subject: [PATCH 197/624] update_engine_client_android implement --merge This flag corresponds to cleanupSuccessfulUpdate function of IUpdateEngine. It waits until merge completes or fails. Note that this doesn't initiate a merge if one has not been started. To initiate a merge, use snapshotctl. Test: use it without OTA, it exits immediately Test: use it after applying update before reboot, it exits immediately (however, it cancels the update. See b/147819418) Test: use it after rebooting into new update, it waits until merge completes. Bug: 138808328 Change-Id: Id61c21811f2c33397246035788d139b059208b7f --- update_engine_client_android.cc | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc index 7766f793..7d9bc3de 100644 --- a/update_engine_client_android.cc +++ b/update_engine_client_android.cc @@ -144,7 +144,10 @@ int UpdateEngineClientAndroid::OnInit() { false, "Follow status update changes until a final state is reached. " "Exit status is 0 if the update succeeded, and 1 otherwise."); - + DEFINE_bool(merge, + false, + "Wait for previous update to merge. " + "Only available after rebooting to new slot."); // Boilerplate init commands. base::CommandLine::Init(argc_, argv_); brillo::FlagHelper::Init(argc_, argv_, "Android Update Engine Client"); @@ -222,6 +225,16 @@ int UpdateEngineClientAndroid::OnInit() { return ExitWhenIdle(status); } + if (FLAGS_merge) { + int32_t ret = 0; + Status status = service_->cleanupSuccessfulUpdate(&ret); + if (status.isOk()) { + LOG(INFO) << "CleanupSuccessfulUpdate exits with " + << utils::ErrorCodeToString(static_cast(ret)); + } + return ExitWhenIdle(status); + } + if (FLAGS_follow) { // Register a callback object with the service. callback_ = new UECallback(this); From b6d1b41097e25d1e43ab6eadf91909d4c41900b7 Mon Sep 17 00:00:00 2001 From: Grace Cham Date: Thu, 31 Oct 2019 14:53:42 +0900 Subject: [PATCH 198/624] update_engine: remove version suffix in libbrillo build rules The libbrillo library is installed without version suffix. 
BUG=chromium:920513 TEST=emerge Cq-Depend: chromium:1892317, chromium:1892377 Change-Id: I704dd1877e5994c10f629e6be9e74f4e0bdc5ddd Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1890578 Tested-by: Grace Cham Reviewed-by: Hidehiko Abe Reviewed-by: Amin Hassani Commit-Queue: Grace Cham --- BUILD.gn | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 0cc27e6f..d416b948 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -89,7 +89,7 @@ pkg_config("target_defaults") { "${platform2_root}/update_engine/client_library/include", ] pkg_deps = [ - "libbrillo-${libbase_ver}", + "libbrillo", "libchrome-${libbase_ver}", # system_api depends on protobuf (or protobuf-lite). It must appear @@ -245,7 +245,7 @@ static_library("libupdate_engine") { configs += [ ":target_defaults" ] libs = [ "bz2", - "policy-${libbase_ver}", + "policy", "rootdev", "rt", ] @@ -543,7 +543,7 @@ if (use.test) { ":target_defaults", ] pkg_deps = [ - "libbrillo-test-${libbase_ver}", + "libbrillo-test", "libchrome-test-${libbase_ver}", "libdebugd-client-test", "libpower_manager-client-test", @@ -569,7 +569,7 @@ if (use.fuzzer) { ":target_defaults", ] pkg_deps = [ - "libbrillo-test-${libbase_ver}", + "libbrillo-test", "libchrome-test-${libbase_ver}", ] deps = [ @@ -586,7 +586,7 @@ if (use.fuzzer) { ":target_defaults", ] pkg_deps = [ - "libbrillo-test-${libbase_ver}", + "libbrillo-test", "libchrome-test-${libbase_ver}", ] deps = [ From fac20229289cf4d4373fffe83037d44b780eabd0 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 21 Jan 2020 11:32:50 -0800 Subject: [PATCH 199/624] Revert "update_engine: Deprecate major version 1" This partially reverts commit 55c75417e22d5026971276997924a345d9973bbc. It turns out that we forgot a scenario when we deprecated major version 1. We use update_engine in lab tests (specifically autoupdate_EndToEndTests on stable channel) to update a DUT to an old (very old) versions using actual update payloads so we can test that they can get updated to newer versions. However, deprecating major version 1 in the update_engine caused trouble because we no longer can update from a newer version to a version before M72 (to prepare the device for update test). We need to put this feature back until we find a better solution for it. On this CL, we only support major version 1 in the client and only for test (non-official) images. We don't even bother adding paygen support for it. This CL should be reverted once we figured out what to do with provisioning the autoupdate end to end tests. BUG=chromium:1043428 TEST=FEATURES=test emerge-reef update_engine TEST=cros deployed it, then cros flash using an m71 payload, it succeeded. 
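[Editor's note] To summarize the gate this revert reintroduces, here is a simplified, self-contained sketch: a major version 1 payload is only accepted on non-official (test) images, while anything else must fall in the supported range. Constant names follow payload_constants.h; treating both the minimum and maximum supported versions as 2 is an assumption made only for this sketch.

#include <cstdint>
#include <iostream>

constexpr uint64_t kChromeOSMajorPayloadVersion = 1;
constexpr uint64_t kBrilloMajorPayloadVersion = 2;
constexpr uint64_t kMinSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
constexpr uint64_t kMaxSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;

bool IsPayloadMajorVersionAccepted(uint64_t major, bool is_official_build) {
  if (major == kChromeOSMajorPayloadVersion)
    return !is_official_build;  // version 1 is for test images only
  return major >= kMinSupportedMajorPayloadVersion &&
         major <= kMaxSupportedMajorPayloadVersion;
}

int main() {
  std::cout << IsPayloadMajorVersionAccepted(1, /*is_official_build=*/true)    // 0
            << IsPayloadMajorVersionAccepted(1, /*is_official_build=*/false)   // 1
            << IsPayloadMajorVersionAccepted(2, /*is_official_build=*/true)    // 1
            << std::endl;
  return 0;
}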
Change-Id: I1fecbe3ae845b2e419f0999adc53e4732b1f7696 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2013884 Reviewed-by: Tianjie Xu Reviewed-by: Sen Jiang Tested-by: Amin Hassani Commit-Queue: Amin Hassani --- payload_consumer/delta_performer.cc | 110 +++++++++++++++--- payload_consumer/delta_performer.h | 5 + .../delta_performer_integration_test.cc | 3 +- payload_consumer/payload_constants.cc | 2 +- payload_consumer/payload_constants.h | 2 +- payload_consumer/payload_metadata.cc | 107 +++++++++++------ payload_consumer/payload_metadata.h | 19 +-- payload_generator/payload_signer.cc | 4 +- update_attempter_android.cc | 2 +- 9 files changed, 189 insertions(+), 65 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 3263ff7d..262e8bc0 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -437,7 +437,7 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata( if (!IsHeaderParsed()) { MetadataParseResult result = - payload_metadata_.ParsePayloadHeader(payload, error); + payload_metadata_.ParsePayloadHeader(payload, hardware_, error); if (result != MetadataParseResult::kSuccess) return result; @@ -728,7 +728,8 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { // In major version 2, we don't add dummy operation to the payload. // If we already extracted the signature we should skip this step. - if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() && + if (major_payload_version_ == kBrilloMajorPayloadVersion && + manifest_.has_signatures_offset() && manifest_.has_signatures_size() && signatures_message_data_.empty()) { if (manifest_.signatures_offset() != buffer_offset_) { LOG(ERROR) << "Payload signatures offset points to blob offset " @@ -763,11 +764,51 @@ bool DeltaPerformer::IsManifestValid() { } bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { - partitions_.clear(); - for (const PartitionUpdate& partition : manifest_.partitions()) { - partitions_.push_back(partition); + if (major_payload_version_ == kBrilloMajorPayloadVersion) { + partitions_.clear(); + for (const PartitionUpdate& partition : manifest_.partitions()) { + partitions_.push_back(partition); + } + manifest_.clear_partitions(); + } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) { + LOG(INFO) << "Converting update information from old format."; + PartitionUpdate root_part; + root_part.set_partition_name(kPartitionNameRoot); +#ifdef __ANDROID__ + LOG(WARNING) << "Legacy payload major version provided to an Android " + "build. Assuming no post-install. 
Please use major version " + "2 or newer."; + root_part.set_run_postinstall(false); +#else + root_part.set_run_postinstall(true); +#endif // __ANDROID__ + if (manifest_.has_old_rootfs_info()) { + *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info(); + manifest_.clear_old_rootfs_info(); + } + if (manifest_.has_new_rootfs_info()) { + *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info(); + manifest_.clear_new_rootfs_info(); + } + *root_part.mutable_operations() = manifest_.install_operations(); + manifest_.clear_install_operations(); + partitions_.push_back(std::move(root_part)); + + PartitionUpdate kern_part; + kern_part.set_partition_name(kPartitionNameKernel); + kern_part.set_run_postinstall(false); + if (manifest_.has_old_kernel_info()) { + *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info(); + manifest_.clear_old_kernel_info(); + } + if (manifest_.has_new_kernel_info()) { + *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info(); + manifest_.clear_new_kernel_info(); + } + *kern_part.mutable_operations() = manifest_.kernel_install_operations(); + manifest_.clear_kernel_install_operations(); + partitions_.push_back(std::move(kern_part)); } - manifest_.clear_partitions(); // Fill in the InstallPlan::partitions based on the partitions from the // payload. @@ -928,6 +969,14 @@ bool DeltaPerformer::PerformReplaceOperation( TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset()); TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length()); + // Extract the signature message if it's in this operation. + if (ExtractSignatureMessageFromOperation(operation)) { + // If this is dummy replace operation, we ignore it after extracting the + // signature. + DiscardBuffer(true, 0); + return true; + } + // Setup the ExtentWriter stack based on the operation type. std::unique_ptr writer = std::make_unique(); @@ -1378,6 +1427,19 @@ bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation, return true; } +bool DeltaPerformer::ExtractSignatureMessageFromOperation( + const InstallOperation& operation) { + if (operation.type() != InstallOperation::REPLACE || + !manifest_.has_signatures_offset() || + manifest_.signatures_offset() != operation.data_offset()) { + return false; + } + TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() && + manifest_.signatures_size() == operation.data_length()); + TEST_AND_RETURN_FALSE(ExtractSignatureMessage()); + return true; +} + bool DeltaPerformer::ExtractSignatureMessage() { TEST_AND_RETURN_FALSE(signatures_message_data_.empty()); TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset()); @@ -1429,11 +1491,11 @@ ErrorCode DeltaPerformer::ValidateManifest() { // Perform assorted checks to sanity check the manifest, make sure it // matches data from other sources, and that it is a supported version. - bool has_old_fields = std::any_of(manifest_.partitions().begin(), - manifest_.partitions().end(), - [](const PartitionUpdate& partition) { - return partition.has_old_partition_info(); - }); + bool has_old_fields = + (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info()); + for (const PartitionUpdate& partition : manifest_.partitions()) { + has_old_fields = has_old_fields || partition.has_old_partition_info(); + } // The presence of an old partition hash is the sole indicator for a delta // update. 
@@ -1475,12 +1537,16 @@ ErrorCode DeltaPerformer::ValidateManifest() { } } - if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() || - manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() || - manifest_.install_operations_size() != 0 || - manifest_.kernel_install_operations_size() != 0) { - LOG(ERROR) << "Manifest contains deprecated fields."; - return ErrorCode::kPayloadMismatchedType; + if (major_payload_version_ != kChromeOSMajorPayloadVersion) { + if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() || + manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() || + manifest_.install_operations_size() != 0 || + manifest_.kernel_install_operations_size() != 0) { + LOG(ERROR) << "Manifest contains deprecated field only supported in " + << "major payload version 1, but the payload major version is " + << major_payload_version_; + return ErrorCode::kPayloadMismatchedType; + } } if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) { @@ -1491,6 +1557,16 @@ ErrorCode DeltaPerformer::ValidateManifest() { return ErrorCode::kPayloadTimestampError; } + if (major_payload_version_ == kChromeOSMajorPayloadVersion) { + if (manifest_.has_dynamic_partition_metadata()) { + LOG(ERROR) + << "Should not contain dynamic_partition_metadata for major version " + << kChromeOSMajorPayloadVersion + << ". Please use major version 2 or above."; + return ErrorCode::kPayloadMismatchedType; + } + } + // TODO(crbug.com/37661) we should be adding more and more manifest checks, // such as partition boundaries, etc. diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 78607475..4493c2ae 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -237,6 +237,11 @@ class DeltaPerformer : public FileWriter { FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, ErrorCode* error); + // Extracts the payload signature message from the blob on the |operation| if + // the offset matches the one specified by the manifest. Returns whether the + // signature was extracted. + bool ExtractSignatureMessageFromOperation(const InstallOperation& operation); + // Extracts the payload signature message from the current |buffer_| if the // offset matches the one specified by the manifest. Returns whether the // signature was extracted. 
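[Editor's note] For readers unfamiliar with the major-version-1 payload layout, the check restored in ExtractSignatureMessageFromOperation can be summarized with the standalone sketch below. The protobuf messages are replaced with simplified structs; field names mirror the real manifest, but the code is illustrative only.

#include <cstdint>
#include <iostream>
#include <optional>

struct InstallOperation {
  enum Type { REPLACE, REPLACE_BZ, SOURCE_COPY };
  Type type;
  uint64_t data_offset;
  uint64_t data_length;
};

struct Manifest {
  std::optional<uint64_t> signatures_offset;
  std::optional<uint64_t> signatures_size;
};

// A REPLACE operation whose blob coincides with the manifest's signature blob
// is the version-1 "dummy" signature operation; it is consumed, not applied.
bool IsSignatureOperation(const InstallOperation& op, const Manifest& m) {
  return op.type == InstallOperation::REPLACE &&
         m.signatures_offset && *m.signatures_offset == op.data_offset &&
         m.signatures_size && *m.signatures_size == op.data_length;
}

int main() {
  Manifest m;
  m.signatures_offset = 4096;
  m.signatures_size = 256;
  InstallOperation sig_op{InstallOperation::REPLACE, 4096, 256};
  InstallOperation data_op{InstallOperation::REPLACE, 0, 4096};
  std::cout << IsSignatureOperation(sig_op, m)
            << IsSignatureOperation(data_op, m) << std::endl;  // prints 10
  return 0;
}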
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index f1a492b5..8de70e35 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -591,7 +591,8 @@ static void ApplyDeltaFile(bool full_kernel, { EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta)); PayloadMetadata payload_metadata; - EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta)); + EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta, + &state->fake_hardware_)); state->metadata_size = payload_metadata.GetMetadataSize(); LOG(INFO) << "Metadata size: " << state->metadata_size; state->metadata_signature_size = diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 908a8933..4015a0ad 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -20,7 +20,7 @@ namespace chromeos_update_engine { -// const uint64_t kChromeOSMajorPayloadVersion = 1; DEPRECATED +const uint64_t kChromeOSMajorPayloadVersion = 1; const uint64_t kBrilloMajorPayloadVersion = 2; const uint64_t kMinSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion; diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h index 888fa2a1..fe823f41 100644 --- a/payload_consumer/payload_constants.h +++ b/payload_consumer/payload_constants.h @@ -26,7 +26,7 @@ namespace chromeos_update_engine { // The major version used by Chrome OS. -// extern const uint64_t kChromeOSMajorPayloadVersion; DEPRECATED +extern const uint64_t kChromeOSMajorPayloadVersion; // The major version used by Brillo. extern const uint64_t kBrilloMajorPayloadVersion; diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index b83001a2..69ccb461 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -20,6 +20,7 @@ #include +#include "update_engine/common/hardware_interface.h" #include "update_engine/common/hash_calculator.h" #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/payload_constants.h" @@ -36,18 +37,36 @@ const uint64_t PayloadMetadata::kDeltaManifestSizeOffset = const uint64_t PayloadMetadata::kDeltaManifestSizeSize = 8; const uint64_t PayloadMetadata::kDeltaMetadataSignatureSizeSize = 4; -uint64_t PayloadMetadata::GetMetadataSignatureSizeOffset() const { - return kDeltaManifestSizeOffset + kDeltaManifestSizeSize; +bool PayloadMetadata::GetMetadataSignatureSizeOffset( + uint64_t* out_offset) const { + if (GetMajorVersion() == kBrilloMajorPayloadVersion) { + *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize; + return true; + } + return false; } -uint64_t PayloadMetadata::GetManifestOffset() const { - // Actual manifest begins right after the metadata signature size field. - return kDeltaManifestSizeOffset + kDeltaManifestSizeSize + - kDeltaMetadataSignatureSizeSize; +bool PayloadMetadata::GetManifestOffset(uint64_t* out_offset) const { + // Actual manifest begins right after the manifest size field or + // metadata signature size field if major version >= 2. 
+ if (major_payload_version_ == kChromeOSMajorPayloadVersion) { + *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize; + return true; + } + if (major_payload_version_ == kBrilloMajorPayloadVersion) { + *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize + + kDeltaMetadataSignatureSizeSize; + return true; + } + LOG(ERROR) << "Unknown major payload version: " << major_payload_version_; + return false; } MetadataParseResult PayloadMetadata::ParsePayloadHeader( - const brillo::Blob& payload, ErrorCode* error) { + const brillo::Blob& payload, + HardwareInterface* hardware, + ErrorCode* error) { + uint64_t manifest_offset; // Ensure we have data to cover the major payload version. if (payload.size() < kDeltaManifestSizeOffset) return MetadataParseResult::kInsufficientData; @@ -59,11 +78,6 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( return MetadataParseResult::kError; } - uint64_t manifest_offset = GetManifestOffset(); - // Check again with the manifest offset. - if (payload.size() < manifest_offset) - return MetadataParseResult::kInsufficientData; - // Extract the payload version from the metadata. static_assert(sizeof(major_payload_version_) == kDeltaVersionSize, "Major payload version size mismatch"); @@ -73,14 +87,26 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( // Switch big endian to host. major_payload_version_ = be64toh(major_payload_version_); - if (major_payload_version_ < kMinSupportedMajorPayloadVersion || - major_payload_version_ > kMaxSupportedMajorPayloadVersion) { + // We only want to test major version 1 for test images. + if (major_payload_version_ == kChromeOSMajorPayloadVersion + ? hardware != nullptr && hardware->IsOfficialBuild() + : major_payload_version_ < kMinSupportedMajorPayloadVersion || + major_payload_version_ > kMaxSupportedMajorPayloadVersion) { LOG(ERROR) << "Bad payload format -- unsupported payload version: " << major_payload_version_; *error = ErrorCode::kUnsupportedMajorPayloadVersion; return MetadataParseResult::kError; } + // Get the manifest offset now that we have payload version. + if (!GetManifestOffset(&manifest_offset)) { + *error = ErrorCode::kUnsupportedMajorPayloadVersion; + return MetadataParseResult::kError; + } + // Check again with the manifest offset. + if (payload.size() < manifest_offset) + return MetadataParseResult::kInsufficientData; + // Next, parse the manifest size. static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize, "manifest_size size mismatch"); @@ -97,33 +123,43 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( return MetadataParseResult::kError; } - // Parse the metadata signature size. - static_assert( - sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize, - "metadata_signature_size size mismatch"); - uint64_t metadata_signature_size_offset = GetMetadataSignatureSizeOffset(); - memcpy(&metadata_signature_size_, - &payload[metadata_signature_size_offset], - kDeltaMetadataSignatureSizeSize); - metadata_signature_size_ = be32toh(metadata_signature_size_); - - if (metadata_size_ + metadata_signature_size_ < metadata_size_) { - // Overflow detected. - LOG(ERROR) << "Overflow detected on metadata and signature size."; - *error = ErrorCode::kDownloadInvalidMetadataSize; - return MetadataParseResult::kError; + if (GetMajorVersion() == kBrilloMajorPayloadVersion) { + // Parse the metadata signature size. 
+ static_assert( + sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize, + "metadata_signature_size size mismatch"); + uint64_t metadata_signature_size_offset; + if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) { + *error = ErrorCode::kError; + return MetadataParseResult::kError; + } + memcpy(&metadata_signature_size_, + &payload[metadata_signature_size_offset], + kDeltaMetadataSignatureSizeSize); + metadata_signature_size_ = be32toh(metadata_signature_size_); + + if (metadata_size_ + metadata_signature_size_ < metadata_size_) { + // Overflow detected. + LOG(ERROR) << "Overflow detected on metadata and signature size."; + *error = ErrorCode::kDownloadInvalidMetadataSize; + return MetadataParseResult::kError; + } } return MetadataParseResult::kSuccess; } -bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload) { +bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload, + HardwareInterface* hardware) { ErrorCode error; - return ParsePayloadHeader(payload, &error) == MetadataParseResult::kSuccess; + return ParsePayloadHeader(payload, hardware, &error) == + MetadataParseResult::kSuccess; } bool PayloadMetadata::GetManifest(const brillo::Blob& payload, DeltaArchiveManifest* out_manifest) const { - uint64_t manifest_offset = GetManifestOffset(); + uint64_t manifest_offset; + if (!GetManifestOffset(&manifest_offset)) + return false; CHECK_GE(payload.size(), manifest_offset + manifest_size_); return out_manifest->ParseFromArray(&payload[manifest_offset], manifest_size_); @@ -145,7 +181,7 @@ ErrorCode PayloadMetadata::ValidateMetadataSignature( << metadata_signature; return ErrorCode::kDownloadMetadataSignatureError; } - } else { + } else if (major_payload_version_ == kBrilloMajorPayloadVersion) { metadata_signature_protobuf_blob.assign( payload.begin() + metadata_size_, payload.begin() + metadata_size_ + metadata_signature_size_); @@ -206,7 +242,7 @@ bool PayloadMetadata::ParsePayloadFile(const string& payload_path, brillo::Blob payload; TEST_AND_RETURN_FALSE( utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload)); - TEST_AND_RETURN_FALSE(ParsePayloadHeader(payload)); + TEST_AND_RETURN_FALSE(ParsePayloadHeader(payload, nullptr)); if (manifest != nullptr) { TEST_AND_RETURN_FALSE( @@ -217,7 +253,8 @@ bool PayloadMetadata::ParsePayloadFile(const string& payload_path, TEST_AND_RETURN_FALSE(GetManifest(payload, manifest)); } - if (metadata_signatures != nullptr) { + if (metadata_signatures != nullptr && + GetMajorVersion() >= kBrilloMajorPayloadVersion) { payload.clear(); TEST_AND_RETURN_FALSE(utils::ReadFileChunk( payload_path, GetMetadataSize(), GetMetadataSignatureSize(), &payload)); diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h index be43c410..32923518 100644 --- a/payload_consumer/payload_metadata.h +++ b/payload_consumer/payload_metadata.h @@ -26,6 +26,7 @@ #include #include "update_engine/common/error_code.h" +#include "update_engine/common/hardware_interface.h" #include "update_engine/common/platform_constants.h" #include "update_engine/update_metadata.pb.h" @@ -54,9 +55,11 @@ class PayloadMetadata { // metadata. Returns kMetadataParseError if the metadata can't be parsed given // the payload. MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload, + HardwareInterface* hardware, ErrorCode* error); // Simpler version of the above, returns true on success. 
- bool ParsePayloadHeader(const brillo::Blob& payload); + bool ParsePayloadHeader(const brillo::Blob& payload, + HardwareInterface* hardware); // Given the |payload|, verifies that the signed hash of its metadata matches // |metadata_signature| (if present) or the metadata signature in payload @@ -94,12 +97,14 @@ class PayloadMetadata { Signatures* metadata_signatures); private: - // Returns the byte offset at which the manifest protobuf begins in a payload. - uint64_t GetManifestOffset() const; - - // Returns the byte offset where the size of the metadata signature is stored - // in a payload. - uint64_t GetMetadataSignatureSizeOffset() const; + // Set |*out_offset| to the byte offset at which the manifest protobuf begins + // in a payload. Return true on success, false if the offset is unknown. + bool GetManifestOffset(uint64_t* out_offset) const; + + // Set |*out_offset| to the byte offset where the size of the metadata + // signature is stored in a payload. Return true on success, if this field is + // not present in the payload, return false. + bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const; uint64_t metadata_size_{0}; uint64_t manifest_size_{0}; diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 420329ff..613202d0 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -94,7 +94,7 @@ bool AddSignatureBlobToPayload(const string& payload_path, brillo::Blob payload; TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload)); PayloadMetadata payload_metadata; - TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload)); + TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload, nullptr)); uint64_t metadata_size = payload_metadata.GetMetadataSize(); uint32_t metadata_signature_size = payload_metadata.GetMetadataSignatureSize(); @@ -218,7 +218,7 @@ bool PayloadSigner::VerifySignedPayload(const string& payload_path, brillo::Blob payload; TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload)); PayloadMetadata payload_metadata; - TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload)); + TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload, nullptr)); DeltaArchiveManifest manifest; TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest)); TEST_AND_RETURN_FALSE(manifest.has_signatures_offset() && diff --git a/update_attempter_android.cc b/update_attempter_android.cc index c738e4ef..e2b5a88a 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -368,7 +368,7 @@ bool UpdateAttempterAndroid::VerifyPayloadApplicable( } ErrorCode errorcode; PayloadMetadata payload_metadata; - if (payload_metadata.ParsePayloadHeader(metadata, &errorcode) != + if (payload_metadata.ParsePayloadHeader(metadata, nullptr, &errorcode) != MetadataParseResult::kSuccess) { return LogAndSetError(error, FROM_HERE, From 955aa6e428cc1e684e3341e558de6c8b6d7c6fba Mon Sep 17 00:00:00 2001 From: hscham Date: Tue, 21 Jan 2020 17:14:57 +0900 Subject: [PATCH 200/624] update_engine: post libchrome uprev clean-up BUG=chromium:909719 TEST=unit tests Change-Id: I9ec6c6d8cb23fbd49a86734648d95acc08b791e8 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2009957 Tested-by: Grace Cham Reviewed-by: Amin Hassani Commit-Queue: Qijiang Fan --- common_service.cc | 4 --- omaha_request_action_unittest.cc | 4 --- omaha_utils.cc | 4 --- payload_consumer/download_action.cc | 3 -- update_engine_client.cc | 5 --- 
update_manager/boxed_value.cc | 20 ------------ update_manager/evaluation_context_unittest.cc | 32 ------------------- update_status_utils.cc | 8 ----- 8 files changed, 80 deletions(-) diff --git a/common_service.cc b/common_service.cc index d520cf13..a99d10c8 100644 --- a/common_service.cc +++ b/common_service.cc @@ -50,11 +50,7 @@ namespace chromeos_update_engine { namespace { // Log and set the error on the passed ErrorPtr. void LogAndSetError(ErrorPtr* error, -#if BASE_VER < 576279 - const tracked_objects::Location& location, -#else const base::Location& location, -#endif const string& reason) { brillo::Error::AddTo(error, location, diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 8dcec044..7b676e25 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -91,11 +91,7 @@ const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck"; struct FakeUpdateResponse { string GetRollbackVersionAttributes() const { string num_milestones; -#if BASE_VER < 576279 - num_milestones = base::IntToString(rollback_allowed_milestones); -#else num_milestones = base::NumberToString(rollback_allowed_milestones); -#endif const string rollback_version = " _firmware_version_" + num_milestones + "=\"" + past_rollback_key_version.first + "\"" + " _kernel_version_" + diff --git a/omaha_utils.cc b/omaha_utils.cc index 18a99cea..54e9fc0e 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -24,11 +24,7 @@ namespace chromeos_update_engine { const EolDate kEolDateInvalid = -9999; std::string EolDateToString(EolDate eol_date) { -#if BASE_VER < 576279 - return base::Int64ToString(eol_date); -#else return base::NumberToString(eol_date); -#endif } EolDate StringToEolDate(const std::string& eol_date) { diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc index 09afc42c..45df5a92 100644 --- a/payload_consumer/download_action.cc +++ b/payload_consumer/download_action.cc @@ -56,9 +56,6 @@ DownloadAction::DownloadAction(PrefsInterface* prefs, delegate_(nullptr), p2p_sharing_fd_(-1), p2p_visible_(true) { -#if BASE_VER < 576279 - base::StatisticsRecorder::Initialize(); -#endif } DownloadAction::~DownloadAction() {} diff --git a/update_engine_client.cc b/update_engine_client.cc index 33425924..eb718410 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -562,13 +562,8 @@ int UpdateEngineClient::ProcessFlags() { ErrorCode code = static_cast(last_attempt_error); KeyValueStore last_attempt_error_store; -#if BASE_VER < 576279 - last_attempt_error_store.SetString( - "ERROR_CODE", base::Int64ToString(last_attempt_error)); -#else last_attempt_error_store.SetString( "ERROR_CODE", base::NumberToString(last_attempt_error)); -#endif last_attempt_error_store.SetString("ERROR_MESSAGE", ErrorCodeToString(code)); printf("%s", last_attempt_error_store.SaveToString().c_str()); diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc index ef321e6f..4dff9efa 100644 --- a/update_manager/boxed_value.cc +++ b/update_manager/boxed_value.cc @@ -51,41 +51,25 @@ string BoxedValue::ValuePrinter(const void* value) { template <> string BoxedValue::ValuePrinter(const void* value) { const int* val = reinterpret_cast(value); -#if BASE_VER < 576279 - return base::IntToString(*val); -#else return base::NumberToString(*val); -#endif } template <> string BoxedValue::ValuePrinter(const void* value) { const unsigned int* val = reinterpret_cast(value); -#if BASE_VER < 576279 - return base::UintToString(*val); -#else 
return base::NumberToString(*val); -#endif } template <> string BoxedValue::ValuePrinter(const void* value) { const int64_t* val = reinterpret_cast(value); -#if BASE_VER < 576279 - return base::Int64ToString(*val); -#else return base::NumberToString(*val); -#endif } template <> string BoxedValue::ValuePrinter(const void* value) { const uint64_t* val = reinterpret_cast(value); -#if BASE_VER < 576279 - return base::Uint64ToString(*val); -#else return base::NumberToString(*val); -#endif } template <> @@ -97,11 +81,7 @@ string BoxedValue::ValuePrinter(const void* value) { template <> string BoxedValue::ValuePrinter(const void* value) { const double* val = reinterpret_cast(value); -#if BASE_VER < 576279 - return base::DoubleToString(*val); -#else return base::NumberToString(*val); -#endif } template <> diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc index a50defd3..cd0b2e6c 100644 --- a/update_manager/evaluation_context_unittest.cc +++ b/update_manager/evaluation_context_unittest.cc @@ -211,11 +211,7 @@ TEST_F(UmEvaluationContextTest, RunOnValueChangeOrTimeoutWithoutVariables) { fake_const_var_.reset(new string("Hello world!")); EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!"); -#if BASE_VER < 576279 - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif } // Test that reevaluation occurs when an async variable it depends on changes. @@ -285,19 +281,11 @@ TEST_F(UmEvaluationContextTest, RunOnValueChangeOrTimeoutExpires) { EXPECT_TRUE(value); // Ensure that we cannot reschedule an evaluation. -#if BASE_VER < 576279 - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif // Ensure that we can reschedule an evaluation after resetting expiration. eval_ctx_->ResetExpiration(); -#if BASE_VER < 576279 - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif } // Test that we clear the events when destroying the EvaluationContext. @@ -343,11 +331,7 @@ TEST_F(UmEvaluationContextTest, ObjectDeletedWithPendingEventsTest) { fake_poll_var_.reset(new string("Polled value")); eval_ctx_->GetValue(&fake_async_var_); eval_ctx_->GetValue(&fake_poll_var_); -#if BASE_VER < 576279 - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif // TearDown() checks for leaked observers on this async_variable, which means // that our object is still alive after removing its reference. } @@ -439,11 +423,7 @@ TEST_F(UmEvaluationContextTest, // The "false" from IsWallclockTimeGreaterThan means that's not that timestamp // yet, so this should schedule a callback for when that happens. -#if BASE_VER < 576279 - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif } TEST_F(UmEvaluationContextTest, @@ -453,11 +433,7 @@ TEST_F(UmEvaluationContextTest, // The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp // yet, so this should schedule a callback for when that happens. 
-#if BASE_VER < 576279 - EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif } TEST_F(UmEvaluationContextTest, @@ -470,11 +446,7 @@ TEST_F(UmEvaluationContextTest, fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1))); // Callback should not be scheduled. -#if BASE_VER < 576279 - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif } TEST_F(UmEvaluationContextTest, @@ -487,11 +459,7 @@ TEST_F(UmEvaluationContextTest, fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1))); // Callback should not be scheduled. -#if BASE_VER < 576279 - EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&base::DoNothing))); -#else EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing())); -#endif } TEST_F(UmEvaluationContextTest, DumpContext) { diff --git a/update_status_utils.cc b/update_status_utils.cc index 639dc8be..f88bb1aa 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -74,19 +74,11 @@ const char* UpdateStatusToString(const UpdateStatus& status) { string UpdateEngineStatusToString(const UpdateEngineStatus& status) { KeyValueStore key_value_store; -#if BASE_VER < 576279 - key_value_store.SetString(kLastCheckedTime, - base::Int64ToString(status.last_checked_time)); - key_value_store.SetString(kProgress, base::DoubleToString(status.progress)); - key_value_store.SetString(kNewSize, - base::Uint64ToString(status.new_size_bytes)); -#else key_value_store.SetString(kLastCheckedTime, base::NumberToString(status.last_checked_time)); key_value_store.SetString(kProgress, base::NumberToString(status.progress)); key_value_store.SetString(kNewSize, base::NumberToString(status.new_size_bytes)); -#endif key_value_store.SetString(kCurrentOp, UpdateStatusToString(status.status)); key_value_store.SetString(kNewVersion, status.new_version); key_value_store.SetBoolean(kIsEnterpriseRollback, From e59faa410424f740bc5b9c4e098d4d98c8902957 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 27 Jan 2020 10:46:36 -0800 Subject: [PATCH 201/624] update_engine: Log invalid Omaha _eol_date attribute Log may be turned back on. BUG=b:142823480 TEST=# CQ Change-Id: Ie562162b36faff3ea81ce9c65cbd9abc19fc43b3 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2023386 Tested-by: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani --- omaha_utils.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/omaha_utils.cc b/omaha_utils.cc index 54e9fc0e..1ef8049a 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -30,9 +30,7 @@ std::string EolDateToString(EolDate eol_date) { EolDate StringToEolDate(const std::string& eol_date) { EolDate date = kEolDateInvalid; if (!base::StringToInt64(eol_date, &date)) { - // TODO(b/142823480): Once Omaha is passing _eol_date attribute, this log - // may be turned back on. - // LOG(WARNING) << "Invalid EOL date attribute: " << eol_date; + LOG(WARNING) << "Invalid EOL date attribute: " << eol_date; return kEolDateInvalid; } return date; From bae2784f42f598d57b0155cc9336a73782c8def0 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 24 Oct 2019 16:56:12 -0700 Subject: [PATCH 202/624] Support sideload full update on VAB On devices with Virtual A/B, certain regions are snapshotted during the udpate. 
Snapshots may fail to be created in recovery because fiemap of userdata cannot be retrieved (it may succeed if empty space in super is enough to hold CoW, although this is likely not the case for launch VAB). In this case (on VAB devices when super is not big enough, esp. for launch VAB devices): - sideloading incremental OTAs are not allowed - sideloading full OTAs is similar to flashing. Source partitions are deleted from super. Note that to reduce the difference between launch and retrofit VAB devices, most code only checks GetVirtualAbFeatureFlag().IsEnabled(). IsLaunch() is only used when doing sanity checks and providing more detailed logging information. Test: manually make PrepareSnapshotPartitionsForUpdate fail, then sideload Test: update_engine_unittests Bug: 140749209 Change-Id: I64927c85bfb10b34d8bd19bd88c18663f4a2a917 --- dynamic_partition_control_android.cc | 92 +++++++++++++++++++++++++--- dynamic_partition_control_android.h | 19 ++++-- 2 files changed, 97 insertions(+), 14 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 881ff111..072a3ecb 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -68,8 +68,14 @@ constexpr std::chrono::milliseconds kMapTimeout{1000}; // needs to be mapped, this timeout is longer than |kMapTimeout|. constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000}; +#ifdef __ANDROID_RECOVERY__ +constexpr bool kIsRecovery = true; +#else +constexpr bool kIsRecovery = false; +#endif + DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { - CleanupInternal(); + Cleanup(); } static FeatureFlag GetFeatureFlag(const char* enable_prop, @@ -234,8 +240,7 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( return true; } -void DynamicPartitionControlAndroid::CleanupInternal() { - metadata_device_.reset(); +void DynamicPartitionControlAndroid::UnmapAllPartitions() { if (mapped_devices_.empty()) { return; } @@ -249,7 +254,8 @@ void DynamicPartitionControlAndroid::CleanupInternal() { } void DynamicPartitionControlAndroid::Cleanup() { - CleanupInternal(); + UnmapAllPartitions(); + metadata_device_.reset(); } bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) { @@ -419,28 +425,53 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( if (!update) return true; + bool delete_source = false; + if (GetVirtualAbFeatureFlag().IsEnabled()) { // On Virtual A/B device, either CancelUpdate() or BeginUpdate() must be // called before calling UnmapUpdateSnapshot. // - If target_supports_snapshot_, PrepareSnapshotPartitionsForUpdate() // calls BeginUpdate() which resets update state - // - If !target_supports_snapshot_, explicitly CancelUpdate(). + // - If !target_supports_snapshot_ or PrepareSnapshotPartitionsForUpdate + // failed in recovery, explicitly CancelUpdate(). if (target_supports_snapshot_) { - return PrepareSnapshotPartitionsForUpdate( - source_slot, target_slot, manifest, required_size); + if (PrepareSnapshotPartitionsForUpdate( + source_slot, target_slot, manifest, required_size)) { + return true; + } + + // Virtual A/B device doing Virtual A/B update in Android mode must use + // snapshots. + if (!IsRecovery()) { + LOG(ERROR) << "PrepareSnapshotPartitionsForUpdate failed in Android " + << "mode"; + return false; + } + + delete_source = true; + LOG(INFO) << "PrepareSnapshotPartitionsForUpdate failed in recovery. 
" + << "Attempt to overwrite existing partitions if possible"; + } else { + // Downgrading to an non-Virtual A/B build or is secondary OTA. + LOG(INFO) << "Using regular A/B on Virtual A/B because package disabled " + << "snapshots."; } + if (!snapshot_->CancelUpdate()) { LOG(ERROR) << "Cannot cancel previous update."; return false; } } - return PrepareDynamicPartitionsForUpdate(source_slot, target_slot, manifest); + + return PrepareDynamicPartitionsForUpdate( + source_slot, target_slot, manifest, delete_source); } bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, - const DeltaArchiveManifest& manifest) { + const DeltaArchiveManifest& manifest, + bool delete_source) { const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); // Unmap all the target dynamic partitions because they would become @@ -468,6 +499,11 @@ bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( return false; } + if (delete_source) { + TEST_AND_RETURN_FALSE( + DeleteSourcePartitions(builder.get(), source_slot, manifest)); + } + if (!UpdatePartitionMetadata(builder.get(), target_slot, manifest)) { return false; } @@ -585,7 +621,7 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( } bool DynamicPartitionControlAndroid::FinishUpdate() { - if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_) { + if (snapshot_->GetUpdateState() == UpdateState::Initiated) { LOG(INFO) << "Snapshot writes are done."; return snapshot_->FinishedSnapshotWrites(); } @@ -713,4 +749,40 @@ ErrorCode DynamicPartitionControlAndroid::CleanupSuccessfulUpdate() { return ErrorCode::kDeviceCorrupted; } +bool DynamicPartitionControlAndroid::IsRecovery() { + return kIsRecovery; +} + +static bool IsIncrementalUpdate(const DeltaArchiveManifest& manifest) { + const auto& partitions = manifest.partitions(); + return std::any_of(partitions.begin(), partitions.end(), [](const auto& p) { + return p.has_old_partition_info(); + }); +} + +bool DynamicPartitionControlAndroid::DeleteSourcePartitions( + MetadataBuilder* builder, + uint32_t source_slot, + const DeltaArchiveManifest& manifest) { + TEST_AND_RETURN_FALSE(IsRecovery()); + + if (IsIncrementalUpdate(manifest)) { + LOG(ERROR) << "Cannot sideload incremental OTA because snapshots cannot " + << "be created."; + if (GetVirtualAbFeatureFlag().IsLaunch()) { + LOG(ERROR) << "Sideloading incremental updates on devices launches " + << " Virtual A/B is not supported."; + } + return false; + } + + LOG(INFO) << "Will overwrite existing partitions. Slot " + << BootControlInterface::SlotName(source_slot) + << "may be unbootable until update finishes!"; + const std::string source_suffix = SlotSuffixForSlotNumber(source_slot); + DeleteGroupsWithSuffix(builder, source_suffix); + + return true; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index ba23e7c6..e7ae26b4 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -127,10 +127,13 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { virtual void set_fake_mapped_devices(const std::set& fake); + // Allow mock objects to override this to test recovery mode. 
+ virtual bool IsRecovery(); + private: friend class DynamicPartitionControlAndroidTest; - void CleanupInternal(); + void UnmapAllPartitions(); bool MapPartitionInternal(const std::string& super_device, const std::string& target_partition_name, uint32_t slot, @@ -143,11 +146,14 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t target_slot, const DeltaArchiveManifest& manifest); - // Helper for PreparePartitionsForUpdate. Used for dynamic partitions without - // Virtual A/B update. + // Helper for PreparePartitionsForUpdate. Used for devices with dynamic + // partitions updating without snapshots. + // If |delete_source| is set, source partitions are deleted before resizing + // target partitions (using DeleteSourcePartitions). bool PrepareDynamicPartitionsForUpdate(uint32_t source_slot, uint32_t target_slot, - const DeltaArchiveManifest& manifest); + const DeltaArchiveManifest& manifest, + bool delete_source); // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions for // Virtual A/B update. @@ -179,6 +185,11 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t current_slot, const std::string& partition_name_suffix); + // If sideloading a full OTA, delete source partitions from |builder|. + bool DeleteSourcePartitions(android::fs_mgr::MetadataBuilder* builder, + uint32_t source_slot, + const DeltaArchiveManifest& manifest); + std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; From d66ecf171c91a456d84d8da086530dd37a31066a Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 4 Feb 2020 11:15:50 -0800 Subject: [PATCH 203/624] Fix nullptr segv snapshot_ is only initialized when VAB feature flag is set. Fixes: 148771898 Test: TH Change-Id: I5552cf19213820c267cb5f48cd923144474a22a1 --- dynamic_partition_control_android.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 072a3ecb..938a75bc 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -621,7 +621,8 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( } bool DynamicPartitionControlAndroid::FinishUpdate() { - if (snapshot_->GetUpdateState() == UpdateState::Initiated) { + if (GetVirtualAbFeatureFlag().IsEnabled() && + snapshot_->GetUpdateState() == UpdateState::Initiated) { LOG(INFO) << "Snapshot writes are done."; return snapshot_->FinishedSnapshotWrites(); } From 212d795e71224b06143f3a3b62bbb1ef79dfc1b7 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 4 Feb 2020 11:08:08 -0800 Subject: [PATCH 204/624] Temporarily add libutilscallstack dependency ... to update_engine. 
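(Presumably the temporary dependency exists so that short-lived debugging hooks for the bug below can dump native stack traces; a minimal sketch of such a hook, assumed rather than taken from this patch, would be:

  #include <utils/CallStack.h>
  ...
  // Collects the current thread's stack and logs it under the given tag.
  android::CallStack stack("update_engine");

android::CallStack is what libutilscallstack provides; the actual call sites are not part of this change.)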
Bug: 148818798 Test: run OTA and reboot Change-Id: Ib80c74a9adbc269295620091474626290f5d2325 --- Android.bp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Android.bp b/Android.bp index 01ea4fa4..166ed397 100644 --- a/Android.bp +++ b/Android.bp @@ -208,6 +208,9 @@ cc_defaults { "libutils", "android.hardware.boot@1.0", "android.hardware.boot@1.1", + + //TODO(b/148818798): remove when parent bug fixed + "libutilscallstack", ], target: { recovery: { @@ -219,6 +222,9 @@ cc_defaults { "libfs_mgr_binder", "libsnapshot", ], + exclude_shared_libs: [ + "libutilscallstack", + ], }, }, } From e045aef54f45794ce5d1b138f83199258dfc166f Mon Sep 17 00:00:00 2001 From: Andrew Date: Wed, 8 Jan 2020 16:29:22 -0800 Subject: [PATCH 205/624] update_engine: Add ping for DLCs in update_engine Send ping to omaha with the metadata values 'active','date_last_active' and 'date_last_rollcall'. Update engine resets the 'active' flag after succesfully sending a ping to Omaha. The 'date_last_active' value is sent and updated only when the DLC was active since the previous ping. 'date_last_rollcall' is sent on every ping. BUG=chromium:912666 TEST=unittests TEST=Test on DUT using Nebraska and forcing ping values by changing the metadata files in /var/lib/dlc/dummy-dlc/. Installed dlc using:dlcservice_util --dlc_ids="dummy-dlc" --install Trigger the pings by calling: update_engine_client --check_for_update Change-Id: I47eff8c7923f5b3a7e892c281933c9a12b619ee7 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2001095 Tested-by: Andrew Lassalle Commit-Queue: Andrew Lassalle Reviewed-by: Amin Hassani --- common/constants.cc | 8 ++ common/constants.h | 7 ++ omaha_request_action.cc | 64 ++++++++++++- omaha_request_action.h | 4 + omaha_request_action_unittest.cc | 131 ++++++++++++++++++++++++-- omaha_request_builder_xml.cc | 74 +++++++++++---- omaha_request_builder_xml.h | 16 +++- omaha_request_builder_xml_unittest.cc | 75 ++++++++++++++- omaha_request_params.cc | 9 +- omaha_request_params.h | 43 +++++++-- update_attempter.cc | 79 ++++++++++++++-- update_attempter.h | 16 ++++ update_attempter_unittest.cc | 123 ++++++++++++++++++++++++ 13 files changed, 595 insertions(+), 54 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index d779dd44..58cf1b3d 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -18,6 +18,9 @@ namespace chromeos_update_engine { +// Keep this in sync with the one in dlcservice. +const char kDlcMetadataRootpath[] = "/var/lib/dlc/"; + const char kPowerwashSafePrefsSubDirectory[] = "update_engine/prefs"; const char kPrefsSubDirectory[] = "prefs"; @@ -60,6 +63,11 @@ const char kPrefsP2PEnabled[] = "p2p-enabled"; const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp"; const char kPrefsP2PNumAttempts[] = "p2p-num-attempts"; const char kPrefsPayloadAttemptNumber[] = "payload-attempt-number"; +// Keep |kPrefsPingActive| in sync with |kDlcMetadataFilePingActive| in +// dlcservice. 
+const char kPrefsPingActive[] = "active"; +const char kPrefsPingLastActive[] = "date_last_active"; +const char kPrefsPingLastRollcall[] = "date_last_rollcall"; const char kPrefsPostInstallSucceeded[] = "post-install-succeeded"; const char kPrefsPreviousVersion[] = "previous-version"; const char kPrefsResumedUpdateFailures[] = "resumed-update-failures"; diff --git a/common/constants.h b/common/constants.h index 8685f7e7..44b20b0f 100644 --- a/common/constants.h +++ b/common/constants.h @@ -19,6 +19,10 @@ namespace chromeos_update_engine { +// The root path of all DLC modules metadata. +// Keep this in sync with the one in dlcservice. +extern const char kDlcMetadataRootpath[]; + // Directory for AU prefs that are preserved across powerwash. extern const char kPowerwashSafePrefsSubDirectory[]; @@ -61,6 +65,9 @@ extern const char kPrefsP2PEnabled[]; extern const char kPrefsP2PFirstAttemptTimestamp[]; extern const char kPrefsP2PNumAttempts[]; extern const char kPrefsPayloadAttemptNumber[]; +extern const char kPrefsPingActive[]; +extern const char kPrefsPingLastActive[]; +extern const char kPrefsPingLastRollcall[]; extern const char kPrefsPostInstallSucceeded[]; extern const char kPrefsPreviousVersion[]; extern const char kPrefsResumedUpdateFailures[]; diff --git a/omaha_request_action.cc b/omaha_request_action.cc index f25f8ee4..b6b43560 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -43,6 +44,7 @@ #include "update_engine/common/hardware_interface.h" #include "update_engine/common/hash_calculator.h" #include "update_engine/common/platform_constants.h" +#include "update_engine/common/prefs.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/utils.h" #include "update_engine/connection_manager_interface.h" @@ -299,7 +301,7 @@ OmahaRequestAction::~OmahaRequestAction() {} // Calculates the value to use for the ping days parameter. int OmahaRequestAction::CalculatePingDays(const string& key) { - int days = kNeverPinged; + int days = kPingNeverPinged; int64_t last_ping = 0; if (system_state_->prefs()->GetInt64(key, &last_ping) && last_ping >= 0) { days = (Time::Now() - Time::FromInternalValue(last_ping)).InDays(); @@ -330,8 +332,8 @@ void OmahaRequestAction::InitPingDays() { } bool OmahaRequestAction::ShouldPing() const { - if (ping_active_days_ == kNeverPinged && - ping_roll_call_days_ == kNeverPinged) { + if (ping_active_days_ == kPingNeverPinged && + ping_roll_call_days_ == kPingNeverPinged) { int powerwash_count = system_state_->hardware()->GetPowerwashCount(); if (powerwash_count > 0) { LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because " @@ -412,6 +414,59 @@ int OmahaRequestAction::GetInstallDate(SystemState* system_state) { return num_days; } +// static +void OmahaRequestAction::StorePingReply( + const OmahaParserData& parser_data) const { + for (const auto& app : parser_data.apps) { + auto it = params_->dlc_apps_params().find(app.id); + if (it == params_->dlc_apps_params().end()) + continue; + + const OmahaRequestParams::AppParams& dlc_params = it->second; + + // Skip if the ping for this DLC was not sent. + if (!dlc_params.send_ping) + continue; + + base::FilePath metadata_path = + base::FilePath(params_->dlc_prefs_root()).Append(dlc_params.name); + if (!base::PathExists(metadata_path)) { + LOG(ERROR) << "Metadata path (" << metadata_path.value() << ") " + << "doesn't exist."; + // Skip this DLC if the metadata directory is missing. 
+ continue; + } + + Prefs prefs; + if (!prefs.Init(metadata_path)) { + LOG(ERROR) << "Failed to initialize the preferences path:" + << metadata_path.value() << "."; + continue; + } + // Reset the active metadata value to |kPingInactiveValue|. + // Only write into this file if the file exists, otherwise the file will be + // created with different owner/permissions. + if (prefs.Exists(kPrefsPingActive) && + !prefs.SetInt64(kPrefsPingActive, kPingInactiveValue)) + LOG(ERROR) << "Failed to set the value of ping metadata '" + << kPrefsPingActive << "'."; + + if (!prefs.SetString(kPrefsPingLastRollcall, + parser_data.daystart_elapsed_days)) + LOG(ERROR) << "Failed to set the value of ping metadata '" + << kPrefsPingLastRollcall << "'."; + + if (dlc_params.ping_active) { + // Write the value of elapsed_days into |kPrefsPingLastActive| only if + // the previous ping was an active one. + if (!prefs.SetString(kPrefsPingLastActive, + parser_data.daystart_elapsed_days)) + LOG(ERROR) << "Failed to set the value of ping metadata '" + << kPrefsPingLastActive << "'."; + } + } +} + void OmahaRequestAction::PerformAction() { http_fetcher_->set_delegate(this); InitPingDays(); @@ -922,6 +977,9 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, } } + // Create/update the metadata files for each DLC app received. + StorePingReply(parser_data); + if (!HasOutputPipe()) { // Just set success to whether or not the http transfer succeeded, // which must be true at this point in the code. diff --git a/omaha_request_action.h b/omaha_request_action.h index 3f66de9b..623a704e 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -200,6 +200,10 @@ class OmahaRequestAction : public Action, // send to Omaha and thus we should include them in the response. bool ShouldPing() const; + // Process Omaha's response to a ping request and store the results in the DLC + // metadata directory. + void StorePingReply(const OmahaParserData& parser_data) const; + // Returns true if the download of a new update should be deferred. // False if the update can be downloaded. bool ShouldDeferDownload(OmahaResponse* output_object); diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 7b676e25..d1cb4ed1 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -347,7 +348,7 @@ class OmahaRequestActionTest : public ::testing::Test { request_params_.set_rollback_allowed(false); request_params_.set_is_powerwash_allowed(false); request_params_.set_is_install(false); - request_params_.set_dlc_module_ids({}); + request_params_.set_dlc_apps_params({}); fake_system_state_.set_request_params(&request_params_); fake_system_state_.set_prefs(&fake_prefs_); @@ -367,8 +368,8 @@ class OmahaRequestActionTest : public ::testing::Test { }; } - // This function uses the paramets in |tuc_params_| to do an update check. It - // will fill out |post_str| with the result data and |response| with + // This function uses the parameters in |tuc_params_| to do an update check. + // It will fill out |post_str| with the result data and |response| with // |OmahaResponse|. Returns true iff an output response was obtained from the // |OmahaRequestAction|. If |fail_http_response_code| is non-negative, the // transfer will fail with that code. 
|ping_only| is passed through to the @@ -405,6 +406,12 @@ class OmahaRequestActionTest : public ::testing::Test { bool expected_allow_p2p_for_sharing, const string& expected_p2p_url); + // Helper function used to test the Ping request. + // Create the test directory and setup the Omaha response. + void SetUpStorePingReply(const string& dlc_id, + base::FilePath* metadata_path_dlc, + base::ScopedTempDir* tempdir); + FakeSystemState fake_system_state_; FakeUpdateResponse fake_update_response_; // Used by all tests. @@ -2660,15 +2667,15 @@ TEST_F(OmahaRequestActionTest, TEST_F(OmahaRequestActionTest, InstallTest) { request_params_.set_is_install(true); - request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"}); + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}, + {request_params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}}); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); ASSERT_TRUE(TestUpdateCheck()); - for (const auto& dlc_module_id : request_params_.dlc_module_ids()) { - EXPECT_NE(string::npos, - post_str.find("appid=\"" + fake_update_response_.app_id + "_" + - dlc_module_id + "\"")); + for (const auto& it : request_params_.dlc_apps_params()) { + EXPECT_NE(string::npos, post_str.find("appid=\"" + it.first + "\"")); } EXPECT_NE(string::npos, post_str.find("appid=\"" + fake_update_response_.app_id + "\"")); @@ -2680,14 +2687,16 @@ TEST_F(OmahaRequestActionTest, InstallTest) { updatecheck_count++; pos++; } - EXPECT_EQ(request_params_.dlc_module_ids().size(), updatecheck_count); + EXPECT_EQ(request_params_.dlc_apps_params().size(), updatecheck_count); } TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) { fake_update_response_.multi_app_skip_updatecheck = true; fake_update_response_.multi_app_no_update = false; request_params_.set_is_install(true); - request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"}); + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}, + {request_params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}}); request_params_.set_app_id(fake_update_response_.app_id_skip_updatecheck); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); @@ -2839,4 +2848,106 @@ TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) { EXPECT_EQ(kEolDateInvalid, StringToEolDate(eol_date)); } +void OmahaRequestActionTest::SetUpStorePingReply( + const string& dlc_id, + base::FilePath* metadata_path_dlc, + base::ScopedTempDir* tempdir) { + // Create a uniquely named test directory. 
+ ASSERT_TRUE(tempdir->CreateUniqueTempDir()); + request_params_.set_root(tempdir->GetPath().value()); + *metadata_path_dlc = + base::FilePath(request_params_.dlc_prefs_root()).Append(dlc_id); + ASSERT_TRUE(base::CreateDirectory(*metadata_path_dlc)); + + tuc_params_.http_response = + "\"" + "" + ""; + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; +} + +TEST_F(OmahaRequestActionTest, StorePingReplyNoPing) { + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0; + base::ScopedTempDir tempdir; + SetUpStorePingReply(dlc_id, &metadata_path_dlc0, &tempdir); + int64_t temp_int; + Prefs prefs; + ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); + + OmahaRequestParams::AppParams app_param = {.name = dlc_id}; + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(dlc_id), app_param}}); + + ASSERT_TRUE(TestUpdateCheck()); + // If there was no ping, the metadata files shouldn't exist yet. + EXPECT_FALSE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + EXPECT_FALSE(prefs.GetInt64(kPrefsPingLastActive, &temp_int)); + EXPECT_FALSE(prefs.GetInt64(kPrefsPingLastRollcall, &temp_int)); +} + +TEST_F(OmahaRequestActionTest, StorePingReplyActiveTest) { + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0; + base::ScopedTempDir tempdir; + SetUpStorePingReply(dlc_id, &metadata_path_dlc0, &tempdir); + int64_t temp_int; + Prefs prefs; + ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); + // Create Active value + prefs.SetInt64(kPrefsPingActive, 0); + + OmahaRequestParams::AppParams app_param = { + .active_counting_type = OmahaRequestParams::kDateBased, + .name = dlc_id, + .ping_active = 1, + .send_ping = true}; + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(dlc_id), app_param}}); + + ASSERT_TRUE(TestUpdateCheck()); + EXPECT_TRUE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + EXPECT_EQ(temp_int, kPingInactiveValue); + EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastActive, &temp_int)); + EXPECT_EQ(temp_int, 4763); + EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastRollcall, &temp_int)); + EXPECT_EQ(temp_int, 4763); +} + +TEST_F(OmahaRequestActionTest, StorePingReplyInactiveTest) { + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0; + base::ScopedTempDir tempdir; + SetUpStorePingReply(dlc_id, &metadata_path_dlc0, &tempdir); + int64_t temp_int; + Prefs prefs; + ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); + // Create Active value + prefs.SetInt64(kPrefsPingActive, 0); + + OmahaRequestParams::AppParams app_param = { + .active_counting_type = OmahaRequestParams::kDateBased, + .name = dlc_id, + .ping_active = 0, + .send_ping = true}; + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(dlc_id), app_param}}); + + // Set the previous active value to an older value than 4763. 
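+  // Because this ping reports the DLC as inactive, StorePingReply is expected to leave |kPrefsPingLastActive| at this older value and refresh only |kPrefsPingLastRollcall|, as the assertions below verify.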
+ prefs.SetInt64(kPrefsPingLastActive, 555); + + ASSERT_TRUE(TestUpdateCheck()); + ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); + EXPECT_TRUE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + EXPECT_EQ(temp_int, kPingInactiveValue); + EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastActive, &temp_int)); + EXPECT_EQ(temp_int, 555); + EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastRollcall, &temp_int)); + EXPECT_EQ(temp_int, 4763); +} + } // namespace chromeos_update_engine diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 8439b422..e2bf307a 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -36,8 +36,11 @@ using std::string; namespace chromeos_update_engine { -const int kNeverPinged = -1; const char kNoVersion[] = "0.0.0.0"; +const int kPingNeverPinged = -1; +const int kPingUnknownValue = -2; +const int kPingActiveValue = 1; +const int kPingInactiveValue = 0; bool XmlEncode(const string& input, string* output) { if (std::find_if(input.begin(), input.end(), [](const char c) { @@ -87,7 +90,7 @@ string OmahaRequestBuilderXml::GetPing() const { // |name| and value |ping_days| if |ping_days| has a value that needs // to be sent, or an empty string otherwise. auto GetPingAttribute = [](const char* name, int ping_days) -> string { - if (ping_days > 0 || ping_days == kNeverPinged) + if (ping_days > 0 || ping_days == kPingNeverPinged) return base::StringPrintf(" %s=\"%d\"", name, ping_days); return ""; }; @@ -102,13 +105,45 @@ string OmahaRequestBuilderXml::GetPing() const { return ""; } -string OmahaRequestBuilderXml::GetAppBody(bool skip_updatecheck) const { +string OmahaRequestBuilderXml::GetPingDateBased( + const OmahaRequestParams::AppParams& app_params) const { + if (!app_params.send_ping) + return ""; + string ping_active = ""; + string ping_ad = ""; + if (app_params.ping_active == kPingActiveValue) { + ping_active = + base::StringPrintf(" active=\"%" PRId64 "\"", app_params.ping_active); + ping_ad = base::StringPrintf(" ad=\"%" PRId64 "\"", + app_params.ping_date_last_active); + } + + string ping_rd = base::StringPrintf(" rd=\"%" PRId64 "\"", + app_params.ping_date_last_rollcall); + + return base::StringPrintf(" \n", + ping_active.c_str(), + ping_ad.c_str(), + ping_rd.c_str()); +} + +string OmahaRequestBuilderXml::GetAppBody(const OmahaAppData& app_data) const { string app_body; if (event_ == nullptr) { - if (include_ping_) - app_body = GetPing(); + if (app_data.app_params.send_ping) { + switch (app_data.app_params.active_counting_type) { + case OmahaRequestParams::kDayBased: + app_body = GetPing(); + break; + case OmahaRequestParams::kDateBased: + app_body = GetPingDateBased(app_data.app_params); + break; + default: + NOTREACHED(); + } + } if (!ping_only_) { - if (!skip_updatecheck) { + if (!app_data.skip_update) { app_body += " target_version_prefix().empty()) { app_body += base::StringPrintf( @@ -211,7 +246,7 @@ bool IsValidComponentID(const string& id) { } string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { - string app_body = GetAppBody(app_data.skip_update); + string app_body = GetAppBody(app_data); string app_versions; // If we are downgrading to a more stable channel and we are allowed to do @@ -370,23 +405,28 @@ string OmahaRequestBuilderXml::GetApps() const { .product_components = params_->product_components(), // Skips updatecheck for platform app in case of an install operation. 
.skip_update = params_->is_install(), - .is_dlc = false}; + .is_dlc = false, + + .app_params = {.active_counting_type = OmahaRequestParams::kDayBased, + .send_ping = include_ping_}}; app_xml += GetApp(product_app); if (!params_->system_app_id().empty()) { - OmahaAppData system_app = {.id = params_->system_app_id(), - .version = params_->system_version(), - .skip_update = false, - .is_dlc = false}; + OmahaAppData system_app = { + .id = params_->system_app_id(), + .version = params_->system_version(), + .skip_update = false, + .is_dlc = false, + .app_params = {.active_counting_type = OmahaRequestParams::kDayBased, + .send_ping = include_ping_}}; app_xml += GetApp(system_app); } - // Create APP ID according to |dlc_module_id| (sticking the current AppID to - // the DLC module ID with an underscode). - for (const auto& dlc_module_id : params_->dlc_module_ids()) { + for (const auto& it : params_->dlc_apps_params()) { OmahaAppData dlc_module_app = { - .id = params_->GetAppId() + "_" + dlc_module_id, + .id = it.first, .version = params_->is_install() ? kNoVersion : params_->app_version(), .skip_update = false, - .is_dlc = true}; + .is_dlc = true, + .app_params = it.second}; app_xml += GetApp(dlc_module_app); } return app_xml; diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h index d7a81d34..50c708da 100644 --- a/omaha_request_builder_xml.h +++ b/omaha_request_builder_xml.h @@ -33,13 +33,17 @@ #include "update_engine/common/action.h" #include "update_engine/common/http_fetcher.h" +#include "update_engine/omaha_request_params.h" #include "update_engine/omaha_response.h" #include "update_engine/system_state.h" namespace chromeos_update_engine { -extern const int kNeverPinged; extern const char kNoVersion[]; +extern const int kPingNeverPinged; +extern const int kPingUnknownValue; +extern const int kPingActiveValue; +extern const int kPingInactiveValue; // This struct encapsulates the Omaha event information. For a // complete list of defined event types and results, see @@ -87,6 +91,7 @@ struct OmahaAppData { std::string product_components; bool skip_update; bool is_dlc; + OmahaRequestParams::AppParams app_params; }; // Encodes XML entities in a given string. Input must be ASCII-7 valid. If @@ -158,9 +163,7 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { // Returns an XML that goes into the body of the element of the Omaha // request based on the given parameters. - // The skip_updatecheck argument if set to true will omit the emission of - // the updatecheck xml tag in the body of the element. - std::string GetAppBody(bool skip_updatecheck) const; + std::string GetAppBody(const OmahaAppData& app_data) const; // Returns the cohort* argument to include in the tag for the passed // |arg_name| and |prefs_key|, if any. The return value is suitable to @@ -173,6 +176,11 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { // sent, or an empty string otherwise. std::string GetPing() const; + // Returns an XML ping element if any of the elapsed days need to be + // sent, or an empty string otherwise. 
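+  // For illustration (attribute names follow the implementation; the element
+  // name and exact formatting are assumed): an active DLC whose last active
+  // and rollcall replies carried elapsed_days 4763 yields roughly
+  //   <ping active="1" ad="4763" rd="4763"></ping>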
+ std::string GetPingDateBased( + const OmahaRequestParams::AppParams& app_params) const; + const OmahaEvent* event_; OmahaRequestParams* params_; bool ping_only_; diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 8cf74733..3cf5cc06 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -198,7 +198,9 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) { TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateWithDlcsTest) { OmahaRequestParams omaha_request_params{&fake_system_state_}; - omaha_request_params.set_dlc_module_ids({"dlc_1", "dlc_2"}); + omaha_request_params.set_dlc_apps_params( + {{omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}, + {omaha_request_params.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}}); OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, false, @@ -215,8 +217,10 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateWithDlcsTest) { TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcInstallationTest) { OmahaRequestParams omaha_request_params{&fake_system_state_}; - const vector dlcs = {"dlc_1", "dlc_2"}; - omaha_request_params.set_dlc_module_ids(dlcs); + const std::map dlcs = { + {omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}, + {omaha_request_params.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}}; + omaha_request_params.set_dlc_apps_params(dlcs); omaha_request_params.set_is_install(true); OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, @@ -250,4 +254,69 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcInstallationTest) { } } +TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcNoPing) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + omaha_request_params.set_dlc_apps_params( + {{omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}}); + OmahaRequestBuilderXml omaha_request{nullptr, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + const string request_xml = omaha_request.GetRequest(); + EXPECT_EQ(0, CountSubstringInString(request_xml, " +#include #include #include @@ -26,6 +27,7 @@ #include #include // for FRIEND_TEST +#include "update_engine/common/constants.h" #include "update_engine/common/platform_constants.h" #include "update_engine/image_properties.h" @@ -56,10 +58,26 @@ class OmahaRequestParams { update_check_count_wait_enabled_(false), min_update_checks_needed_(kDefaultMinUpdateChecks), max_update_checks_allowed_(kDefaultMaxUpdateChecks), + dlc_prefs_root_(kDlcMetadataRootpath), is_install_(false) {} virtual ~OmahaRequestParams(); + enum ActiveCountingType { + kDayBased = 0, + kDateBased, + }; + + struct AppParams { + ActiveCountingType active_counting_type; + // |name| is only used for DLCs to store the DLC ID. + std::string name; + int64_t ping_active; + int64_t ping_date_last_active; + int64_t ping_date_last_rollcall; + bool send_ping; + }; + // Setters and getters for the various properties. 
inline std::string os_platform() const { return os_platform_; } inline std::string os_version() const { return os_version_; } @@ -184,12 +202,12 @@ class OmahaRequestParams { inline int64_t max_update_checks_allowed() const { return max_update_checks_allowed_; } - inline void set_dlc_module_ids( - const std::vector& dlc_module_ids) { - dlc_module_ids_ = dlc_module_ids; + inline void set_dlc_apps_params( + const std::map& dlc_apps_params) { + dlc_apps_params_ = dlc_apps_params; } - inline std::vector dlc_module_ids() const { - return dlc_module_ids_; + inline const std::map& dlc_apps_params() const { + return dlc_apps_params_; } inline void set_is_install(bool is_install) { is_install_ = is_install; } inline bool is_install() const { return is_install_; } @@ -201,10 +219,16 @@ class OmahaRequestParams { return autoupdate_token_; } - // Returns the app id corresponding to the current value of the + inline std::string dlc_prefs_root() const { return dlc_prefs_root_; } + + // Returns the App ID corresponding to the current value of the // download channel. virtual std::string GetAppId() const; + // Returns the DLC app ID corresponding to the current value of the + // download channel. + virtual std::string GetDlcAppId(std::string dlc_id) const; + // Suggested defaults static const char kOsVersion[]; static const int64_t kDefaultMinUpdateChecks = 0; @@ -377,8 +401,11 @@ class OmahaRequestParams { // When reading files, prepend root_ to the paths. Useful for testing. std::string root_; - // A list of DLC module IDs to install. - std::vector dlc_module_ids_; + // The metadata/prefs root path for DLCs. + std::string dlc_prefs_root_; + + // A list of DLC modules to install. + std::map dlc_apps_params_; // This variable defines whether the payload is being installed in the current // partition. At the moment, this is used for installing DLC modules on the diff --git a/update_attempter.cc b/update_attempter.cc index f5e2037f..8e320913 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +48,7 @@ #include "update_engine/common/dlcservice_interface.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/platform_constants.h" +#include "update_engine/common/prefs.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/utils.h" @@ -70,6 +73,7 @@ using base::Bind; using base::Callback; +using base::FilePath; using base::Time; using base::TimeDelta; using base::TimeTicks; @@ -427,14 +431,13 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, omaha_request_params_->UpdateDownloadChannel(); } - // Set the |dlc_module_ids_| only for an update. This is required to get the - // currently installed DLC(s). - if (!is_install_ && - !system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) { - LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. 
Check the " - "state of dlcservice, will not update DLC modules."; - } - omaha_request_params_->set_dlc_module_ids(dlc_module_ids_); + // The function |CalculateDlcParams| makes use of the function |GetAppId| from + // |OmahaRequestParams|, so to ensure that the return from |GetAppId| + // doesn't change, no changes to the values |download_channel_|, + // |image_props_.product_id| and |image_props_.canary_product_id| from + // |omaha_request_params_| shall be made below this line. + CalculateDlcParams(); + omaha_request_params_->set_is_install(is_install_); // Set Quick Fix Build token if policy is set and the device is enterprise @@ -653,6 +656,66 @@ void UpdateAttempter::CalculateStagingParams(bool interactive) { } } +int64_t UpdateAttempter::GetPingMetadata( + const PrefsInterface& prefs, const std::string& metadata_name) const { + // The first time a ping is sent, the metadata files containing the values + // sent back by the server still don't exist. A value of -1 is used to + // indicate this. + if (!prefs.Exists(metadata_name)) + return kPingNeverPinged; + + int64_t value; + if (prefs.GetInt64(metadata_name, &value)) + return value; + + // Return -2 when the file exists and there is a problem reading from it, or + // the value cannot be converted to an integer. + return kPingUnknownValue; +} + +void UpdateAttempter::CalculateDlcParams() { + // Set the |dlc_module_ids_| only for an update. This is required to get the + // currently installed DLC(s). + if (!is_install_ && + !system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) { + LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the " + "state of dlcservice, will not update DLC modules."; + } + std::map dlc_apps_params; + for (auto dlc_id : dlc_module_ids_) { + OmahaRequestParams::AppParams dlc_params{ + .active_counting_type = OmahaRequestParams::kDateBased, + .name = dlc_id, + .send_ping = false}; + // Only send the ping when the request is to update DLCs. When installing + // DLCs, we don't want to send the ping yet, since the DLCs might fail to + // install or might not really be active yet. 
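+    // For a DLC with ID "dlc0", for example, the values below would be read
+    // from files under dlc_prefs_root(), e.g. /var/lib/dlc/dlc0/active,
+    // date_last_active and date_last_rollcall (illustrative path; the default
+    // root is kDlcMetadataRootpath).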
+ if (!is_install_) { + base::FilePath metadata_path = + base::FilePath(omaha_request_params_->dlc_prefs_root()) + .Append(dlc_id); + Prefs prefs; + if (!prefs.Init(metadata_path)) { + LOG(ERROR) << "Failed to initialize the preferences path:" + << metadata_path.value() << "."; + } else { + dlc_params.ping_active = kPingActiveValue; + if (!prefs.GetInt64(kPrefsPingActive, &dlc_params.ping_active) || + dlc_params.ping_active != kPingActiveValue) { + dlc_params.ping_active = kPingInactiveValue; + } + dlc_params.ping_date_last_active = + GetPingMetadata(prefs, kPrefsPingLastActive); + dlc_params.ping_date_last_rollcall = + GetPingMetadata(prefs, kPrefsPingLastRollcall); + dlc_params.send_ping = true; + } + } + dlc_apps_params[omaha_request_params_->GetDlcAppId(dlc_id)] = dlc_params; + } + omaha_request_params_->set_dlc_apps_params(dlc_apps_params); +} + void UpdateAttempter::BuildUpdateActions(bool interactive) { CHECK(!processor_->IsRunning()); processor_->set_delegate(this); diff --git a/update_attempter.h b/update_attempter.h index 51b672d0..91e072a2 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -249,6 +249,10 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, ActionCompletedOmahaRequestTest); FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile); FRIEND_TEST(UpdateAttempterTest, BroadcastCompleteDownloadTest); + FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsInstallTest); + FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest); + FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest); + FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsValidValuesTest); FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest); FRIEND_TEST(UpdateAttempterTest, CheckForInstallNotIdleFails); FRIEND_TEST(UpdateAttempterTest, CheckForUpdateAUDlcTest); @@ -430,6 +434,18 @@ class UpdateAttempter : public ActionProcessorDelegate, // Resets interactivity and forced update flags. void ResetInteractivityFlags(); + // Get the integer values from the metadata directory set in |prefs| for + // |kPrefsPingLastActive| or |kPrefsPingLastRollcall|. + // The value is equal to -2 when the value cannot be read or is not numeric. + // The value is equal to -1 the first time it is being sent, which is + // when the metadata file doesn't exist. + int64_t GetPingMetadata(const PrefsInterface& prefs, + const std::string& metadata_name) const; + + // Calculates the update parameters for DLCs. Sets the |dlc_modules_| + // parameter on the |omaha_request_params_| object. + void CalculateDlcParams(); + // Last status notification timestamp used for throttling. Use monotonic // TimeTicks to ensure that notifications are sent even if the system clock is // set back in the middle of an update. diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 4aff897b..d468f56f 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -2278,4 +2278,127 @@ TEST_F(UpdateAttempterTest, FailedEolTest) { EXPECT_EQ(eol_date, status.eol_date); } +TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { + // Create a uniquely named test directory. 
+ base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + + ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + attempter_.is_install_ = true; + attempter_.dlc_module_ids_ = {dlc_id}; + attempter_.CalculateDlcParams(); + + OmahaRequestParams* params = fake_system_state_.request_params(); + EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); + OmahaRequestParams::AppParams dlc_app_params = + params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); + EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str()); + EXPECT_EQ(false, dlc_app_params.send_ping); + // When the DLC gets installed, a ping is not sent, therefore we don't store + // the values sent by Omaha. + EXPECT_FALSE( + base::PathExists(metadata_path_dlc0.Append(kPrefsPingLastActive))); + EXPECT_FALSE( + base::PathExists(metadata_path_dlc0.Append(kPrefsPingLastRollcall))); +} + +TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + .WillOnce( + DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); + + attempter_.is_install_ = false; + attempter_.CalculateDlcParams(); + + OmahaRequestParams* params = fake_system_state_.request_params(); + EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); + OmahaRequestParams::AppParams dlc_app_params = + params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); + EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str()); + + EXPECT_EQ(true, dlc_app_params.send_ping); + EXPECT_EQ(0, dlc_app_params.ping_active); + EXPECT_EQ(-1, dlc_app_params.ping_date_last_active); + EXPECT_EQ(-1, dlc_app_params.ping_date_last_rollcall); +} + +TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + .WillOnce( + DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); + + // Write non numeric values in the metadata files. 
+ base::WriteFile(metadata_path_dlc0.Append(kPrefsPingActive), "z2yz", 4); + base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastActive), "z2yz", 4); + base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastRollcall), "z2yz", 4); + attempter_.is_install_ = false; + attempter_.CalculateDlcParams(); + + OmahaRequestParams* params = fake_system_state_.request_params(); + EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); + OmahaRequestParams::AppParams dlc_app_params = + params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); + EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str()); + + EXPECT_EQ(true, dlc_app_params.send_ping); + EXPECT_EQ(0, dlc_app_params.ping_active); + EXPECT_EQ(-2, dlc_app_params.ping_date_last_active); + EXPECT_EQ(-2, dlc_app_params.ping_date_last_rollcall); +} + +TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + .WillOnce( + DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true))); + + // Write numeric values in the metadata files. + base::WriteFile(metadata_path_dlc0.Append(kPrefsPingActive), "1", 1); + base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastActive), "78", 2); + base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastRollcall), "99", 2); + attempter_.is_install_ = false; + attempter_.CalculateDlcParams(); + + OmahaRequestParams* params = fake_system_state_.request_params(); + EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); + OmahaRequestParams::AppParams dlc_app_params = + params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); + EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str()); + + EXPECT_EQ(true, dlc_app_params.send_ping); + EXPECT_EQ(1, dlc_app_params.ping_active); + EXPECT_EQ(78, dlc_app_params.ping_date_last_active); + EXPECT_EQ(99, dlc_app_params.ping_date_last_rollcall); +} } // namespace chromeos_update_engine From a281c56112e9a620aa1c7edfd32a8aeae82438e4 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 4 Feb 2020 11:54:22 -0800 Subject: [PATCH 206/624] update_engine: Remove EOL_DATE missing warning The warning log is emitted every time update_engine is asked for its status and the prefs value for the EOL date is missing. This log can be removed.
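Behavior is otherwise unchanged: malformed or missing values still map to the invalid sentinel, just without the log line (illustrative, based on the code below):

  EXPECT_EQ(kEolDateInvalid, StringToEolDate(""));
  EXPECT_EQ(kEolDateInvalid, StringToEolDate("not-a-date"));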
BUG=none TEST=unittests Change-Id: Ie9e5494a46b7331d605e690e15d4d0bf936bde9b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2037641 Tested-by: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- omaha_utils.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/omaha_utils.cc b/omaha_utils.cc index 1ef8049a..c7f99217 100644 --- a/omaha_utils.cc +++ b/omaha_utils.cc @@ -29,10 +29,8 @@ std::string EolDateToString(EolDate eol_date) { EolDate StringToEolDate(const std::string& eol_date) { EolDate date = kEolDateInvalid; - if (!base::StringToInt64(eol_date, &date)) { - LOG(WARNING) << "Invalid EOL date attribute: " << eol_date; + if (!base::StringToInt64(eol_date, &date)) return kEolDateInvalid; - } return date; } From 5e9cd71ea8f964c71de657d0a14a2bd0dcc5af75 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 12 Feb 2020 15:28:28 -0800 Subject: [PATCH 207/624] update_engine: Allow deprecation of DLC(s) Currently, platform updates would block if any supported DLC(s) are installed on the device and later CrOS version deprecates any of the installed DLC(s). [INFO:omaha_request_action.cc(501)] Request: [INFO:omaha_request_action.cc(903)] Omaha request response: ... [INFO:omaha_request_action.cc(706)] Found 3 . [INFO:omaha_request_action.cc(811)] Update for test-app-id [INFO:omaha_request_action.cc(811)] Update for test-app-id_dlc-id-1 [INFO:omaha_request_action.cc(794)] No update for test-app-id_dlc-id-2 but update continuing since a DLC. ... BUG=chromium:1050777 TEST=# update_engine unittest Change-Id: I43640f3463aa74c67465a2027bc530794eb73210 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2053077 Tested-by: Jae Hoon Kim Commit-Queue: Amin Hassani Reviewed-by: Amin Hassani --- omaha_request_action.cc | 7 ++++ omaha_request_action_unittest.cc | 56 ++++++++++++++++++++++++++++++++ omaha_request_params.cc | 6 +++- omaha_request_params.h | 11 ++++--- 4 files changed, 75 insertions(+), 5 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index b6b43560..58e7f470 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -788,6 +788,13 @@ bool OmahaRequestAction::ParseStatus(OmahaParserData* parser_data, for (const auto& app : parser_data->apps) { const string& status = app.updatecheck_status; if (status == kValNoUpdate) { + // If the app is a DLC, allow status "noupdate" to support DLC + // deprecations. + if (params_->IsDlcAppId(app.id)) { + LOG(INFO) << "No update for " << app.id + << " but update continuing since a DLC."; + continue; + } // Don't update if any app has status="noupdate". LOG(INFO) << "No update for " << app.id; output_object->update_exists = false; diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index d1cb4ed1..2528f7b7 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -86,6 +86,8 @@ const char kCurrentVersion[] = "0.1.0.0"; const char kTestAppId[] = "test-app-id"; const char kTestAppId2[] = "test-app2-id"; const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck"; +const char kDlcId1[] = "dlc-id-1"; +const char kDlcId2[] = "dlc-id-2"; // This is a helper struct to allow unit tests build an update response with the // values they care about. 
@@ -131,6 +133,8 @@ struct FakeUpdateResponse { } string GetUpdateResponse() const { + chromeos_update_engine::OmahaRequestParams request_params{nullptr}; + request_params.set_app_id(app_id); return "" "" : "") + + (dlc_app_update + ? "" + "" + "" + "" + "" + : "") + + (dlc_app_no_update + ? "" + : "") + ""; } @@ -244,6 +263,10 @@ struct FakeUpdateResponse { bool multi_app_skip_updatecheck = false; // Whether to include more than one package in an app. bool multi_package = false; + // Whether to include a DLC app with updatecheck tag. + bool dlc_app_update = false; + // Whether to include a DLC app with no updatecheck tag. + bool dlc_app_no_update = false; // Whether the payload is a rollback. bool rollback = false; @@ -2688,6 +2711,7 @@ TEST_F(OmahaRequestActionTest, InstallTest) { pos++; } EXPECT_EQ(request_params_.dlc_apps_params().size(), updatecheck_count); + EXPECT_TRUE(response.update_exists); } TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) { @@ -2706,6 +2730,38 @@ TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) { EXPECT_EQ(fake_update_response_.current_version, response.version); } +TEST_F(OmahaRequestActionTest, UpdateWithDlcTest) { + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + fake_update_response_.dlc_app_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_TRUE(response.update_exists); +} + +TEST_F(OmahaRequestActionTest, UpdateWithDeprecatedDlcTest) { + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}}); + fake_update_response_.dlc_app_no_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_TRUE(response.update_exists); +} + +TEST_F(OmahaRequestActionTest, UpdateWithDlcAndDeprecatedDlcTest) { + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}, + {request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}}); + fake_update_response_.dlc_app_update = true; + fake_update_response_.dlc_app_no_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_TRUE(response.update_exists); +} + TEST_F(OmahaRequestActionTest, PastRollbackVersionsNoEntries) { fake_update_response_.rollback = true; fake_update_response_.rollback_allowed_milestones = 4; diff --git a/omaha_request_params.cc b/omaha_request_params.cc index e6d96a4d..1cfbc9c8 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -249,10 +249,14 @@ string OmahaRequestParams::GetAppId() const { : image_props_.product_id; } -string OmahaRequestParams::GetDlcAppId(std::string dlc_id) const { +string OmahaRequestParams::GetDlcAppId(const std::string& dlc_id) const { // Create APP ID according to |dlc_id| (sticking the current AppID to the // DLC module ID with an underscode). return GetAppId() + "_" + dlc_id; } +bool OmahaRequestParams::IsDlcAppId(const std::string& app_id) const { + return dlc_apps_params().find(app_id) != dlc_apps_params().end(); +} + } // namespace chromeos_update_engine diff --git a/omaha_request_params.h b/omaha_request_params.h index b9840029..14f3eaf6 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -225,9 +225,12 @@ class OmahaRequestParams { // download channel. 
virtual std::string GetAppId() const; - // Returns the DLC app ID corresponding to the current value of the - // download channel. - virtual std::string GetDlcAppId(std::string dlc_id) const; + // Returns the DLC app ID. + virtual std::string GetDlcAppId(const std::string& dlc_id) const; + + // Returns true if the App ID is a DLC App ID that is currently part of the + // request parameters. + virtual bool IsDlcAppId(const std::string& app_id) const; // Suggested defaults static const char kOsVersion[]; @@ -404,7 +407,7 @@ class OmahaRequestParams { // The metadata/prefs root path for DLCs. std::string dlc_prefs_root_; - // A list of DLC modules to install. + // A list of DLC modules to install. A mapping from DLC App ID to |AppParams|. std::map dlc_apps_params_; // This variable defines whether the payload is being installed in the current From 5d496765b6f263db78f21827e0b5da8b3b4fe17e Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Fri, 21 Feb 2020 11:42:08 -0800 Subject: [PATCH 208/624] Change the push test path for atest When running atest, tradefed will push the unittest executable and test data files to the test path on the device. And on cuttlefish, we encounter a selinux denial when the test accessing /data/local/tmp. avc: denied { read } for path="/data/local/tmp/update_engine_unittests/x86/gen/disk_ext2_unittest.img" dev="dm-4" ino=2643 scontext=u:r:kernel:s0 tcontext=u:object_r:shell_data_file:s0 tclass=file permissive=0 The directory under /data/nativetest has a different label 'update_engine_data_file' which kernel already has access to. Move the path in the test config so that we can avoid granting the access to shell_data_file. Bug: 149942139 Test: atest update_engine_unittests Change-Id: Id71cb18b80b70f6119fe27b7c9d9e49f86324089 --- test_config.xml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test_config.xml b/test_config.xml index 2639e7f3..fe3cbfda 100644 --- a/test_config.xml +++ b/test_config.xml @@ -16,13 +16,14 @@ diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 18b155b3..f16b7591 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -98,6 +98,11 @@ bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url, nullptr /* brillo::ErrorPtr* */); } +bool DBusUpdateEngineClient::SetDlcActiveValue(bool is_active, + const std::string& dlc_id) { + return proxy_->SetDlcActiveValue(is_active, dlc_id, /*error=*/nullptr); +} + bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const { StatusResult status; if (!proxy_->GetStatusAdvanced(&status, nullptr)) { diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index 6d7784ad..a032d214 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -46,6 +46,8 @@ class DBusUpdateEngineClient : public UpdateEngineClient { bool AttemptInstall(const std::string& omaha_url, const std::vector& dlc_module_ids) override; + bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) override; + bool GetStatus(UpdateEngineStatus* out_status) const override; bool SetCohortHint(const std::string& cohort_hint) override; diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index d13359b6..9bda0b9b 100644 --- a/client_library/include/update_engine/client.h +++ b/client_library/include/update_engine/client.h @@ -63,6 +63,11 @@ class UpdateEngineClient { // Same as above but return the entire struct instead. 
virtual bool GetStatus(UpdateEngineStatus* out_status) const = 0; + // Sets the DLC as active or inactive. When set to active, the ping metadata + // for the DLC is updated accordingly. When set to inactive, the metadata + // for the DLC is deleted. + virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) = 0; + // Getter and setter for the cohort hint. virtual bool SetCohortHint(const std::string& cohort_hint) = 0; virtual bool GetCohortHint(std::string* cohort_hint) const = 0; diff --git a/common/constants.cc b/common/constants.cc index 58cf1b3d..793ce97c 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -18,7 +18,7 @@ namespace chromeos_update_engine { -// Keep this in sync with the one in dlcservice. +// TODO(andrewlassalle): Move this to the prefs directory. const char kDlcMetadataRootpath[] = "/var/lib/dlc/"; const char kPowerwashSafePrefsSubDirectory[] = "update_engine/prefs"; diff --git a/common_service.cc b/common_service.cc index a99d10c8..b94e7340 100644 --- a/common_service.cc +++ b/common_service.cc @@ -143,6 +143,17 @@ bool UpdateEngineService::ResetStatus(ErrorPtr* error) { return true; } +bool UpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error, + bool is_active, + const string& dlc_id) { + if (!system_state_->update_attempter()->SetDlcActiveValue(is_active, + dlc_id)) { + LogAndSetError(error, FROM_HERE, "SetDlcActiveValue failed."); + return false; + } + return true; +} + bool UpdateEngineService::GetStatus(ErrorPtr* error, UpdateEngineStatus* out_status) { if (!system_state_->update_attempter()->GetStatus(out_status)) { diff --git a/common_service.h b/common_service.h index 3349244e..a74c46bf 100644 --- a/common_service.h +++ b/common_service.h @@ -70,6 +70,13 @@ class UpdateEngineService { // update. This is used for development only. bool ResetStatus(brillo::ErrorPtr* error); + // Sets the DLC as active or inactive. When set to active, the ping metadata + // for the DLC is updated accordingly. When set to inactive, the metadata + // for the DLC is deleted. + bool SetDlcActiveValue(brillo::ErrorPtr* error, + bool is_active, + const std::string& dlc_id); + // Returns the current status of the Update Engine. If an update is in // progress, the number of operations, size to download and overall progress // is reported. diff --git a/common_service_unittest.cc b/common_service_unittest.cc index 00c4357e..3dc8a227 100644 --- a/common_service_unittest.cc +++ b/common_service_unittest.cc @@ -100,6 +100,20 @@ TEST_F(UpdateEngineServiceTest, AttemptInstallReturnsFalse) { EXPECT_FALSE(common_service_.AttemptInstall(&error_, "", {})); } +TEST_F(UpdateEngineServiceTest, SetDlcActiveValue) { + EXPECT_CALL(*mock_update_attempter_, SetDlcActiveValue(_, _)) + .WillOnce(Return(true)); + + EXPECT_TRUE(common_service_.SetDlcActiveValue(&error_, true, "dlc0")); +} + +TEST_F(UpdateEngineServiceTest, SetDlcActiveValueReturnsFalse) { + EXPECT_CALL(*mock_update_attempter_, SetDlcActiveValue(_, _)) + .WillOnce(Return(false)); + + EXPECT_FALSE(common_service_.SetDlcActiveValue(&error_, true, "dlc0")); +} + // SetChannel is allowed when there's no device policy (the device is not // enterprise enrolled). 
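A minimal caller-side sketch of the client API added above, assuming an already-constructed update_engine::UpdateEngineClient; the wrapper function and its name are made up for the example:

#include <string>

#include "update_engine/client.h"

// Marks a DLC active (its ping metadata is created/updated) or inactive (the
// metadata is deleted) by forwarding to the daemon's new SetDlcActiveValue
// D-Bus method.
bool MarkDlcActive(update_engine::UpdateEngineClient* client,
                   const std::string& dlc_id,
                   bool is_active) {
  return client->SetDlcActiveValue(is_active, dlc_id);
}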
TEST_F(UpdateEngineServiceTest, SetChannelWithNoPolicy) { diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index afa34d76..51457e5e 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -51,6 +51,18 @@ + + + + If the DLC is being set to active or inactive. + + + + + The ID of the DLC module that will be set to active/inactive. + + + diff --git a/dbus_service.cc b/dbus_service.cc index b1cc2980..cd714881 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -110,6 +110,12 @@ bool DBusUpdateEngineService::ResetStatus(ErrorPtr* error) { return common_->ResetStatus(error); } +bool DBusUpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error, + bool is_active, + const string& dlc_id) { + return common_->SetDlcActiveValue(error, is_active, dlc_id); +} + bool DBusUpdateEngineService::GetStatusAdvanced(ErrorPtr* error, StatusResult* out_status) { UpdateEngineStatus status; diff --git a/dbus_service.h b/dbus_service.h index 28ba268f..86f5b93c 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -64,6 +64,13 @@ class DBusUpdateEngineService // update. This is used for development only. bool ResetStatus(brillo::ErrorPtr* error) override; + // Sets the DLC as active or inactive. When set to active, the ping metadata + // for the DLC is updated accordingly. When set to inactive, the metadata + // for the DLC is deleted. + bool SetDlcActiveValue(brillo::ErrorPtr* error, + bool is_active, + const std::string& dlc_id) override; + // Similar to Above, but returns a protobuffer instead. In the future it will // have more features and is easily extendable. bool GetStatusAdvanced(brillo::ErrorPtr* error, diff --git a/mock_update_attempter.h b/mock_update_attempter.h index c39fb621..9d966d73 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -58,6 +58,8 @@ class MockUpdateAttempter : public UpdateAttempter { bool(const std::vector& dlc_module_ids, const std::string& omaha_url)); + MOCK_METHOD2(SetDlcActiveValue, bool(bool, const std::string&)); + MOCK_METHOD0(RefreshDevicePolicy, void(void)); MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void)); diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 58e7f470..8890c7c9 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -414,7 +414,6 @@ int OmahaRequestAction::GetInstallDate(SystemState* system_state) { return num_days; } -// static void OmahaRequestAction::StorePingReply( const OmahaParserData& parser_data) const { for (const auto& app : parser_data.apps) { @@ -430,24 +429,15 @@ void OmahaRequestAction::StorePingReply( base::FilePath metadata_path = base::FilePath(params_->dlc_prefs_root()).Append(dlc_params.name); - if (!base::PathExists(metadata_path)) { - LOG(ERROR) << "Metadata path (" << metadata_path.value() << ") " - << "doesn't exist."; - // Skip this DLC if the metadata directory is missing. - continue; - } Prefs prefs; - if (!prefs.Init(metadata_path)) { + if (!base::CreateDirectory(metadata_path) || !prefs.Init(metadata_path)) { LOG(ERROR) << "Failed to initialize the preferences path:" << metadata_path.value() << "."; continue; } // Reset the active metadata value to |kPingInactiveValue|. - // Only write into this file if the file exists, otherwise the file will be - // created with different owner/permissions. 
- if (prefs.Exists(kPrefsPingActive) && - !prefs.SetInt64(kPrefsPingActive, kPingInactiveValue)) + if (!prefs.SetInt64(kPrefsPingActive, kPingInactiveValue)) LOG(ERROR) << "Failed to set the value of ping metadata '" << kPrefsPingActive << "'."; diff --git a/update_attempter.cc b/update_attempter.cc index 8e320913..f5885c91 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -22,11 +22,13 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -656,6 +658,47 @@ void UpdateAttempter::CalculateStagingParams(bool interactive) { } } +bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) { + if (dlc_id.empty()) { + LOG(ERROR) << "Empty DLC ID passed."; + return false; + } + LOG(INFO) << "Set DLC (" << dlc_id << ") to " + << (is_active ? "Active" : "Inactive"); + // TODO(andrewlassalle): Should dlc_prefs_root be in systemstate instead of + // omaha_request_params_? + base::FilePath metadata_path = + base::FilePath(omaha_request_params_->dlc_prefs_root()).Append(dlc_id); + if (is_active) { + base::File::Error error; + if (!base::CreateDirectoryAndGetError(metadata_path, &error)) { + PLOG(ERROR) << "Failed to create metadata directory for DLC (" << dlc_id + << "). Error:" << error; + return false; + } + + Prefs prefs; + if (!prefs.Init(metadata_path)) { + LOG(ERROR) << "Failed to initialize the preferences path:" + << metadata_path.value() << "."; + return false; + } + + if (!prefs.SetInt64(kPrefsPingActive, kPingActiveValue)) { + LOG(ERROR) << "Failed to set the value of ping metadata '" + << kPrefsPingActive << "'."; + return false; + } + } else { + if (!base::DeleteFile(metadata_path, true)) { + PLOG(ERROR) << "Failed to delete metadata directory(" + << metadata_path.value() << ") for DLC (" << dlc_id << ")."; + return false; + } + } + return true; +} + int64_t UpdateAttempter::GetPingMetadata( const PrefsInterface& prefs, const std::string& metadata_name) const { // The first time a ping is sent, the metadata files containing the values @@ -681,6 +724,23 @@ void UpdateAttempter::CalculateDlcParams() { LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the " "state of dlcservice, will not update DLC modules."; } + base::FilePath metadata_root_path = + base::FilePath(omaha_request_params_->dlc_prefs_root()); + // Cleanup any leftover metadata for DLCs which don't exist. + base::FileEnumerator dir_enum(metadata_root_path, + false /* recursive */, + base::FileEnumerator::DIRECTORIES); + std::unordered_set dlc_ids(dlc_module_ids_.begin(), + dlc_module_ids_.end()); + for (base::FilePath name = dir_enum.Next(); !name.empty(); + name = dir_enum.Next()) { + string id = name.BaseName().value(); + if (dlc_ids.find(id) == dlc_ids.end()) { + LOG(INFO) << "Deleting stale metadata for DLC:" << id; + if (!base::DeleteFile(name, true)) + PLOG(WARNING) << "Failed to delete DLC prefs path:" << name.value(); + } + } std::map dlc_apps_params; for (auto dlc_id : dlc_module_ids_) { OmahaRequestParams::AppParams dlc_params{ @@ -691,11 +751,9 @@ void UpdateAttempter::CalculateDlcParams() { // DLCs, we don't want to send the ping yet, since the DLCs might fail to // install or might not really be active yet. 
if (!is_install_) { - base::FilePath metadata_path = - base::FilePath(omaha_request_params_->dlc_prefs_root()) - .Append(dlc_id); + base::FilePath metadata_path = metadata_root_path.Append(dlc_id); Prefs prefs; - if (!prefs.Init(metadata_path)) { + if (!base::CreateDirectory(metadata_path) || !prefs.Init(metadata_path)) { LOG(ERROR) << "Failed to initialize the preferences path:" << metadata_path.value() << "."; } else { diff --git a/update_attempter.h b/update_attempter.h index 91e072a2..c364de36 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -162,6 +162,9 @@ class UpdateAttempter : public ActionProcessorDelegate, // UPDATED_NEED_REBOOT. Returns true on success, false otherwise. bool RebootIfNeeded(); + // Sets the DLC as active or inactive. See common_service.h + virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id); + // DownloadActionDelegate methods: void BytesReceived(uint64_t bytes_progressed, uint64_t bytes_received, @@ -253,6 +256,7 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest); FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest); FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsValidValuesTest); + FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata); FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest); FRIEND_TEST(UpdateAttempterTest, CheckForInstallNotIdleFails); FRIEND_TEST(UpdateAttempterTest, CheckForUpdateAUDlcTest); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index d468f56f..d65a5563 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -2401,4 +2401,60 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { EXPECT_EQ(78, dlc_app_params.ping_date_last_active); EXPECT_EQ(99, dlc_app_params.ping_date_last_rollcall); } + +TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) { + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + base::FilePath metadata_path_dlc_stale = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append("stale"); + ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc_stale)); + EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + .WillOnce( + DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); + + attempter_.is_install_ = false; + attempter_.CalculateDlcParams(); + + EXPECT_TRUE(base::PathExists(metadata_path_dlc0)); + EXPECT_FALSE(base::PathExists(metadata_path_dlc_stale)); +} + +TEST_F(UpdateAttempterTest, SetDlcActiveValue) { + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + attempter_.SetDlcActiveValue(true, dlc_id); + Prefs prefs; + ASSERT_TRUE(base::PathExists(metadata_path_dlc0)); + ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); + int64_t temp_int; + EXPECT_TRUE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + EXPECT_EQ(temp_int, kPingActiveValue); +} + +TEST_F(UpdateAttempterTest, SetDlcInactive) { + 
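  // (Covers the "inactive" path added above: SetDlcActiveValue(false, ...) is
  //  expected to delete the DLC's whole metadata directory rather than just
  //  reset kPrefsPingActive, so the directory created below must be gone
  //  afterwards.)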
base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + string dlc_id = "dlc0"; + base::FilePath metadata_path_dlc0 = + base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) + .Append(dlc_id); + base::CreateDirectory(metadata_path_dlc0); + EXPECT_TRUE(base::PathExists(metadata_path_dlc0)); + attempter_.SetDlcActiveValue(false, dlc_id); + EXPECT_FALSE(base::PathExists(metadata_path_dlc0)); +} + } // namespace chromeos_update_engine From 37ae85653198123fc28a4ce1de25d2e56ee6a3a2 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 25 Mar 2020 15:16:01 -0700 Subject: [PATCH 248/624] Allow parsing boot image with header version 3 The boot image header version no longer contains a page_size field; and it assumes the block size is always 4096. Also, the order of the header fields are different in version 3 from the previous versions. While the position of "header_version" is fixed at position #9 across all image headers. This cl handles header version 3 separately from the previous versions. Also create a stub implementation since the parsing of boot image is only used in android. Bug: 152371989 Test: unit tests, generate incremental OTA for the affected package, build with stub file Change-Id: Iea2145e0c234216f9ee42c571bd53dc93b4b9d2e --- Android.bp | 3 + payload_generator/boot_img_filesystem.cc | 68 ++++++++++++++++--- payload_generator/boot_img_filesystem.h | 20 +----- payload_generator/boot_img_filesystem_stub.cc | 48 +++++++++++++ .../boot_img_filesystem_unittest.cc | 58 +++++++++++++--- 5 files changed, 160 insertions(+), 37 deletions(-) create mode 100644 payload_generator/boot_img_filesystem_stub.cc diff --git a/Android.bp b/Android.bp index 21ba5071..e3116f57 100644 --- a/Android.bp +++ b/Android.bp @@ -492,6 +492,9 @@ cc_defaults { "update_metadata-protos_exports", ], + header_libs: [ + "bootimg_headers", + ], static_libs: [ "libavb", "libbrotli", diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc index 19de4106..89b175ec 100644 --- a/payload_generator/boot_img_filesystem.cc +++ b/payload_generator/boot_img_filesystem.cc @@ -17,6 +17,7 @@ #include "update_engine/payload_generator/boot_img_filesystem.h" #include +#include #include #include @@ -35,16 +36,61 @@ unique_ptr BootImgFilesystem::CreateFromFile( if (filename.empty()) return nullptr; - brillo::Blob header; - if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) || - header.size() != sizeof(boot_img_hdr) || - memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) { + if (brillo::Blob header_magic; + !utils::ReadFileChunk(filename, 0, BOOT_MAGIC_SIZE, &header_magic) || + memcmp(header_magic.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) { + return nullptr; + } + + // The order of image header fields are different in version 3 from the + // previous versions. But the position of "header_version" is fixed at #9 + // across all image headers. 
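  // (Concretely, that fixed position works out to
  //  BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t) = 8 + 32 = 40 bytes from the
  //  start of the image, which is what header_version_offset below
  //  evaluates to.)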
+ // See details in system/tools/mkbootimg/include/bootimg/bootimg.h + constexpr size_t header_version_offset = + BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t); + brillo::Blob header_version_blob; + if (!utils::ReadFileChunk(filename, + header_version_offset, + sizeof(uint32_t), + &header_version_blob)) { + return nullptr; + } + uint32_t header_version = + *reinterpret_cast(header_version_blob.data()); + if (header_version > 3) { + LOG(WARNING) << "Boot image header version " << header_version + << " isn't supported for parsing"; + return nullptr; + } + + // Read the bytes of boot image header based on the header version. + size_t header_size = + header_version == 3 ? sizeof(boot_img_hdr_v3) : sizeof(boot_img_hdr_v0); + brillo::Blob header_blob; + if (!utils::ReadFileChunk(filename, 0, header_size, &header_blob)) { return nullptr; } unique_ptr result(new BootImgFilesystem()); result->filename_ = filename; - memcpy(&result->hdr_, header.data(), header.size()); + if (header_version < 3) { + auto hdr_v0 = reinterpret_cast(header_blob.data()); + CHECK_EQ(0, memcmp(hdr_v0->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE)); + CHECK_LT(hdr_v0->header_version, 3u); + result->kernel_size_ = hdr_v0->kernel_size; + result->ramdisk_size_ = hdr_v0->ramdisk_size; + result->page_size_ = hdr_v0->page_size; + } else { + auto hdr_v3 = reinterpret_cast(header_blob.data()); + CHECK_EQ(0, memcmp(hdr_v3->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE)); + CHECK_EQ(3u, hdr_v3->header_version); + result->kernel_size_ = hdr_v3->kernel_size; + result->ramdisk_size_ = hdr_v3->ramdisk_size; + result->page_size_ = 4096; + } + + CHECK_GT(result->page_size_, 0u); + return result; } @@ -87,13 +133,13 @@ bool BootImgFilesystem::GetFiles(vector* files) const { files->clear(); const uint64_t file_size = utils::FileSize(filename_); // The first page is header. - uint64_t offset = hdr_.page_size; - if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) { - files->emplace_back(GetFile("", offset, hdr_.kernel_size)); + uint64_t offset = page_size_; + if (kernel_size_ > 0 && offset + kernel_size_ <= file_size) { + files->emplace_back(GetFile("", offset, kernel_size_)); } - offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size); - if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) { - files->emplace_back(GetFile("", offset, hdr_.ramdisk_size)); + offset += utils::RoundUp(kernel_size_, page_size_); + if (ramdisk_size_ > 0 && offset + ramdisk_size_ <= file_size) { + files->emplace_back(GetFile("", offset, ramdisk_size_)); } return true; } diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h index 87725d42..61f755c4 100644 --- a/payload_generator/boot_img_filesystem.h +++ b/payload_generator/boot_img_filesystem.h @@ -52,23 +52,9 @@ class BootImgFilesystem : public FilesystemInterface { // The boot.img file path. std::string filename_; -// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h -#define BOOT_MAGIC "ANDROID!" -#define BOOT_MAGIC_SIZE 8 - struct boot_img_hdr { - // Must be BOOT_MAGIC. 
- uint8_t magic[BOOT_MAGIC_SIZE]; - uint32_t kernel_size; /* size in bytes */ - uint32_t kernel_addr; /* physical load addr */ - uint32_t ramdisk_size; /* size in bytes */ - uint32_t ramdisk_addr; /* physical load addr */ - uint32_t second_size; /* size in bytes */ - uint32_t second_addr; /* physical load addr */ - uint32_t tags_addr; /* physical addr for kernel tags */ - uint32_t page_size; /* flash page size we assume */ - } __attribute__((packed)); - // The boot image header. - boot_img_hdr hdr_; + uint32_t kernel_size_; /* size in bytes */ + uint32_t ramdisk_size_; /* size in bytes */ + uint32_t page_size_; /* flash page size we assume */ DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem); }; diff --git a/payload_generator/boot_img_filesystem_stub.cc b/payload_generator/boot_img_filesystem_stub.cc new file mode 100644 index 00000000..4928fa1f --- /dev/null +++ b/payload_generator/boot_img_filesystem_stub.cc @@ -0,0 +1,48 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_generator/boot_img_filesystem.h" + +namespace chromeos_update_engine { +std::unique_ptr BootImgFilesystem::CreateFromFile( + const std::string& /* filename */) { + return nullptr; +} + +size_t BootImgFilesystem::GetBlockSize() const { + return 4096; +} + +size_t BootImgFilesystem::GetBlockCount() const { + return 0; +} + +FilesystemInterface::File BootImgFilesystem::GetFile( + const std::string& /* name */, + uint64_t /* offset */, + uint64_t /* size */) const { + return {}; +} + +bool BootImgFilesystem::GetFiles(std::vector* /* files */) const { + return false; +} + +bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* /* store */) const { + return false; +} + +} // namespace chromeos_update_engine diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc index b1e0d992..0b115e02 100644 --- a/payload_generator/boot_img_filesystem_unittest.cc +++ b/payload_generator/boot_img_filesystem_unittest.cc @@ -18,6 +18,7 @@ #include +#include #include #include @@ -32,18 +33,32 @@ using std::vector; class BootImgFilesystemTest : public ::testing::Test { protected: brillo::Blob GetBootImg(const brillo::Blob& kernel, - const brillo::Blob& ramdisk) { + const brillo::Blob& ramdisk, + bool header_version3 = false) { brillo::Blob boot_img(16 * 1024); - BootImgFilesystem::boot_img_hdr hdr; - memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE); - hdr.kernel_size = kernel.size(); - hdr.ramdisk_size = ramdisk.size(); - hdr.page_size = 4096; + constexpr uint32_t page_size = 4096; + size_t offset = 0; - memcpy(boot_img.data() + offset, &hdr, sizeof(hdr)); - offset += utils::RoundUp(sizeof(hdr), hdr.page_size); + if (header_version3) { + boot_img_hdr_v3 hdr_v3{}; + memcpy(hdr_v3.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE); + hdr_v3.kernel_size = kernel.size(); + hdr_v3.ramdisk_size = ramdisk.size(); + hdr_v3.header_version = 3; + memcpy(boot_img.data() + offset, &hdr_v3, sizeof(hdr_v3)); + 
offset += utils::RoundUp(sizeof(hdr_v3), page_size); + } else { + boot_img_hdr_v0 hdr_v0{}; + memcpy(hdr_v0.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE); + hdr_v0.kernel_size = kernel.size(); + hdr_v0.ramdisk_size = ramdisk.size(); + hdr_v0.page_size = page_size; + hdr_v0.header_version = 0; + memcpy(boot_img.data() + offset, &hdr_v0, sizeof(hdr_v0)); + offset += utils::RoundUp(sizeof(hdr_v0), page_size); + } memcpy(boot_img.data() + offset, kernel.data(), kernel.size()); - offset += utils::RoundUp(kernel.size(), hdr.page_size); + offset += utils::RoundUp(kernel.size(), page_size); memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size()); return boot_img; } @@ -76,6 +91,31 @@ TEST_F(BootImgFilesystemTest, SimpleTest) { EXPECT_TRUE(files[1].deflates.empty()); } +TEST_F(BootImgFilesystemTest, ImageHeaderVersion3) { + test_utils::WriteFileVector( + boot_file_.path(), + GetBootImg(brillo::Blob(1000, 'k'), brillo::Blob(5000, 'r'), true)); + unique_ptr fs = + BootImgFilesystem::CreateFromFile(boot_file_.path()); + EXPECT_NE(nullptr, fs); + + vector files; + EXPECT_TRUE(fs->GetFiles(&files)); + ASSERT_EQ(2u, files.size()); + + EXPECT_EQ("", files[0].name); + EXPECT_EQ(1u, files[0].extents.size()); + EXPECT_EQ(1u, files[0].extents[0].start_block()); + EXPECT_EQ(1u, files[0].extents[0].num_blocks()); + EXPECT_TRUE(files[0].deflates.empty()); + + EXPECT_EQ("", files[1].name); + EXPECT_EQ(1u, files[1].extents.size()); + EXPECT_EQ(2u, files[1].extents[0].start_block()); + EXPECT_EQ(2u, files[1].extents[0].num_blocks()); + EXPECT_TRUE(files[1].deflates.empty()); +} + TEST_F(BootImgFilesystemTest, BadImageTest) { brillo::Blob boot_img = GetBootImg({}, {}); boot_img[7] = '?'; From 9406118b6d486901f6ae8f042d0761ed2206d611 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 25 Mar 2020 15:16:01 -0700 Subject: [PATCH 249/624] Allow parsing boot image with header version 3 The boot image header version no longer contains a page_size field; and it assumes the block size is always 4096. Also, the order of the header fields are different in version 3 from the previous versions. While the position of "header_version" is fixed at position #9 across all image headers. This cl handles header version 3 separately from the previous versions. Also create a stub implementation since the parsing of boot image is only used in android. 
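As a rough standalone illustration of the layout assumption (not code from this change), the header version can be read straight out of a raw boot.img at that fixed offset; the file handling below is made up for the example and assumes a little-endian host:

#include <cstdint>
#include <cstring>
#include <fstream>
#include <iostream>

int main(int argc, char** argv) {
  if (argc != 2) return 1;
  std::ifstream img(argv[1], std::ios::binary);
  char magic[8] = {};
  if (!img.read(magic, sizeof(magic)) || memcmp(magic, "ANDROID!", 8) != 0) {
    std::cerr << "not an Android boot image\n";
    return 1;
  }
  // header_version lives right after the magic plus eight 32-bit fields,
  // i.e. at byte offset 8 + 8 * 4 = 40, for header versions 0 through 3.
  uint32_t header_version = 0;
  img.seekg(40);
  img.read(reinterpret_cast<char*>(&header_version), sizeof(header_version));
  if (!img) return 1;
  std::cout << "header_version=" << header_version << "\n";
  return 0;
}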
Bug: 152371989 Test: unit tests, generate incremental OTA for the affected package, build with stub file Change-Id: Iea2145e0c234216f9ee42c571bd53dc93b4b9d2e (cherry picked from commit 37ae85653198123fc28a4ce1de25d2e56ee6a3a2) --- Android.bp | 3 + payload_generator/boot_img_filesystem.cc | 68 ++++++++++++++++--- payload_generator/boot_img_filesystem.h | 20 +----- payload_generator/boot_img_filesystem_stub.cc | 48 +++++++++++++ .../boot_img_filesystem_unittest.cc | 58 +++++++++++++--- 5 files changed, 160 insertions(+), 37 deletions(-) create mode 100644 payload_generator/boot_img_filesystem_stub.cc diff --git a/Android.bp b/Android.bp index 21ba5071..e3116f57 100644 --- a/Android.bp +++ b/Android.bp @@ -492,6 +492,9 @@ cc_defaults { "update_metadata-protos_exports", ], + header_libs: [ + "bootimg_headers", + ], static_libs: [ "libavb", "libbrotli", diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc index 19de4106..89b175ec 100644 --- a/payload_generator/boot_img_filesystem.cc +++ b/payload_generator/boot_img_filesystem.cc @@ -17,6 +17,7 @@ #include "update_engine/payload_generator/boot_img_filesystem.h" #include +#include #include #include @@ -35,16 +36,61 @@ unique_ptr BootImgFilesystem::CreateFromFile( if (filename.empty()) return nullptr; - brillo::Blob header; - if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) || - header.size() != sizeof(boot_img_hdr) || - memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) { + if (brillo::Blob header_magic; + !utils::ReadFileChunk(filename, 0, BOOT_MAGIC_SIZE, &header_magic) || + memcmp(header_magic.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) { + return nullptr; + } + + // The order of image header fields are different in version 3 from the + // previous versions. But the position of "header_version" is fixed at #9 + // across all image headers. + // See details in system/tools/mkbootimg/include/bootimg/bootimg.h + constexpr size_t header_version_offset = + BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t); + brillo::Blob header_version_blob; + if (!utils::ReadFileChunk(filename, + header_version_offset, + sizeof(uint32_t), + &header_version_blob)) { + return nullptr; + } + uint32_t header_version = + *reinterpret_cast(header_version_blob.data()); + if (header_version > 3) { + LOG(WARNING) << "Boot image header version " << header_version + << " isn't supported for parsing"; + return nullptr; + } + + // Read the bytes of boot image header based on the header version. + size_t header_size = + header_version == 3 ? 
sizeof(boot_img_hdr_v3) : sizeof(boot_img_hdr_v0); + brillo::Blob header_blob; + if (!utils::ReadFileChunk(filename, 0, header_size, &header_blob)) { return nullptr; } unique_ptr result(new BootImgFilesystem()); result->filename_ = filename; - memcpy(&result->hdr_, header.data(), header.size()); + if (header_version < 3) { + auto hdr_v0 = reinterpret_cast(header_blob.data()); + CHECK_EQ(0, memcmp(hdr_v0->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE)); + CHECK_LT(hdr_v0->header_version, 3u); + result->kernel_size_ = hdr_v0->kernel_size; + result->ramdisk_size_ = hdr_v0->ramdisk_size; + result->page_size_ = hdr_v0->page_size; + } else { + auto hdr_v3 = reinterpret_cast(header_blob.data()); + CHECK_EQ(0, memcmp(hdr_v3->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE)); + CHECK_EQ(3u, hdr_v3->header_version); + result->kernel_size_ = hdr_v3->kernel_size; + result->ramdisk_size_ = hdr_v3->ramdisk_size; + result->page_size_ = 4096; + } + + CHECK_GT(result->page_size_, 0u); + return result; } @@ -87,13 +133,13 @@ bool BootImgFilesystem::GetFiles(vector* files) const { files->clear(); const uint64_t file_size = utils::FileSize(filename_); // The first page is header. - uint64_t offset = hdr_.page_size; - if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) { - files->emplace_back(GetFile("", offset, hdr_.kernel_size)); + uint64_t offset = page_size_; + if (kernel_size_ > 0 && offset + kernel_size_ <= file_size) { + files->emplace_back(GetFile("", offset, kernel_size_)); } - offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size); - if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) { - files->emplace_back(GetFile("", offset, hdr_.ramdisk_size)); + offset += utils::RoundUp(kernel_size_, page_size_); + if (ramdisk_size_ > 0 && offset + ramdisk_size_ <= file_size) { + files->emplace_back(GetFile("", offset, ramdisk_size_)); } return true; } diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h index 87725d42..61f755c4 100644 --- a/payload_generator/boot_img_filesystem.h +++ b/payload_generator/boot_img_filesystem.h @@ -52,23 +52,9 @@ class BootImgFilesystem : public FilesystemInterface { // The boot.img file path. std::string filename_; -// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h -#define BOOT_MAGIC "ANDROID!" -#define BOOT_MAGIC_SIZE 8 - struct boot_img_hdr { - // Must be BOOT_MAGIC. - uint8_t magic[BOOT_MAGIC_SIZE]; - uint32_t kernel_size; /* size in bytes */ - uint32_t kernel_addr; /* physical load addr */ - uint32_t ramdisk_size; /* size in bytes */ - uint32_t ramdisk_addr; /* physical load addr */ - uint32_t second_size; /* size in bytes */ - uint32_t second_addr; /* physical load addr */ - uint32_t tags_addr; /* physical addr for kernel tags */ - uint32_t page_size; /* flash page size we assume */ - } __attribute__((packed)); - // The boot image header. 
- boot_img_hdr hdr_; + uint32_t kernel_size_; /* size in bytes */ + uint32_t ramdisk_size_; /* size in bytes */ + uint32_t page_size_; /* flash page size we assume */ DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem); }; diff --git a/payload_generator/boot_img_filesystem_stub.cc b/payload_generator/boot_img_filesystem_stub.cc new file mode 100644 index 00000000..4928fa1f --- /dev/null +++ b/payload_generator/boot_img_filesystem_stub.cc @@ -0,0 +1,48 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_generator/boot_img_filesystem.h" + +namespace chromeos_update_engine { +std::unique_ptr BootImgFilesystem::CreateFromFile( + const std::string& /* filename */) { + return nullptr; +} + +size_t BootImgFilesystem::GetBlockSize() const { + return 4096; +} + +size_t BootImgFilesystem::GetBlockCount() const { + return 0; +} + +FilesystemInterface::File BootImgFilesystem::GetFile( + const std::string& /* name */, + uint64_t /* offset */, + uint64_t /* size */) const { + return {}; +} + +bool BootImgFilesystem::GetFiles(std::vector* /* files */) const { + return false; +} + +bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* /* store */) const { + return false; +} + +} // namespace chromeos_update_engine diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc index b1e0d992..0b115e02 100644 --- a/payload_generator/boot_img_filesystem_unittest.cc +++ b/payload_generator/boot_img_filesystem_unittest.cc @@ -18,6 +18,7 @@ #include +#include #include #include @@ -32,18 +33,32 @@ using std::vector; class BootImgFilesystemTest : public ::testing::Test { protected: brillo::Blob GetBootImg(const brillo::Blob& kernel, - const brillo::Blob& ramdisk) { + const brillo::Blob& ramdisk, + bool header_version3 = false) { brillo::Blob boot_img(16 * 1024); - BootImgFilesystem::boot_img_hdr hdr; - memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE); - hdr.kernel_size = kernel.size(); - hdr.ramdisk_size = ramdisk.size(); - hdr.page_size = 4096; + constexpr uint32_t page_size = 4096; + size_t offset = 0; - memcpy(boot_img.data() + offset, &hdr, sizeof(hdr)); - offset += utils::RoundUp(sizeof(hdr), hdr.page_size); + if (header_version3) { + boot_img_hdr_v3 hdr_v3{}; + memcpy(hdr_v3.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE); + hdr_v3.kernel_size = kernel.size(); + hdr_v3.ramdisk_size = ramdisk.size(); + hdr_v3.header_version = 3; + memcpy(boot_img.data() + offset, &hdr_v3, sizeof(hdr_v3)); + offset += utils::RoundUp(sizeof(hdr_v3), page_size); + } else { + boot_img_hdr_v0 hdr_v0{}; + memcpy(hdr_v0.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE); + hdr_v0.kernel_size = kernel.size(); + hdr_v0.ramdisk_size = ramdisk.size(); + hdr_v0.page_size = page_size; + hdr_v0.header_version = 0; + memcpy(boot_img.data() + offset, &hdr_v0, sizeof(hdr_v0)); + offset += utils::RoundUp(sizeof(hdr_v0), page_size); + } memcpy(boot_img.data() + offset, kernel.data(), kernel.size()); - offset += 
utils::RoundUp(kernel.size(), hdr.page_size); + offset += utils::RoundUp(kernel.size(), page_size); memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size()); return boot_img; } @@ -76,6 +91,31 @@ TEST_F(BootImgFilesystemTest, SimpleTest) { EXPECT_TRUE(files[1].deflates.empty()); } +TEST_F(BootImgFilesystemTest, ImageHeaderVersion3) { + test_utils::WriteFileVector( + boot_file_.path(), + GetBootImg(brillo::Blob(1000, 'k'), brillo::Blob(5000, 'r'), true)); + unique_ptr fs = + BootImgFilesystem::CreateFromFile(boot_file_.path()); + EXPECT_NE(nullptr, fs); + + vector files; + EXPECT_TRUE(fs->GetFiles(&files)); + ASSERT_EQ(2u, files.size()); + + EXPECT_EQ("", files[0].name); + EXPECT_EQ(1u, files[0].extents.size()); + EXPECT_EQ(1u, files[0].extents[0].start_block()); + EXPECT_EQ(1u, files[0].extents[0].num_blocks()); + EXPECT_TRUE(files[0].deflates.empty()); + + EXPECT_EQ("", files[1].name); + EXPECT_EQ(1u, files[1].extents.size()); + EXPECT_EQ(2u, files[1].extents[0].start_block()); + EXPECT_EQ(2u, files[1].extents[0].num_blocks()); + EXPECT_TRUE(files[1].deflates.empty()); +} + TEST_F(BootImgFilesystemTest, BadImageTest) { brillo::Blob boot_img = GetBootImg({}, {}); boot_img[7] = '?'; From 48ec4c85c00fc7be778dd2d1ce37d8f2908f8f17 Mon Sep 17 00:00:00 2001 From: Miriam Polzer Date: Thu, 26 Mar 2020 18:52:55 +0100 Subject: [PATCH 250/624] update_engine: Correct test command in README.md BUG=None TEST=cros_run_unit_tests --board ${BOARD} --packages update_engine Change-Id: I51552c7ca7b3155db6dad846b80f95c403815025 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2122354 Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani Tested-by: Amin Hassani --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 82642f72..71f271b2 100644 --- a/README.md +++ b/README.md @@ -478,7 +478,7 @@ or or ```bash -(chroot) $ cros_run_unit_tests --board ${BOARD} --test --packages update_engine +(chroot) $ cros_run_unit_tests --board ${BOARD} --packages update_engine ``` The above commands run all the unit tests, but `update_engine` package is quite From 7b3910a58bd0b8d9f0bd316fad5776ef0a8494ba Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 24 Mar 2020 17:47:32 -0700 Subject: [PATCH 251/624] Allow forward merge on powerwash. If package schedules powerwash, snapshots on a Virtual A/B device will be deleted. Tell SnapshotManager to handle this case. Test: apply downgrade package Test: apply package with wipe Test: apply package then manually wipe Bug: 152094219 Change-Id: I8601b661924abcc82956788e2f33cdb70d71f778 --- common/dynamic_partition_control_interface.h | 2 +- common/dynamic_partition_control_stub.cc | 2 +- common/dynamic_partition_control_stub.h | 2 +- dynamic_partition_control_android.cc | 4 ++-- dynamic_partition_control_android.h | 2 +- mock_dynamic_partition_control.h | 4 ++-- payload_consumer/postinstall_runner_action.cc | 3 ++- 7 files changed, 10 insertions(+), 9 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index d5db9bcd..58ebfe46 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -87,7 +87,7 @@ class DynamicPartitionControlInterface { // After writing to new partitions, before rebooting into the new slot, call // this function to indicate writes to new partitions are done. 
- virtual bool FinishUpdate() = 0; + virtual bool FinishUpdate(bool powerwash_required) = 0; // Get an action to clean up previous update. // Return NoOpAction on non-Virtual A/B devices. diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 1239eab0..903b7ee0 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -51,7 +51,7 @@ bool DynamicPartitionControlStub::PreparePartitionsForUpdate( return true; } -bool DynamicPartitionControlStub::FinishUpdate() { +bool DynamicPartitionControlStub::FinishUpdate(bool powerwash_required) { return true; } diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 679d028e..d8e254ea 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -40,7 +40,7 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { bool update, uint64_t* required_size) override; - bool FinishUpdate() override; + bool FinishUpdate(bool powerwash_required) override; std::unique_ptr GetCleanupPreviousUpdateAction( BootControlInterface* boot_control, PrefsInterface* prefs, diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 81d0d770..09f61adb 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -624,11 +624,11 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( return true; } -bool DynamicPartitionControlAndroid::FinishUpdate() { +bool DynamicPartitionControlAndroid::FinishUpdate(bool powerwash_required) { if (GetVirtualAbFeatureFlag().IsEnabled() && snapshot_->GetUpdateState() == UpdateState::Initiated) { LOG(INFO) << "Snapshot writes are done."; - return snapshot_->FinishedSnapshotWrites(); + return snapshot_->FinishedSnapshotWrites(powerwash_required); } return true; } diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index a11889a7..6dbe3704 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -45,7 +45,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const DeltaArchiveManifest& manifest, bool update, uint64_t* required_size) override; - bool FinishUpdate() override; + bool FinishUpdate(bool powerwash_required) override; std::unique_ptr GetCleanupPreviousUpdateAction( BootControlInterface* boot_control, PrefsInterface* prefs, diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index b3e0c24d..169c2657 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -42,7 +42,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { PreparePartitionsForUpdate, bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); - MOCK_METHOD0(FinishUpdate, bool()); + MOCK_METHOD1(FinishUpdate, bool(bool)); MOCK_METHOD0(CleanupSuccessfulUpdate, ErrorCode()); MOCK_METHOD3(GetCleanupPreviousUpdateAction, std::unique_ptr( @@ -76,7 +76,7 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); - MOCK_METHOD0(FinishUpdate, bool()); + MOCK_METHOD1(FinishUpdate, bool(bool)); void set_fake_mapped_devices(const std::set& fake) override { 
DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 0f484936..c08cfc20 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -341,7 +341,8 @@ void PostinstallRunnerAction::CompletePostinstall(ErrorCode error_code) { // steps succeeded. if (error_code == ErrorCode::kSuccess) { if (install_plan_.switch_slot_on_reboot) { - if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate() || + if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate( + install_plan_.powerwash_required) || !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) { error_code = ErrorCode::kPostinstallRunnerError; } else { From 6950e02536ca247d35ff14cfb2214ab9463a9e87 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 24 Mar 2020 17:47:32 -0700 Subject: [PATCH 252/624] Allow forward merge on powerwash. If package schedules powerwash, snapshots on a Virtual A/B device will be deleted. Tell SnapshotManager to handle this case. Test: apply downgrade package Test: apply package with wipe Test: apply package then manually wipe Bug: 152094219 Change-Id: I8601b661924abcc82956788e2f33cdb70d71f778 (cherry picked from commit 7b3910a58bd0b8d9f0bd316fad5776ef0a8494ba) Merged-In: I8601b661924abcc82956788e2f33cdb70d71f778 --- common/dynamic_partition_control_interface.h | 2 +- common/dynamic_partition_control_stub.cc | 2 +- common/dynamic_partition_control_stub.h | 2 +- dynamic_partition_control_android.cc | 4 ++-- dynamic_partition_control_android.h | 2 +- mock_dynamic_partition_control.h | 4 ++-- payload_consumer/postinstall_runner_action.cc | 3 ++- 7 files changed, 10 insertions(+), 9 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index d5db9bcd..58ebfe46 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -87,7 +87,7 @@ class DynamicPartitionControlInterface { // After writing to new partitions, before rebooting into the new slot, call // this function to indicate writes to new partitions are done. - virtual bool FinishUpdate() = 0; + virtual bool FinishUpdate(bool powerwash_required) = 0; // Get an action to clean up previous update. // Return NoOpAction on non-Virtual A/B devices. 
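A minimal sketch of the caller-side contract for the new signature; the wrapper below is illustrative, and only FinishUpdate() and InstallPlan::powerwash_required are real names from this change and the existing code:

#include "update_engine/common/dynamic_partition_control_interface.h"
#include "update_engine/payload_consumer/install_plan.h"

// Called once all partition writes are done, before switching the active
// slot. Forwarding powerwash_required lets SnapshotManager know that a
// scheduled wipe will delete its snapshots, so it can handle that case
// (the "forward merge on powerwash" described in the commit message).
bool FinalizeBeforeSlotSwitch(
    chromeos_update_engine::DynamicPartitionControlInterface* dpc,
    const chromeos_update_engine::InstallPlan& plan) {
  return dpc->FinishUpdate(plan.powerwash_required);
}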
diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 1239eab0..903b7ee0 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -51,7 +51,7 @@ bool DynamicPartitionControlStub::PreparePartitionsForUpdate( return true; } -bool DynamicPartitionControlStub::FinishUpdate() { +bool DynamicPartitionControlStub::FinishUpdate(bool powerwash_required) { return true; } diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 679d028e..d8e254ea 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -40,7 +40,7 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { bool update, uint64_t* required_size) override; - bool FinishUpdate() override; + bool FinishUpdate(bool powerwash_required) override; std::unique_ptr GetCleanupPreviousUpdateAction( BootControlInterface* boot_control, PrefsInterface* prefs, diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 81d0d770..09f61adb 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -624,11 +624,11 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( return true; } -bool DynamicPartitionControlAndroid::FinishUpdate() { +bool DynamicPartitionControlAndroid::FinishUpdate(bool powerwash_required) { if (GetVirtualAbFeatureFlag().IsEnabled() && snapshot_->GetUpdateState() == UpdateState::Initiated) { LOG(INFO) << "Snapshot writes are done."; - return snapshot_->FinishedSnapshotWrites(); + return snapshot_->FinishedSnapshotWrites(powerwash_required); } return true; } diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index a11889a7..6dbe3704 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -45,7 +45,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const DeltaArchiveManifest& manifest, bool update, uint64_t* required_size) override; - bool FinishUpdate() override; + bool FinishUpdate(bool powerwash_required) override; std::unique_ptr GetCleanupPreviousUpdateAction( BootControlInterface* boot_control, PrefsInterface* prefs, diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index b3e0c24d..169c2657 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -42,7 +42,7 @@ class MockDynamicPartitionControl : public DynamicPartitionControlInterface { PreparePartitionsForUpdate, bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); - MOCK_METHOD0(FinishUpdate, bool()); + MOCK_METHOD1(FinishUpdate, bool(bool)); MOCK_METHOD0(CleanupSuccessfulUpdate, ErrorCode()); MOCK_METHOD3(GetCleanupPreviousUpdateAction, std::unique_ptr( @@ -76,7 +76,7 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); - MOCK_METHOD0(FinishUpdate, bool()); + MOCK_METHOD1(FinishUpdate, bool(bool)); void set_fake_mapped_devices(const std::set& fake) override { DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 0f484936..c08cfc20 100644 --- 
a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -341,7 +341,8 @@ void PostinstallRunnerAction::CompletePostinstall(ErrorCode error_code) { // steps succeeded. if (error_code == ErrorCode::kSuccess) { if (install_plan_.switch_slot_on_reboot) { - if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate() || + if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate( + install_plan_.powerwash_required) || !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) { error_code = ErrorCode::kPostinstallRunnerError; } else { From 9d5a61d7c71beb655e67eaf5c685e7add8f7768d Mon Sep 17 00:00:00 2001 From: Andrew Date: Thu, 26 Mar 2020 13:40:37 -0700 Subject: [PATCH 253/624] update_engine: Fix issues reported by clang-tidy Fix uninitialized variables and some other minor issues reported by clang-tidy. BUG=chromium:982837 TEST=cros_workon_make update_engine --test Change-Id: I305dedb058c9b0787ba2f68feff42afe6810a276 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2122683 Reviewed-by: Amin Hassani Tested-by: Andrew Lassalle Commit-Queue: Andrew Lassalle --- common/hwid_override.cc | 2 -- common/subprocess.cc | 1 - common_service.cc | 2 +- common_service.h | 3 ++- dbus_service.cc | 1 - omaha_request_action.cc | 1 - omaha_request_params.cc | 2 -- p2p_manager.cc | 1 - payload_consumer/delta_performer.cc | 2 +- payload_consumer/postinstall_runner_action.cc | 1 - payload_generator/deflate_utils.cc | 2 +- payload_generator/delta_diff_utils.cc | 14 ++++++++++---- payload_generator/delta_diff_utils_unittest.cc | 2 ++ payload_generator/extent_ranges.cc | 1 - update_attempter.cc | 2 +- update_engine_client.cc | 2 +- update_manager/staging_utils.cc | 1 - 17 files changed, 19 insertions(+), 21 deletions(-) diff --git a/common/hwid_override.cc b/common/hwid_override.cc index 8800e948..1bb0f8f5 100644 --- a/common/hwid_override.cc +++ b/common/hwid_override.cc @@ -16,14 +16,12 @@ #include "update_engine/common/hwid_override.h" -#include #include #include #include #include -using std::map; using std::string; namespace chromeos_update_engine { diff --git a/common/subprocess.cc b/common/subprocess.cc index 52065030..24ad2d9b 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -36,7 +36,6 @@ #include "update_engine/common/utils.h" -using brillo::MessageLoop; using std::string; using std::unique_ptr; using std::vector; diff --git a/common_service.cc b/common_service.cc index b94e7340..347833b4 100644 --- a/common_service.cc +++ b/common_service.cc @@ -217,7 +217,7 @@ bool UpdateEngineService::GetChannel(ErrorPtr* /* error */, } bool UpdateEngineService::SetCohortHint(ErrorPtr* error, - string in_cohort_hint) { + const string& in_cohort_hint) { PrefsInterface* prefs = system_state_->prefs(); // It is ok to override the cohort hint with an invalid value since it is diff --git a/common_service.h b/common_service.h index a74c46bf..6c742a5d 100644 --- a/common_service.h +++ b/common_service.h @@ -109,7 +109,8 @@ class UpdateEngineService { // Sets the current "cohort hint" value to |in_cohort_hint|. The cohort hint // is sent back to Omaha on every request and can be used as a hint of what // cohort should we be put on. - bool SetCohortHint(brillo::ErrorPtr* error, std::string in_cohort_hint); + bool SetCohortHint(brillo::ErrorPtr* error, + const std::string& in_cohort_hint); // Return the current cohort hint. 
This value can be set with SetCohortHint() // and can also be updated from Omaha on every update check request. diff --git a/dbus_service.cc b/dbus_service.cc index cd714881..a5fec740 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -36,7 +36,6 @@ using std::vector; using update_engine::Operation; using update_engine::StatusResult; using update_engine::UpdateEngineStatus; -using update_engine::UpdateStatus; namespace { // Converts the internal |UpdateEngineStatus| to the protobuf |StatusResult|. diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 8890c7c9..50fe3cc9 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -59,7 +59,6 @@ using base::Time; using base::TimeDelta; using chromeos_update_manager::kRollforwardInfinity; using std::map; -using std::numeric_limits; using std::string; using std::vector; diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 1cfbc9c8..b6c18a6c 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -39,9 +39,7 @@ #define CALL_MEMBER_FN(object, member) ((object).*(member)) -using std::map; using std::string; -using std::vector; namespace chromeos_update_engine { diff --git a/p2p_manager.cc b/p2p_manager.cc index 5de91d13..00ff8cec 100644 --- a/p2p_manager.cc +++ b/p2p_manager.cc @@ -65,7 +65,6 @@ using brillo::MessageLoop; using chromeos_update_manager::EvalStatus; using chromeos_update_manager::Policy; using chromeos_update_manager::UpdateManager; -using std::map; using std::pair; using std::string; using std::unique_ptr; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 3263ff7d..11cf0069 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -533,7 +533,7 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata( #define OP_DURATION_HISTOGRAM(_op_name, _start_time) \ LOCAL_HISTOGRAM_CUSTOM_TIMES( \ "UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \ - base::TimeTicks::Now() - _start_time, \ + (base::TimeTicks::Now() - _start_time), \ base::TimeDelta::FromMilliseconds(10), \ base::TimeDelta::FromMinutes(5), \ 20); diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 264161ca..9ecda488 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -49,7 +49,6 @@ const int kPostinstallStatusFd = 3; namespace chromeos_update_engine { -using brillo::MessageLoop; using std::string; using std::vector; diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc index 8db67ce0..5d7a766a 100644 --- a/payload_generator/deflate_utils.cc +++ b/payload_generator/deflate_utils.cc @@ -46,7 +46,7 @@ const uint64_t kMinimumSquashfsImageSize = 1 * 1024 * 1024; // bytes // TODO(*): Optimize this so we don't have to read all extents into memory in // case it is large. 
bool CopyExtentsToFile(const string& in_path, - const vector extents, + const vector& extents, const string& out_path, size_t block_size) { brillo::Blob data(utils::BlocksInExtents(extents) * block_size); diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc index db69d740..53a3cf17 100644 --- a/payload_generator/delta_diff_utils.cc +++ b/payload_generator/delta_diff_utils.cc @@ -250,12 +250,13 @@ FilesystemInterface::File GetOldFile( if (old_file_iter != old_files_map.end()) return old_file_iter->second; - // No old file match for the new file name, use a similar file with the - // shortest levenshtein distance. + // No old file matches the new file name. Use a similar file with the + // shortest levenshtein distance instead. // This works great if the file has version number in it, but even for // a completely new file, using a similar file can still help. - int min_distance = new_file_name.size(); - const FilesystemInterface::File* old_file; + int min_distance = + LevenshteinDistance(new_file_name, old_files_map.begin()->first); + const FilesystemInterface::File* old_file = &old_files_map.begin()->second; for (const auto& pair : old_files_map) { int distance = LevenshteinDistance(new_file_name, pair.first); if (distance < min_distance) { @@ -580,6 +581,11 @@ bool DeltaReadFile(vector* aops, InstallOperation operation; uint64_t total_blocks = utils::BlocksInExtents(new_extents); + if (chunk_blocks == 0) { + LOG(ERROR) << "Invalid number of chunk_blocks. Cannot be 0."; + return false; + } + if (chunk_blocks == -1) chunk_blocks = total_blocks; diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc index bc3dca57..8a97b1bf 100644 --- a/payload_generator/delta_diff_utils_unittest.cc +++ b/payload_generator/delta_diff_utils_unittest.cc @@ -595,6 +595,8 @@ TEST_F(DeltaDiffUtilsTest, GetOldFileTest) { "update_engine"); EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "bin/delta_generator").name, "delta_generator"); + // Check file name with minimum size. 
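  // ("a" matches none of the candidate names exactly. With the old code,
  //  min_distance started at new_file_name.size() == 1, no candidate could
  //  beat it, and |old_file| was read uninitialized; the fix above seeds
  //  both from the first map entry so a best-effort match is always
  //  returned.)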
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "a").name, "filename"); } } // namespace chromeos_update_engine diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc index 0e3f087e..4600efe7 100644 --- a/payload_generator/extent_ranges.cc +++ b/payload_generator/extent_ranges.cc @@ -27,7 +27,6 @@ #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_generator/extent_utils.h" -using std::set; using std::vector; namespace chromeos_update_engine { diff --git a/update_attempter.cc b/update_attempter.cc index f5885c91..29d256cb 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -742,7 +742,7 @@ void UpdateAttempter::CalculateDlcParams() { } } std::map dlc_apps_params; - for (auto dlc_id : dlc_module_ids_) { + for (const auto& dlc_id : dlc_module_ids_) { OmahaRequestParams::AppParams dlc_params{ .active_counting_type = OmahaRequestParams::kDateBased, .name = dlc_id, diff --git a/update_engine_client.cc b/update_engine_client.cc index eb718410..a721f7a7 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -202,7 +202,7 @@ class UpdateWaitHandler : public ExitingStatusUpdateHandler { void UpdateWaitHandler::HandleStatusUpdate(const UpdateEngineStatus& status) { if (exit_on_error_ && status.status == UpdateStatus::IDLE) { - int last_attempt_error; + int last_attempt_error = static_cast(ErrorCode::kSuccess); ErrorCode code = ErrorCode::kSuccess; if (client_ && client_->GetLastAttemptError(&last_attempt_error)) code = static_cast(last_attempt_error); diff --git a/update_manager/staging_utils.cc b/update_manager/staging_utils.cc index 4835ab22..f4f685c6 100644 --- a/update_manager/staging_utils.cc +++ b/update_manager/staging_utils.cc @@ -32,7 +32,6 @@ using base::TimeDelta; using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod; using chromeos_update_engine::PrefsInterface; -using chromeos_update_engine::SystemState; using policy::DevicePolicy; namespace chromeos_update_manager { From 2969290920696611a67aed184baf71cac062b416 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 26 Mar 2020 12:47:05 -0700 Subject: [PATCH 254/624] Add DynamicPartitionControl::EraseSystemOtherAvbFooter Erase AVB footer of system other partition prior to any updates so that if an update overwrites it partially, and the device rolled back (or even before we finish writing the partition), and the device factory resets, mapping system_other as /postinstall won't trigger verity errors and reboots the device. Bug: 152444348 Test: apply update, rollback, then FDR Test: apply update, then set sys.cppreopt=requested; observe that /postinstall cannot be mounted. 
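The core of the erase step amounts to zeroing the last AVB_FOOTER_SIZE bytes of the partition; a rough standalone sketch (illustrative only, not the AvbFooterEraser added below, which also flips the block device writable and opens with O_SYNC through the update_engine FileDescriptor wrappers):

#include <fcntl.h>
#include <unistd.h>

#include <string>
#include <vector>

#include <android-base/file.h>
#include <android-base/unique_fd.h>

// Zero the final |footer_size| bytes of |path|.
bool ZeroTail(const std::string& path, off_t footer_size) {
  android::base::unique_fd fd(open(path.c_str(), O_WRONLY | O_CLOEXEC));
  if (fd.get() < 0) return false;
  if (lseek(fd.get(), -footer_size, SEEK_END) < 0) return false;
  std::vector<char> zeros(footer_size, 0);
  return android::base::WriteFully(fd.get(), zeros.data(), zeros.size());
}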
Change-Id: I62e5bb8f4c31d9a1beff485c47fc4b07a3a5686b --- Android.bp | 3 + dynamic_partition_control_android.cc | 214 +++++++++++++++++- dynamic_partition_control_android.h | 37 +++ dynamic_partition_control_android_unittest.cc | 141 +++++++++++- dynamic_partition_test_utils.h | 4 +- mock_dynamic_partition_control.h | 24 ++ 6 files changed, 413 insertions(+), 10 deletions(-) diff --git a/Android.bp b/Android.bp index e3116f57..07eee639 100644 --- a/Android.bp +++ b/Android.bp @@ -211,6 +211,9 @@ cc_defaults { "android.hardware.boot@1.0", "android.hardware.boot@1.1", ], + header_libs: [ + "avb_headers", + ], target: { recovery: { static_libs: [ diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 09f61adb..1e92f45b 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -42,12 +43,14 @@ #include "update_engine/payload_consumer/delta_performer.h" using android::base::GetBoolProperty; +using android::base::GetProperty; using android::base::Join; using android::dm::DeviceMapper; using android::dm::DmDeviceState; using android::fs_mgr::CreateLogicalPartition; using android::fs_mgr::CreateLogicalPartitionParams; using android::fs_mgr::DestroyLogicalPartition; +using android::fs_mgr::Fstab; using android::fs_mgr::MetadataBuilder; using android::fs_mgr::Partition; using android::fs_mgr::PartitionOpener; @@ -64,6 +67,7 @@ constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled"; constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit"; +constexpr char kPostinstallFstabPrefix[] = "ro.postinstall.fstab.prefix"; // Map timeout for dynamic partitions. constexpr std::chrono::milliseconds kMapTimeout{1000}; // Map timeout for dynamic partitions with snapshots. Since several devices @@ -401,6 +405,15 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( << "run adb enable-verity to deactivate if required and try again."; } + if (GetVirtualAbFeatureFlag().IsEnabled() && metadata_device_ == nullptr) { + metadata_device_ = snapshot_->EnsureMetadataMounted(); + TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); + } + + if (update) { + TEST_AND_RETURN_FALSE(EraseSystemOtherAvbFooter(source_slot, target_slot)); + } + if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) { return true; } @@ -421,11 +434,6 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( target_supports_snapshot_ = manifest.dynamic_partition_metadata().snapshot_enabled(); - if (GetVirtualAbFeatureFlag().IsEnabled()) { - metadata_device_ = snapshot_->EnsureMetadataMounted(); - TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); - } - if (!update) return true; @@ -471,6 +479,202 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( source_slot, target_slot, manifest, delete_source); } +namespace { +// Try our best to erase AVB footer. +class AvbFooterEraser { + public: + explicit AvbFooterEraser(const std::string& path) : path_(path) {} + bool Erase() { + // Try to mark the block device read-only. Ignore any + // failure since this won't work when passing regular files. + ignore_result(utils::SetBlockDeviceReadOnly(path_, false /* readonly */)); + + fd_.reset(new EintrSafeFileDescriptor()); + int flags = O_WRONLY | O_TRUNC | O_CLOEXEC | O_SYNC; + TEST_AND_RETURN_FALSE(fd_->Open(path_.c_str(), flags)); + + // Need to write end-AVB_FOOTER_SIZE to end. 
+ static_assert(AVB_FOOTER_SIZE > 0); + off64_t offset = fd_->Seek(-AVB_FOOTER_SIZE, SEEK_END); + TEST_AND_RETURN_FALSE_ERRNO(offset >= 0); + uint64_t write_size = AVB_FOOTER_SIZE; + LOG(INFO) << "Zeroing " << path_ << " @ [" << offset << ", " + << (offset + write_size) << "] (" << write_size << " bytes)"; + brillo::Blob zeros(write_size); + TEST_AND_RETURN_FALSE(utils::WriteAll(fd_, zeros.data(), zeros.size())); + return true; + } + ~AvbFooterEraser() { + TEST_AND_RETURN(fd_ != nullptr && fd_->IsOpen()); + if (!fd_->Close()) { + LOG(WARNING) << "Failed to close fd for " << path_; + } + } + + private: + std::string path_; + FileDescriptorPtr fd_; +}; + +} // namespace + +std::optional +DynamicPartitionControlAndroid::IsAvbEnabledOnSystemOther() { + auto prefix = GetProperty(kPostinstallFstabPrefix, ""); + if (prefix.empty()) { + LOG(WARNING) << "Cannot get " << kPostinstallFstabPrefix; + return std::nullopt; + } + auto path = base::FilePath(prefix).Append("etc/fstab.postinstall").value(); + return IsAvbEnabledInFstab(path); +} + +std::optional DynamicPartitionControlAndroid::IsAvbEnabledInFstab( + const std::string& path) { + Fstab fstab; + if (!ReadFstabFromFile(path, &fstab)) { + LOG(WARNING) << "Cannot read fstab from " << path; + return std::nullopt; + } + for (const auto& entry : fstab) { + if (!entry.avb_keys.empty()) { + return true; + } + } + return false; +} + +bool DynamicPartitionControlAndroid::GetSystemOtherPath( + uint32_t source_slot, + uint32_t target_slot, + const std::string& partition_name_suffix, + std::string* path, + bool* should_unmap) { + path->clear(); + *should_unmap = false; + + // In recovery, just erase no matter what. + // - On devices with retrofit dynamic partitions, no logical partitions + // should be mounted at this point. Hence it should be safe to erase. + // Otherwise, do check that AVB is enabled on system_other before erasing. + if (!IsRecovery()) { + auto has_avb = IsAvbEnabledOnSystemOther(); + TEST_AND_RETURN_FALSE(has_avb.has_value()); + if (!has_avb.value()) { + LOG(INFO) << "AVB is not enabled on system_other. Skip erasing."; + return true; + } + + // Found unexpected avb_keys for system_other on devices retrofitting + // dynamic partitions. Previous crash in update_engine may leave logical + // partitions mapped on physical system_other partition. It is difficult to + // handle these cases. Just fail. + if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) { + LOG(ERROR) << "Cannot erase AVB footer on system_other on devices with " + << "retrofit dynamic partitions. They should not have AVB " + << "enabled on system_other."; + return false; + } + } + + std::string device_dir_str; + TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str)); + base::FilePath device_dir(device_dir_str); + + // On devices without dynamic partition, search for static partitions. + if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) { + *path = device_dir.Append(partition_name_suffix).value(); + TEST_AND_RETURN_FALSE(DeviceExists(*path)); + return true; + } + + auto source_super_device = + device_dir.Append(GetSuperPartitionName(source_slot)).value(); + + auto builder = LoadMetadataBuilder(source_super_device, source_slot); + if (builder == nullptr) { + if (IsRecovery()) { + // It might be corrupted for some reason. It should still be able to + // sideload. 
+ LOG(WARNING) << "Super partition metadata cannot be read from the source " + << "slot, skip erasing."; + return true; + } else { + // Device has booted into Android mode, indicating that the super + // partition metadata should be there. + LOG(ERROR) << "Super partition metadata cannot be read from the source " + << "slot. This is unexpected on devices with dynamic " + << "partitions enabled."; + return false; + } + } + auto p = builder->FindPartition(partition_name_suffix); + if (p == nullptr) { + // If the source slot is flashed without system_other, it does not exist + // in super partition metadata at source slot. It is safe to skip it. + LOG(INFO) << "Can't find " << partition_name_suffix + << " in metadata source slot, skip erasing."; + return true; + } + // System_other created by flashing tools should be erased. + // If partition is created by update_engine (via NewForUpdate), it is a + // left-over partition from the previous update and does not contain + // system_other, hence there is no need to erase. + // Note the reverse is not necessary true. If the flag is not set, we don't + // know if the partition is created by update_engine or by flashing tools + // because older versions of super partition metadata does not contain this + // flag. It is okay to erase the AVB footer anyways. + if (p->attributes() & LP_PARTITION_ATTR_UPDATED) { + LOG(INFO) << partition_name_suffix + << " does not contain system_other, skip erasing."; + return true; + } + + // Delete any pre-existing device with name |partition_name_suffix| and + // also remove it from |mapped_devices_|. + TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); + // Use CreateLogicalPartition directly to avoid mapping with existing + // snapshots. + CreateLogicalPartitionParams params = { + .block_device = source_super_device, + .metadata_slot = source_slot, + .partition_name = partition_name_suffix, + .force_writable = true, + .timeout_ms = kMapTimeout, + }; + TEST_AND_RETURN_FALSE(CreateLogicalPartition(params, path)); + *should_unmap = true; + return true; +} + +bool DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter( + uint32_t source_slot, uint32_t target_slot) { + LOG(INFO) << "Erasing AVB footer of system_other partition before update."; + + const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); + const std::string partition_name_suffix = "system" + target_suffix; + + std::string path; + bool should_unmap = false; + + TEST_AND_RETURN_FALSE(GetSystemOtherPath( + source_slot, target_slot, partition_name_suffix, &path, &should_unmap)); + + if (path.empty()) { + return true; + } + + bool ret = AvbFooterEraser(path).Erase(); + + // Delete |partition_name_suffix| from device mapper and from + // |mapped_devices_| again so that it does not interfere with update process. + if (should_unmap) { + TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); + } + + return ret; +} + bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 6dbe3704..9dcdcf1a 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -136,6 +136,43 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // Allow mock objects to override this to test recovery mode. virtual bool IsRecovery(); + // Determine path for system_other partition. 
+ // |source_slot| should be current slot. + // |target_slot| should be "other" slot. + // |partition_name_suffix| should be "system" + suffix(|target_slot|). + // Return true and set |path| if successful. + // Set |path| to empty if no need to erase system_other. + // Set |should_unmap| to true if path needs to be unmapped later. + // + // Note: system_other cannot use GetPartitionDevice or + // GetDynamicPartitionDevice because: + // - super partition metadata may be loaded from the source slot + // - UPDATED flag needs to be check to skip erasing if partition is not + // created by flashing tools + // - Snapshots from previous update attempts should not be used. + virtual bool GetSystemOtherPath(uint32_t source_slot, + uint32_t target_slot, + const std::string& partition_name_suffix, + std::string* path, + bool* should_unmap); + + // Returns true if any entry in the fstab file in |path| has AVB enabled, + // false if not enabled, and nullopt for any error. + virtual std::optional IsAvbEnabledInFstab(const std::string& path); + + // Returns true if system_other has AVB enabled, false if not enabled, and + // nullopt for any error. + virtual std::optional IsAvbEnabledOnSystemOther(); + + // Erase system_other partition that may contain system_other.img. + // After the update, the content of system_other may be corrupted but with + // valid AVB footer. If the update is rolled back and factory data reset is + // triggered, system_b fails to be mapped with verity errors (see + // b/152444348). Erase the system_other so that mapping system_other is + // skipped. + virtual bool EraseSystemOtherAvbFooter(uint32_t source_slot, + uint32_t target_slot); + private: friend class DynamicPartitionControlAndroidTest; diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 457ea108..20819182 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -23,12 +23,16 @@ #include #include #include +#include #include "update_engine/common/mock_prefs.h" +#include "update_engine/common/test_utils.h" #include "update_engine/dynamic_partition_test_utils.h" #include "update_engine/mock_dynamic_partition_control.h" using android::dm::DmDeviceState; +using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder; +using chromeos_update_engine::test_utils::ScopedTempFile; using std::string; using testing::_; using testing::AnyNumber; @@ -36,6 +40,7 @@ using testing::AnyOf; using testing::Invoke; using testing::NiceMock; using testing::Not; +using testing::Optional; using testing::Return; namespace chromeos_update_engine { @@ -64,6 +69,9 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { *device = GetDmDevice(partition_name_suffix); return true; })); + + ON_CALL(dynamicControl(), EraseSystemOtherAvbFooter(_, _)) + .WillByDefault(Return(true)); } // Return the mocked DynamicPartitionControlInterface. @@ -90,12 +98,15 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { // Set the fake metadata to return when LoadMetadataBuilder is called on // |slot|. 
- void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) { + void SetMetadata(uint32_t slot, + const PartitionSuffixSizes& sizes, + uint32_t partition_attr = 0) { EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([sizes](auto, auto, auto) { - return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes)); + .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto, auto) { + return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes), + partition_attr); })); } @@ -757,4 +768,128 @@ TEST_F(DynamicPartitionControlAndroidTest, ResetUpdate) { ASSERT_TRUE(dynamicControl().ResetUpdate(&prefs)); } +TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) { + // clang-format off + std::string fstab_content = + "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical\n" // NOLINT(whitespace/line_length) + "/dev/block/by-name/system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other\n"; // NOLINT(whitespace/line_length) + // clang-format on + ScopedTempFile fstab; + ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content)); + ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()), + Optional(false)); +} + +TEST_F(DynamicPartitionControlAndroidTest, IsAvbEnabledInFstab) { + // clang-format off + std::string fstab_content = + "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical,avb_keys=/foo\n"; // NOLINT(whitespace/line_length) + // clang-format on + ScopedTempFile fstab; + ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content)); + ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()), + Optional(true)); +} + +TEST_P(DynamicPartitionControlAndroidTestP, AvbNotEnabledOnSystemOther) { + ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _)) + .WillByDefault(Invoke([&](auto source_slot, + auto target_slot, + const auto& name, + auto path, + auto should_unmap) { + return dynamicControl().RealGetSystemOtherPath( + source_slot, target_slot, name, path, should_unmap); + })); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(false)); + EXPECT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); +} + +TEST_P(DynamicPartitionControlAndroidTestP, NoSystemOtherToErase) { + SetMetadata(source(), {{S("system"), 100_MiB}}); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(true)); + std::string path; + bool should_unmap; + ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath( + source(), target(), T("system"), &path, &should_unmap)); + ASSERT_TRUE(path.empty()) << path; + ASSERT_FALSE(should_unmap); + ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _)) + .WillByDefault(Invoke([&](auto source_slot, + auto target_slot, + const auto& name, + auto path, + auto should_unmap) { + return dynamicControl().RealGetSystemOtherPath( + source_slot, target_slot, name, path, should_unmap); + })); + EXPECT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); +} + +TEST_P(DynamicPartitionControlAndroidTestP, SkipEraseUpdatedSystemOther) { + PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), 100_MiB}}; + SetMetadata(source(), sizes, LP_PARTITION_ATTR_UPDATED); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(true)); + std::string path; + bool should_unmap; + ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath( + source(), target(), T("system"), &path, &should_unmap)); + 
ASSERT_TRUE(path.empty()) << path; + ASSERT_FALSE(should_unmap); + ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _)) + .WillByDefault(Invoke([&](auto source_slot, + auto target_slot, + const auto& name, + auto path, + auto should_unmap) { + return dynamicControl().RealGetSystemOtherPath( + source_slot, target_slot, name, path, should_unmap); + })); + EXPECT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); +} + +TEST_P(DynamicPartitionControlAndroidTestP, EraseSystemOtherAvbFooter) { + constexpr uint64_t file_size = 1_MiB; + static_assert(file_size > AVB_FOOTER_SIZE); + ScopedTempFile system_other; + brillo::Blob original(file_size, 'X'); + ASSERT_TRUE(test_utils::WriteFileVector(system_other.path(), original)); + std::string mnt_path; + ScopedLoopbackDeviceBinder dev(system_other.path(), true, &mnt_path); + ASSERT_TRUE(dev.is_bound()); + + brillo::Blob device_content; + ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content)); + ASSERT_EQ(original, device_content); + + PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), file_size}}; + SetMetadata(source(), sizes); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(true)); + EXPECT_CALL(dynamicControl(), + GetSystemOtherPath(source(), target(), T("system"), _, _)) + .WillRepeatedly( + Invoke([&](auto, auto, const auto&, auto path, auto should_unmap) { + *path = mnt_path; + *should_unmap = false; + return true; + })); + ASSERT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); + + device_content.clear(); + ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content)); + brillo::Blob new_expected(original); + // Clear the last AVB_FOOTER_SIZE bytes. + new_expected.resize(file_size - AVB_FOOTER_SIZE); + new_expected.resize(file_size, '\0'); + ASSERT_EQ(new_expected, device_content); +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h index 346998fc..70a176b5 100644 --- a/dynamic_partition_test_utils.h +++ b/dynamic_partition_test_utils.h @@ -175,7 +175,7 @@ inline DeltaArchiveManifest PartitionSizesToManifest( } inline std::unique_ptr NewFakeMetadata( - const DeltaArchiveManifest& manifest) { + const DeltaArchiveManifest& manifest, uint32_t partition_attr = 0) { auto builder = MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots); for (const auto& group : manifest.dynamic_partition_metadata().groups()) { @@ -183,7 +183,7 @@ inline std::unique_ptr NewFakeMetadata( for (const auto& partition_name : group.partition_names()) { EXPECT_NE( nullptr, - builder->AddPartition(partition_name, group.name(), 0 /* attr */)); + builder->AddPartition(partition_name, group.name(), partition_attr)); } } for (const auto& partition : manifest.partitions()) { diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 169c2657..1e4e5fd8 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -77,10 +77,34 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD1(FinishUpdate, bool(bool)); + MOCK_METHOD5( + GetSystemOtherPath, + bool(uint32_t, uint32_t, const std::string&, std::string*, bool*)); + MOCK_METHOD2(EraseSystemOtherAvbFooter, bool(uint32_t, uint32_t)); + MOCK_METHOD0(IsAvbEnabledOnSystemOther, std::optional()); void set_fake_mapped_devices(const std::set& fake) override { 
DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); } + + bool RealGetSystemOtherPath(uint32_t source_slot, + uint32_t target_slot, + const std::string& partition_name_suffix, + std::string* path, + bool* should_unmap) { + return DynamicPartitionControlAndroid::GetSystemOtherPath( + source_slot, target_slot, partition_name_suffix, path, should_unmap); + } + + bool RealEraseSystemOtherAvbFooter(uint32_t source_slot, + uint32_t target_slot) { + return DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter( + source_slot, target_slot); + } + + std::optional RealIsAvbEnabledInFstab(const std::string& path) { + return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path); + } }; } // namespace chromeos_update_engine From c48581a0b26c77f900aa7b82bd13f4c6691d474f Mon Sep 17 00:00:00 2001 From: George Burgess IV Date: Wed, 1 Apr 2020 13:34:21 -0700 Subject: [PATCH 255/624] update_engine: remove an unreachable return We're trying to turn -Wunreachable-code-return on by default, and this unreachable `return` statement was flagged. The code above this is pretty clearly an infloop, so this return statement doesn't seem super useful. If someone adds a `break` or similar later, the compiler will complain (in -Werror form) about a missing `return` statement. BUG=chromium:1066998 TEST=CQ Change-Id: I9c87e256fa0c850bcb8cec17cbe694fc6bb0ef66 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2133017 Reviewed-by: Sen Jiang Tested-by: George Burgess Commit-Queue: George Burgess --- test_http_server.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/test_http_server.cc b/test_http_server.cc index 4536f372..4fc89e53 100644 --- a/test_http_server.cc +++ b/test_http_server.cc @@ -658,5 +658,4 @@ int main(int argc, char** argv) { LOG(FATAL) << "ERROR on accept"; HandleConnection(client_fd); } - return 0; } From 00b6aa24548cffae6e979e99722ea2bda95be8fe Mon Sep 17 00:00:00 2001 From: hscham Date: Thu, 20 Feb 2020 12:32:06 +0900 Subject: [PATCH 256/624] update_engine: changes for libchrome r680000 uprev Changes applied include: Replace arraysize by base::size. Replace base::MessageLoop::current()->task_runner by base::ThreadTaskRunnerHandle::Get, and base::MessageLoopForIO::current by base::MessageLoopCurrent::IsSet. Remove use of base::ContainsKey. Replace base::Int{,64}ToString by base::NumberTostring. The changes are all compatible with current libchrome r576279. 
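A condensed before/after of the substitutions listed above (header locations assume the r680000 layout; the snippet is illustrative rather than code from the tree):

  #include <set>
  #include <string>

  #include <base/bind.h>
  #include <base/location.h>
  #include <base/stl_util.h>                           // base::size
  #include <base/strings/string_number_conversions.h>  // base::NumberToString
  #include <base/threading/thread_task_runner_handle.h>

  namespace {
  void OnTask() {}
  }  // namespace

  void MigrationExamples() {
    int codes[] = {200, 301, 404};
    size_t count = base::size(codes);        // was: arraysize(codes)
    std::string text =
        base::NumberToString(static_cast<int64_t>(count));
                                             // was: base::Int64ToString(...)
    std::set<std::string> allowed = {"ethernet"};
    bool has_cellular =
        allowed.find("cellular") != allowed.end();
                                             // was: base::ContainsKey(allowed, "cellular")
    // was: base::MessageLoop::current()->task_runner()->PostTask(...)
    base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                  base::Bind(&OnTask));
  }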
BUG=chromium:1054279 TEST=unittest Change-Id: Ibb6027a5070e0e2d4554a6684350168542fedf5e Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2065691 Reviewed-by: Amin Hassani Tested-by: Qijiang Fan Commit-Queue: Qijiang Fan --- client_library/client_dbus.cc | 2 +- common/cpu_limiter.cc | 2 +- common/error_code_utils.cc | 2 +- common/http_common.cc | 5 +++-- common/http_fetcher_unittest.cc | 7 ++++--- common/prefs.cc | 2 +- common/subprocess.cc | 3 ++- connection_manager.cc | 3 ++- libcurl_http_fetcher.cc | 8 ++++---- omaha_request_params.cc | 3 ++- payload_consumer/delta_performer_integration_test.cc | 3 ++- payload_consumer/delta_performer_unittest.cc | 5 +++-- payload_consumer/postinstall_runner_action.cc | 3 ++- payload_generator/extent_ranges_unittest.cc | 3 ++- payload_generator/payload_signer_unittest.cc | 5 +++-- update_engine_client.cc | 3 ++- 16 files changed, 35 insertions(+), 24 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index f16b7591..4ec76c5b 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -214,7 +214,7 @@ bool DBusUpdateEngineClient::UnregisterStatusUpdateHandler( bool DBusUpdateEngineClient::RegisterStatusUpdateHandler( StatusUpdateHandler* handler) { - if (!base::MessageLoopForIO::current()) { + if (!base::MessageLoopCurrent::IsSet()) { LOG(FATAL) << "Cannot get UpdateEngineClient outside of message loop."; return false; } diff --git a/common/cpu_limiter.cc b/common/cpu_limiter.cc index 1d14764f..5f1ae6f0 100644 --- a/common/cpu_limiter.cc +++ b/common/cpu_limiter.cc @@ -67,7 +67,7 @@ bool CPULimiter::SetCpuShares(CpuShares shares) { if (shares_ == shares) return true; - std::string string_shares = base::IntToString(static_cast(shares)); + std::string string_shares = base::NumberToString(static_cast(shares)); LOG(INFO) << "Setting cgroup cpu shares to " << string_shares; if (!utils::WriteFile( kCGroupSharesPath, string_shares.c_str(), string_shares.size())) { diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index 5bcbaa40..397cdf24 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -171,7 +171,7 @@ string ErrorCodeToString(ErrorCode code) { // error codes which should be added here. 
} - return "Unknown error: " + base::UintToString(static_cast(code)); + return "Unknown error: " + base::NumberToString(static_cast(code)); } } // namespace utils diff --git a/common/http_common.cc b/common/http_common.cc index 5f234b0b..c8bac477 100644 --- a/common/http_common.cc +++ b/common/http_common.cc @@ -21,6 +21,7 @@ #include #include +#include namespace chromeos_update_engine { @@ -56,7 +57,7 @@ const char* GetHttpResponseDescription(HttpResponseCode code) { bool is_found = false; size_t i; - for (i = 0; i < arraysize(http_response_table); i++) + for (i = 0; i < base::size(http_response_table); i++) if ((is_found = (http_response_table[i].code == code))) break; @@ -77,7 +78,7 @@ const char* GetHttpContentTypeString(HttpContentType type) { bool is_found = false; size_t i; - for (i = 0; i < arraysize(http_content_type_table); i++) + for (i = 0; i < base::size(http_content_type_table); i++) if ((is_found = (http_content_type_table[i].type == type))) break; diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc index 237ea209..589579e3 100644 --- a/common/http_fetcher_unittest.cc +++ b/common/http_fetcher_unittest.cc @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -1049,7 +1050,7 @@ TYPED_TEST(HttpFetcherTest, SimpleRedirectTest) { unique_ptr server(this->test_.CreateServer()); ASSERT_TRUE(server->started_); - for (size_t c = 0; c < arraysize(kRedirectCodes); ++c) { + for (size_t c = 0; c < base::size(kRedirectCodes); ++c) { const string url = base::StringPrintf( "/redirect/%d/download/%d", kRedirectCodes[c], kMediumLength); RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher()); @@ -1066,7 +1067,7 @@ TYPED_TEST(HttpFetcherTest, MaxRedirectTest) { string url; for (int r = 0; r < kDownloadMaxRedirects; r++) { url += base::StringPrintf("/redirect/%d", - kRedirectCodes[r % arraysize(kRedirectCodes)]); + kRedirectCodes[r % base::size(kRedirectCodes)]); } url += base::StringPrintf("/download/%d", kMediumLength); RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher()); @@ -1082,7 +1083,7 @@ TYPED_TEST(HttpFetcherTest, BeyondMaxRedirectTest) { string url; for (int r = 0; r < kDownloadMaxRedirects + 1; r++) { url += base::StringPrintf("/redirect/%d", - kRedirectCodes[r % arraysize(kRedirectCodes)]); + kRedirectCodes[r % base::size(kRedirectCodes)]); } url += base::StringPrintf("/download/%d", kMediumLength); RedirectTest(server.get(), false, url, this->test_.NewLargeFetcher()); diff --git a/common/prefs.cc b/common/prefs.cc index 71838619..194bbd8b 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -54,7 +54,7 @@ bool PrefsBase::GetInt64(const string& key, int64_t* value) const { } bool PrefsBase::SetInt64(const string& key, const int64_t value) { - return SetString(key, base::Int64ToString(value)); + return SetString(key, base::NumberToString(value)); } bool PrefsBase::GetBoolean(const string& key, bool* value) const { diff --git a/common/subprocess.cc b/common/subprocess.cc index 24ad2d9b..45dff923 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -122,7 +123,7 @@ void Subprocess::OnStdoutReady(SubprocessRecord* record) { bytes_read = 0; bool eof; bool ok = utils::ReadAll( - record->stdout_fd, buf, arraysize(buf), &bytes_read, &eof); + record->stdout_fd, buf, base::size(buf), &bytes_read, &eof); record->stdout.append(buf, bytes_read); if (!ok || eof) { // There was either an error or an EOF condition, so 
we are done watching diff --git a/connection_manager.cc b/connection_manager.cc index ad7e5f65..fe43f37b 100644 --- a/connection_manager.cc +++ b/connection_manager.cc @@ -89,7 +89,8 @@ bool ConnectionManager::IsUpdateAllowedOver( if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) { // The update setting is enforced by the device policy. - if (!base::ContainsKey(allowed_types, shill::kTypeCellular)) { + // TODO(crbug.com/1054279): Use base::Contains after uprev to r680000. + if (allowed_types.find(shill::kTypeCellular) == allowed_types.end()) { LOG(INFO) << "Disabling updates over cellular connection as it's not " "allowed in the device policy."; return false; diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 4bea4eff..d317d489 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -80,7 +80,7 @@ int LibcurlHttpFetcher::LibcurlCloseSocketCallback(void* clientp, #endif // __ANDROID__ LibcurlHttpFetcher* fetcher = static_cast(clientp); // Stop watching the socket before closing it. - for (size_t t = 0; t < arraysize(fetcher->fd_controller_maps_); ++t) { + for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) { fetcher->fd_controller_maps_[t].erase(item); } @@ -676,7 +676,7 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { // We should iterate through all file descriptors up to libcurl's fd_max or // the highest one we're tracking, whichever is larger. - for (size_t t = 0; t < arraysize(fd_controller_maps_); ++t) { + for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) { if (!fd_controller_maps_[t].empty()) fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first); } @@ -694,7 +694,7 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { is_exc || (FD_ISSET(fd, &fd_write) != 0) // track 1 -- write }; - for (size_t t = 0; t < arraysize(fd_controller_maps_); ++t) { + for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) { bool tracked = fd_controller_maps_[t].find(fd) != fd_controller_maps_[t].end(); @@ -775,7 +775,7 @@ void LibcurlHttpFetcher::CleanUp() { MessageLoop::current()->CancelTask(timeout_id_); timeout_id_ = MessageLoop::kTaskIdNull; - for (size_t t = 0; t < arraysize(fd_controller_maps_); ++t) { + for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) { fd_controller_maps_[t].clear(); } diff --git a/omaha_request_params.cc b/omaha_request_params.cc index b6c18a6c..88633926 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -217,7 +218,7 @@ void OmahaRequestParams::set_root(const string& root) { } int OmahaRequestParams::GetChannelIndex(const string& channel) const { - for (size_t t = 0; t < arraysize(kChannelsByStability); ++t) + for (size_t t = 0; t < base::size(kChannelsByStability); ++t) if (channel == kChannelsByStability[t]) return t; diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index f1a492b5..af6682a4 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -848,7 +849,7 @@ void VerifyPayloadResult(DeltaPerformer* performer, brillo::Blob updated_kernel_partition; EXPECT_TRUE(utils::ReadFile(state->result_kernel, &updated_kernel_partition)); - ASSERT_GE(updated_kernel_partition.size(), arraysize(kNewData)); + ASSERT_GE(updated_kernel_partition.size(), 
base::size(kNewData)); EXPECT_TRUE(std::equal(std::begin(kNewData), std::end(kNewData), updated_kernel_partition.begin())); diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 47cb0e78..39011957 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -715,12 +716,12 @@ TEST_F(DeltaPerformerTest, ChooseSourceFDTest) { TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) { uint64_t test[] = {1, 1, 4, 2, 0, 1}; - static_assert(arraysize(test) % 2 == 0, "Array size uneven"); + static_assert(base::size(test) % 2 == 0, "Array size uneven"); const uint64_t block_size = 4096; const uint64_t file_length = 4 * block_size - 13; google::protobuf::RepeatedPtrField extents; - for (size_t i = 0; i < arraysize(test); i += 2) { + for (size_t i = 0; i < base::size(test); i += 2) { *(extents.Add()) = ExtentForRange(test[i], test[i + 1]); } diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 9ecda488..a0b67eac 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -229,7 +230,7 @@ void PostinstallRunnerAction::OnProgressFdReady() { bytes_read = 0; bool eof; bool ok = - utils::ReadAll(progress_fd_, buf, arraysize(buf), &bytes_read, &eof); + utils::ReadAll(progress_fd_, buf, base::size(buf), &bytes_read, &eof); progress_buffer_.append(buf, bytes_read); // Process every line. vector lines = base::SplitString( diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc index 2bcffed2..326e9360 100644 --- a/payload_generator/extent_ranges_unittest.cc +++ b/payload_generator/extent_ranges_unittest.cc @@ -18,6 +18,7 @@ #include +#include #include #include "update_engine/common/test_utils.h" @@ -53,7 +54,7 @@ void ExpectRangeEq(const ExtentRanges& ranges, #define EXPECT_RANGE_EQ(ranges, var) \ do { \ - ExpectRangeEq(ranges, var, arraysize(var), __LINE__); \ + ExpectRangeEq(ranges, var, base::size(var), __LINE__); \ } while (0) void ExpectRangesOverlapOrTouch(uint64_t a_start, diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc index 52d51bc4..f7f9c696 100644 --- a/payload_generator/payload_signer_unittest.cc +++ b/payload_generator/payload_signer_unittest.cc @@ -20,6 +20,7 @@ #include #include +#include #include #include "update_engine/common/hash_calculator.h" @@ -124,8 +125,8 @@ TEST_F(PayloadSignerTest, SignSimpleTextTest) { const Signatures_Signature& signature = signatures.signatures(0); EXPECT_EQ(1U, signature.version()); const string& sig_data = signature.data(); - ASSERT_EQ(arraysize(kDataSignature), sig_data.size()); - for (size_t i = 0; i < arraysize(kDataSignature); i++) { + ASSERT_EQ(base::size(kDataSignature), sig_data.size()); + for (size_t i = 0; i < base::size(kDataSignature); i++) { EXPECT_EQ(kDataSignature[i], static_cast(sig_data[i])); } } diff --git a/update_engine_client.cc b/update_engine_client.cc index a721f7a7..31448eaa 100644 --- a/update_engine_client.cc +++ b/update_engine_client.cc @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -86,7 +87,7 @@ class UpdateEngineClient : public brillo::Daemon { // We can't call QuitWithExitCode from OnInit(), so we delay the execution // of the 
ProcessFlags method after the Daemon initialization is done. - base::MessageLoop::current()->task_runner()->PostTask( + base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::Bind(&UpdateEngineClient::ProcessFlagsAndExit, base::Unretained(this))); From 29ebd7a61c49b9f1c70d5bab99ea2fc3905c2e5d Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 6 Apr 2020 13:36:44 -0700 Subject: [PATCH 257/624] Revert "Setup android-base logging in sideload." Now that SetupLogging sets up android-base logging too, there's no need to set it up again. Bug: 153355820 Test: enter recovery and manual inspect This reverts commit c73cb8ce94c14270c7e932bedb1889f5f150f038. Change-Id: I93f1319027e2f5b52bf683af1814d71cbb4b343f --- Android.bp | 1 - sideload_logging_android.cc | 27 --------------------------- sideload_logging_android.h | 25 ------------------------- sideload_main.cc | 2 -- 4 files changed, 55 deletions(-) delete mode 100644 sideload_logging_android.cc delete mode 100644 sideload_logging_android.h diff --git a/Android.bp b/Android.bp index 07eee639..0eb2837c 100644 --- a/Android.bp +++ b/Android.bp @@ -359,7 +359,6 @@ cc_binary { "metrics_reporter_stub.cc", "metrics_utils.cc", "network_selector_stub.cc", - "sideload_logging_android.cc", "sideload_main.cc", "update_attempter_android.cc", "update_boot_flags_action.cc", diff --git a/sideload_logging_android.cc b/sideload_logging_android.cc deleted file mode 100644 index f82259f3..00000000 --- a/sideload_logging_android.cc +++ /dev/null @@ -1,27 +0,0 @@ -// -// Copyright (C) 2019 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/sideload_logging_android.h" - -#include - -namespace chromeos_update_engine { - -void SetupAndroidLogging(char* argv[]) { - android::base::InitLogging(argv, android::base::StdioLogger); -} - -} // namespace chromeos_update_engine diff --git a/sideload_logging_android.h b/sideload_logging_android.h deleted file mode 100644 index 0bb87146..00000000 --- a/sideload_logging_android.h +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright (C) 2019 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#pragma once - -namespace chromeos_update_engine { - -// Some depending modules uses logging functions from android-base. -// Redirect android-base logging to stdio, which redirects to /tmp/recovery.log. 
-void SetupAndroidLogging(char* argv[]); - -} // namespace chromeos_update_engine diff --git a/sideload_main.cc b/sideload_main.cc index 29d6f2ce..818fa5c9 100644 --- a/sideload_main.cc +++ b/sideload_main.cc @@ -36,7 +36,6 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" -#include "update_engine/sideload_logging_android.h" #include "update_engine/update_attempter_android.h" using std::string; @@ -197,7 +196,6 @@ int main(int argc, char** argv) { chromeos_update_engine::Terminator::Init(); chromeos_update_engine::SetupLogging(); - chromeos_update_engine::SetupAndroidLogging(argv); brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload"); LOG(INFO) << "Update Engine Sideloading starting"; From e757420cd6042fb68d9a3630a0d00e795c3f2ffe Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 6 Apr 2020 15:08:24 -0700 Subject: [PATCH 258/624] sideload: fix duplicated logging Also align logging behavior in sideload and regular Android. - Use logging_android.cc as well - Add a logging handler to libchrome's logging so that it won't explicitly write to stderr after calling __android_log_write. Test: sideload and manually inspect /tmp/recovery.log Bug: 153355820 Change-Id: Iffaf245e8dec598ae8fd82052e2183a35afd604e --- Android.bp | 1 + logging_android.cc | 58 ++++++++++++++++++++++++++++++++++++++++------ sideload_main.cc | 15 ++---------- 3 files changed, 54 insertions(+), 20 deletions(-) diff --git a/Android.bp b/Android.bp index 0eb2837c..3287b7b4 100644 --- a/Android.bp +++ b/Android.bp @@ -356,6 +356,7 @@ cc_binary { srcs: [ "hardware_android.cc", + "logging_android.cc", "metrics_reporter_stub.cc", "metrics_utils.cc", "network_selector_stub.cc", diff --git a/logging_android.cc b/logging_android.cc index d5aac6df..88b068bc 100644 --- a/logging_android.cc +++ b/logging_android.cc @@ -39,6 +39,12 @@ using std::string; +#ifdef _UE_SIDELOAD +constexpr bool kSideload = true; +#else +constexpr bool kSideload = false; +#endif + namespace chromeos_update_engine { namespace { @@ -141,13 +147,11 @@ class FileLogger { return; } - // libchrome add a newline character to |message|. Strip it. - std::string_view message_no_newline = + std::string_view message_str = log_message->message != nullptr ? log_message->message : ""; - ignore_result(android::base::ConsumeSuffix(&message_no_newline, "\n")); WriteToFd(GetPrefix(log_message)); - WriteToFd(message_no_newline); + WriteToFd(message_str); WriteToFd("\n"); } @@ -187,7 +191,13 @@ class CombinedLogger { public: CombinedLogger(bool log_to_system, bool log_to_file) { if (log_to_system) { - loggers_.push_back(__android_log_logd_logger); + if (kSideload) { + // No logd in sideload. Use stdout. + // recovery has already redirected stdio properly. + loggers_.push_back(__android_log_stderr_logger); + } else { + loggers_.push_back(__android_log_logd_logger); + } } if (log_to_file) { loggers_.push_back(std::move(FileLogger(SetupLogFile(kSystemLogsRoot)))); @@ -203,6 +213,39 @@ class CombinedLogger { std::vector loggers_; }; +// Redirect all libchrome logs to liblog using our custom handler that does +// not call __android_log_write and explicitly write to stderr at the same +// time. The preset CombinedLogger already writes to stderr properly. +bool RedirectToLiblog(int severity, + const char* file, + int line, + size_t message_start, + const std::string& str_newline) { + android_LogPriority priority = + (severity < 0) ? 
ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN; + switch (severity) { + case logging::LOG_INFO: + priority = ANDROID_LOG_INFO; + break; + case logging::LOG_WARNING: + priority = ANDROID_LOG_WARN; + break; + case logging::LOG_ERROR: + priority = ANDROID_LOG_ERROR; + break; + case logging::LOG_FATAL: + priority = ANDROID_LOG_FATAL; + break; + } + std::string_view sv = str_newline; + ignore_result(android::base::ConsumeSuffix(&sv, "\n")); + std::string str(sv.data(), sv.size()); + // This will eventually be redirected to CombinedLogger. + // |tag| is ignored by CombinedLogger, so just leave it empty. + __android_log_write(priority, "" /* tag */, str.c_str()); + return true; +} + } // namespace void SetupLogging(bool log_to_system, bool log_to_file) { @@ -219,14 +262,15 @@ void SetupLogging(bool log_to_system, bool log_to_file) { // libchrome logging should not log to file. logging::LoggingSettings log_settings; log_settings.lock_log = logging::DONT_LOCK_LOG_FILE; - log_settings.logging_dest = static_cast( - logging::LOG_TO_SYSTEM_DEBUG_LOG); + log_settings.logging_dest = + static_cast(logging::LOG_NONE); log_settings.log_file = nullptr; logging::InitLogging(log_settings); logging::SetLogItems(false /* enable_process_id */, false /* enable_thread_id */, false /* enable_timestamp */, false /* enable_tickcount */); + logging::SetLogMessageHandler(&RedirectToLiblog); } } // namespace chromeos_update_engine diff --git a/sideload_main.cc b/sideload_main.cc index 818fa5c9..27967cda 100644 --- a/sideload_main.cc +++ b/sideload_main.cc @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -36,6 +35,7 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" +#include "update_engine/logging.h" #include "update_engine/update_attempter_android.h" using std::string; @@ -46,17 +46,6 @@ using update_engine::UpdateStatus; namespace chromeos_update_engine { namespace { -void SetupLogging() { - string log_file; - logging::LoggingSettings log_settings; - log_settings.lock_log = logging::DONT_LOCK_LOG_FILE; - log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE; - log_settings.log_file = nullptr; - log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG; - - logging::InitLogging(log_settings); -} - class SideloadDaemonState : public DaemonStateInterface, public ServiceObserverInterface { public: @@ -195,7 +184,7 @@ int main(int argc, char** argv) { DEFINE_int64(status_fd, -1, "A file descriptor to notify the update status."); chromeos_update_engine::Terminator::Init(); - chromeos_update_engine::SetupLogging(); + chromeos_update_engine::SetupLogging(true /* stderr */, false /* file */); brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload"); LOG(INFO) << "Update Engine Sideloading starting"; From 38093bbb34a6af672541a79fe16a16c888fafd0c Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 6 Apr 2020 13:36:44 -0700 Subject: [PATCH 259/624] Revert "Setup android-base logging in sideload." Now that SetupLogging sets up android-base logging too, there's no need to set it up again. Bug: 153355820 Test: enter recovery and manual inspect This reverts commit c73cb8ce94c14270c7e932bedb1889f5f150f038. 
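The handler-based routing added in the previous change is what makes a separate per-binary android-base setup redundant: libchrome messages are intercepted once and forwarded to liblog. A trimmed sketch of that pattern (the tag and the severity mapping here are illustrative):

  #include <string>

  #include <android/log.h>
  #include <base/logging.h>

  namespace {
  // Forward libchrome log messages to liblog. Returning true tells libchrome
  // the message was fully handled, so it does not also write to stderr.
  bool ForwardToLiblog(int severity,
                       const char* /* file */,
                       int /* line */,
                       size_t message_start,
                       const std::string& message) {
    android_LogPriority priority =
        severity >= logging::LOG_ERROR ? ANDROID_LOG_ERROR : ANDROID_LOG_INFO;
    __android_log_write(priority, "update_engine",
                        message.c_str() + message_start);
    return true;
  }
  }  // namespace

  void InstallLibchromeLogHandler() {
    logging::SetLogMessageHandler(&ForwardToLiblog);
  }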
Change-Id: I93f1319027e2f5b52bf683af1814d71cbb4b343f (cherry picked from commit 29ebd7a61c49b9f1c70d5bab99ea2fc3905c2e5d) Merged-In: I93f1319027e2f5b52bf683af1814d71cbb4b343f --- Android.bp | 1 - sideload_logging_android.cc | 27 --------------------------- sideload_logging_android.h | 25 ------------------------- sideload_main.cc | 2 -- 4 files changed, 55 deletions(-) delete mode 100644 sideload_logging_android.cc delete mode 100644 sideload_logging_android.h diff --git a/Android.bp b/Android.bp index e3116f57..39a2121a 100644 --- a/Android.bp +++ b/Android.bp @@ -356,7 +356,6 @@ cc_binary { "metrics_reporter_stub.cc", "metrics_utils.cc", "network_selector_stub.cc", - "sideload_logging_android.cc", "sideload_main.cc", "update_attempter_android.cc", "update_boot_flags_action.cc", diff --git a/sideload_logging_android.cc b/sideload_logging_android.cc deleted file mode 100644 index f82259f3..00000000 --- a/sideload_logging_android.cc +++ /dev/null @@ -1,27 +0,0 @@ -// -// Copyright (C) 2019 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/sideload_logging_android.h" - -#include - -namespace chromeos_update_engine { - -void SetupAndroidLogging(char* argv[]) { - android::base::InitLogging(argv, android::base::StdioLogger); -} - -} // namespace chromeos_update_engine diff --git a/sideload_logging_android.h b/sideload_logging_android.h deleted file mode 100644 index 0bb87146..00000000 --- a/sideload_logging_android.h +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright (C) 2019 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#pragma once - -namespace chromeos_update_engine { - -// Some depending modules uses logging functions from android-base. -// Redirect android-base logging to stdio, which redirects to /tmp/recovery.log. 
-void SetupAndroidLogging(char* argv[]); - -} // namespace chromeos_update_engine diff --git a/sideload_main.cc b/sideload_main.cc index 29d6f2ce..818fa5c9 100644 --- a/sideload_main.cc +++ b/sideload_main.cc @@ -36,7 +36,6 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" -#include "update_engine/sideload_logging_android.h" #include "update_engine/update_attempter_android.h" using std::string; @@ -197,7 +196,6 @@ int main(int argc, char** argv) { chromeos_update_engine::Terminator::Init(); chromeos_update_engine::SetupLogging(); - chromeos_update_engine::SetupAndroidLogging(argv); brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload"); LOG(INFO) << "Update Engine Sideloading starting"; From cba9c46108cc230c20ecdebfd7d48b597a4e3faf Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 26 Mar 2020 12:47:05 -0700 Subject: [PATCH 260/624] Add DynamicPartitionControl::EraseSystemOtherAvbFooter Erase AVB footer of system other partition prior to any updates so that if an update overwrites it partially, and the device rolled back (or even before we finish writing the partition), and the device factory resets, mapping system_other as /postinstall won't trigger verity errors and reboots the device. Fixes: 152444348 Test: apply update, rollback, then FDR Test: apply update, then set sys.cppreopt=requested; observe that /postinstall cannot be mounted. Change-Id: I62e5bb8f4c31d9a1beff485c47fc4b07a3a5686b (cherry picked from commit 2969290920696611a67aed184baf71cac062b416) Merged-In: I62e5bb8f4c31d9a1beff485c47fc4b07a3a5686b --- Android.bp | 3 + dynamic_partition_control_android.cc | 214 +++++++++++++++++- dynamic_partition_control_android.h | 37 +++ dynamic_partition_control_android_unittest.cc | 141 +++++++++++- dynamic_partition_test_utils.h | 4 +- mock_dynamic_partition_control.h | 24 ++ 6 files changed, 413 insertions(+), 10 deletions(-) diff --git a/Android.bp b/Android.bp index e3116f57..07eee639 100644 --- a/Android.bp +++ b/Android.bp @@ -211,6 +211,9 @@ cc_defaults { "android.hardware.boot@1.0", "android.hardware.boot@1.1", ], + header_libs: [ + "avb_headers", + ], target: { recovery: { static_libs: [ diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 09f61adb..1e92f45b 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -42,12 +43,14 @@ #include "update_engine/payload_consumer/delta_performer.h" using android::base::GetBoolProperty; +using android::base::GetProperty; using android::base::Join; using android::dm::DeviceMapper; using android::dm::DmDeviceState; using android::fs_mgr::CreateLogicalPartition; using android::fs_mgr::CreateLogicalPartitionParams; using android::fs_mgr::DestroyLogicalPartition; +using android::fs_mgr::Fstab; using android::fs_mgr::MetadataBuilder; using android::fs_mgr::Partition; using android::fs_mgr::PartitionOpener; @@ -64,6 +67,7 @@ constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled"; constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit"; +constexpr char kPostinstallFstabPrefix[] = "ro.postinstall.fstab.prefix"; // Map timeout for dynamic partitions. constexpr std::chrono::milliseconds kMapTimeout{1000}; // Map timeout for dynamic partitions with snapshots. 
Since several devices @@ -401,6 +405,15 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( << "run adb enable-verity to deactivate if required and try again."; } + if (GetVirtualAbFeatureFlag().IsEnabled() && metadata_device_ == nullptr) { + metadata_device_ = snapshot_->EnsureMetadataMounted(); + TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); + } + + if (update) { + TEST_AND_RETURN_FALSE(EraseSystemOtherAvbFooter(source_slot, target_slot)); + } + if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) { return true; } @@ -421,11 +434,6 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( target_supports_snapshot_ = manifest.dynamic_partition_metadata().snapshot_enabled(); - if (GetVirtualAbFeatureFlag().IsEnabled()) { - metadata_device_ = snapshot_->EnsureMetadataMounted(); - TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); - } - if (!update) return true; @@ -471,6 +479,202 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( source_slot, target_slot, manifest, delete_source); } +namespace { +// Try our best to erase AVB footer. +class AvbFooterEraser { + public: + explicit AvbFooterEraser(const std::string& path) : path_(path) {} + bool Erase() { + // Try to mark the block device read-only. Ignore any + // failure since this won't work when passing regular files. + ignore_result(utils::SetBlockDeviceReadOnly(path_, false /* readonly */)); + + fd_.reset(new EintrSafeFileDescriptor()); + int flags = O_WRONLY | O_TRUNC | O_CLOEXEC | O_SYNC; + TEST_AND_RETURN_FALSE(fd_->Open(path_.c_str(), flags)); + + // Need to write end-AVB_FOOTER_SIZE to end. + static_assert(AVB_FOOTER_SIZE > 0); + off64_t offset = fd_->Seek(-AVB_FOOTER_SIZE, SEEK_END); + TEST_AND_RETURN_FALSE_ERRNO(offset >= 0); + uint64_t write_size = AVB_FOOTER_SIZE; + LOG(INFO) << "Zeroing " << path_ << " @ [" << offset << ", " + << (offset + write_size) << "] (" << write_size << " bytes)"; + brillo::Blob zeros(write_size); + TEST_AND_RETURN_FALSE(utils::WriteAll(fd_, zeros.data(), zeros.size())); + return true; + } + ~AvbFooterEraser() { + TEST_AND_RETURN(fd_ != nullptr && fd_->IsOpen()); + if (!fd_->Close()) { + LOG(WARNING) << "Failed to close fd for " << path_; + } + } + + private: + std::string path_; + FileDescriptorPtr fd_; +}; + +} // namespace + +std::optional +DynamicPartitionControlAndroid::IsAvbEnabledOnSystemOther() { + auto prefix = GetProperty(kPostinstallFstabPrefix, ""); + if (prefix.empty()) { + LOG(WARNING) << "Cannot get " << kPostinstallFstabPrefix; + return std::nullopt; + } + auto path = base::FilePath(prefix).Append("etc/fstab.postinstall").value(); + return IsAvbEnabledInFstab(path); +} + +std::optional DynamicPartitionControlAndroid::IsAvbEnabledInFstab( + const std::string& path) { + Fstab fstab; + if (!ReadFstabFromFile(path, &fstab)) { + LOG(WARNING) << "Cannot read fstab from " << path; + return std::nullopt; + } + for (const auto& entry : fstab) { + if (!entry.avb_keys.empty()) { + return true; + } + } + return false; +} + +bool DynamicPartitionControlAndroid::GetSystemOtherPath( + uint32_t source_slot, + uint32_t target_slot, + const std::string& partition_name_suffix, + std::string* path, + bool* should_unmap) { + path->clear(); + *should_unmap = false; + + // In recovery, just erase no matter what. + // - On devices with retrofit dynamic partitions, no logical partitions + // should be mounted at this point. Hence it should be safe to erase. + // Otherwise, do check that AVB is enabled on system_other before erasing. 
+ if (!IsRecovery()) { + auto has_avb = IsAvbEnabledOnSystemOther(); + TEST_AND_RETURN_FALSE(has_avb.has_value()); + if (!has_avb.value()) { + LOG(INFO) << "AVB is not enabled on system_other. Skip erasing."; + return true; + } + + // Found unexpected avb_keys for system_other on devices retrofitting + // dynamic partitions. Previous crash in update_engine may leave logical + // partitions mapped on physical system_other partition. It is difficult to + // handle these cases. Just fail. + if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) { + LOG(ERROR) << "Cannot erase AVB footer on system_other on devices with " + << "retrofit dynamic partitions. They should not have AVB " + << "enabled on system_other."; + return false; + } + } + + std::string device_dir_str; + TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str)); + base::FilePath device_dir(device_dir_str); + + // On devices without dynamic partition, search for static partitions. + if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) { + *path = device_dir.Append(partition_name_suffix).value(); + TEST_AND_RETURN_FALSE(DeviceExists(*path)); + return true; + } + + auto source_super_device = + device_dir.Append(GetSuperPartitionName(source_slot)).value(); + + auto builder = LoadMetadataBuilder(source_super_device, source_slot); + if (builder == nullptr) { + if (IsRecovery()) { + // It might be corrupted for some reason. It should still be able to + // sideload. + LOG(WARNING) << "Super partition metadata cannot be read from the source " + << "slot, skip erasing."; + return true; + } else { + // Device has booted into Android mode, indicating that the super + // partition metadata should be there. + LOG(ERROR) << "Super partition metadata cannot be read from the source " + << "slot. This is unexpected on devices with dynamic " + << "partitions enabled."; + return false; + } + } + auto p = builder->FindPartition(partition_name_suffix); + if (p == nullptr) { + // If the source slot is flashed without system_other, it does not exist + // in super partition metadata at source slot. It is safe to skip it. + LOG(INFO) << "Can't find " << partition_name_suffix + << " in metadata source slot, skip erasing."; + return true; + } + // System_other created by flashing tools should be erased. + // If partition is created by update_engine (via NewForUpdate), it is a + // left-over partition from the previous update and does not contain + // system_other, hence there is no need to erase. + // Note the reverse is not necessary true. If the flag is not set, we don't + // know if the partition is created by update_engine or by flashing tools + // because older versions of super partition metadata does not contain this + // flag. It is okay to erase the AVB footer anyways. + if (p->attributes() & LP_PARTITION_ATTR_UPDATED) { + LOG(INFO) << partition_name_suffix + << " does not contain system_other, skip erasing."; + return true; + } + + // Delete any pre-existing device with name |partition_name_suffix| and + // also remove it from |mapped_devices_|. + TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); + // Use CreateLogicalPartition directly to avoid mapping with existing + // snapshots. 
+ CreateLogicalPartitionParams params = { + .block_device = source_super_device, + .metadata_slot = source_slot, + .partition_name = partition_name_suffix, + .force_writable = true, + .timeout_ms = kMapTimeout, + }; + TEST_AND_RETURN_FALSE(CreateLogicalPartition(params, path)); + *should_unmap = true; + return true; +} + +bool DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter( + uint32_t source_slot, uint32_t target_slot) { + LOG(INFO) << "Erasing AVB footer of system_other partition before update."; + + const std::string target_suffix = SlotSuffixForSlotNumber(target_slot); + const std::string partition_name_suffix = "system" + target_suffix; + + std::string path; + bool should_unmap = false; + + TEST_AND_RETURN_FALSE(GetSystemOtherPath( + source_slot, target_slot, partition_name_suffix, &path, &should_unmap)); + + if (path.empty()) { + return true; + } + + bool ret = AvbFooterEraser(path).Erase(); + + // Delete |partition_name_suffix| from device mapper and from + // |mapped_devices_| again so that it does not interfere with update process. + if (should_unmap) { + TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); + } + + return ret; +} + bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( uint32_t source_slot, uint32_t target_slot, diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 6dbe3704..9dcdcf1a 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -136,6 +136,43 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // Allow mock objects to override this to test recovery mode. virtual bool IsRecovery(); + // Determine path for system_other partition. + // |source_slot| should be current slot. + // |target_slot| should be "other" slot. + // |partition_name_suffix| should be "system" + suffix(|target_slot|). + // Return true and set |path| if successful. + // Set |path| to empty if no need to erase system_other. + // Set |should_unmap| to true if path needs to be unmapped later. + // + // Note: system_other cannot use GetPartitionDevice or + // GetDynamicPartitionDevice because: + // - super partition metadata may be loaded from the source slot + // - UPDATED flag needs to be check to skip erasing if partition is not + // created by flashing tools + // - Snapshots from previous update attempts should not be used. + virtual bool GetSystemOtherPath(uint32_t source_slot, + uint32_t target_slot, + const std::string& partition_name_suffix, + std::string* path, + bool* should_unmap); + + // Returns true if any entry in the fstab file in |path| has AVB enabled, + // false if not enabled, and nullopt for any error. + virtual std::optional IsAvbEnabledInFstab(const std::string& path); + + // Returns true if system_other has AVB enabled, false if not enabled, and + // nullopt for any error. + virtual std::optional IsAvbEnabledOnSystemOther(); + + // Erase system_other partition that may contain system_other.img. + // After the update, the content of system_other may be corrupted but with + // valid AVB footer. If the update is rolled back and factory data reset is + // triggered, system_b fails to be mapped with verity errors (see + // b/152444348). Erase the system_other so that mapping system_other is + // skipped. 
+ virtual bool EraseSystemOtherAvbFooter(uint32_t source_slot, + uint32_t target_slot); + private: friend class DynamicPartitionControlAndroidTest; diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 457ea108..20819182 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -23,12 +23,16 @@ #include #include #include +#include #include "update_engine/common/mock_prefs.h" +#include "update_engine/common/test_utils.h" #include "update_engine/dynamic_partition_test_utils.h" #include "update_engine/mock_dynamic_partition_control.h" using android::dm::DmDeviceState; +using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder; +using chromeos_update_engine::test_utils::ScopedTempFile; using std::string; using testing::_; using testing::AnyNumber; @@ -36,6 +40,7 @@ using testing::AnyOf; using testing::Invoke; using testing::NiceMock; using testing::Not; +using testing::Optional; using testing::Return; namespace chromeos_update_engine { @@ -64,6 +69,9 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { *device = GetDmDevice(partition_name_suffix); return true; })); + + ON_CALL(dynamicControl(), EraseSystemOtherAvbFooter(_, _)) + .WillByDefault(Return(true)); } // Return the mocked DynamicPartitionControlInterface. @@ -90,12 +98,15 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { // Set the fake metadata to return when LoadMetadataBuilder is called on // |slot|. - void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) { + void SetMetadata(uint32_t slot, + const PartitionSuffixSizes& sizes, + uint32_t partition_attr = 0) { EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([sizes](auto, auto, auto) { - return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes)); + .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto, auto) { + return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes), + partition_attr); })); } @@ -757,4 +768,128 @@ TEST_F(DynamicPartitionControlAndroidTest, ResetUpdate) { ASSERT_TRUE(dynamicControl().ResetUpdate(&prefs)); } +TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) { + // clang-format off + std::string fstab_content = + "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical\n" // NOLINT(whitespace/line_length) + "/dev/block/by-name/system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other\n"; // NOLINT(whitespace/line_length) + // clang-format on + ScopedTempFile fstab; + ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content)); + ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()), + Optional(false)); +} + +TEST_F(DynamicPartitionControlAndroidTest, IsAvbEnabledInFstab) { + // clang-format off + std::string fstab_content = + "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical,avb_keys=/foo\n"; // NOLINT(whitespace/line_length) + // clang-format on + ScopedTempFile fstab; + ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content)); + ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()), + Optional(true)); +} + +TEST_P(DynamicPartitionControlAndroidTestP, AvbNotEnabledOnSystemOther) { + ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _)) + .WillByDefault(Invoke([&](auto source_slot, + auto target_slot, + const auto& name, + auto path, + auto should_unmap) { + return 
dynamicControl().RealGetSystemOtherPath( + source_slot, target_slot, name, path, should_unmap); + })); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(false)); + EXPECT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); +} + +TEST_P(DynamicPartitionControlAndroidTestP, NoSystemOtherToErase) { + SetMetadata(source(), {{S("system"), 100_MiB}}); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(true)); + std::string path; + bool should_unmap; + ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath( + source(), target(), T("system"), &path, &should_unmap)); + ASSERT_TRUE(path.empty()) << path; + ASSERT_FALSE(should_unmap); + ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _)) + .WillByDefault(Invoke([&](auto source_slot, + auto target_slot, + const auto& name, + auto path, + auto should_unmap) { + return dynamicControl().RealGetSystemOtherPath( + source_slot, target_slot, name, path, should_unmap); + })); + EXPECT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); +} + +TEST_P(DynamicPartitionControlAndroidTestP, SkipEraseUpdatedSystemOther) { + PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), 100_MiB}}; + SetMetadata(source(), sizes, LP_PARTITION_ATTR_UPDATED); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(true)); + std::string path; + bool should_unmap; + ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath( + source(), target(), T("system"), &path, &should_unmap)); + ASSERT_TRUE(path.empty()) << path; + ASSERT_FALSE(should_unmap); + ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _)) + .WillByDefault(Invoke([&](auto source_slot, + auto target_slot, + const auto& name, + auto path, + auto should_unmap) { + return dynamicControl().RealGetSystemOtherPath( + source_slot, target_slot, name, path, should_unmap); + })); + EXPECT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); +} + +TEST_P(DynamicPartitionControlAndroidTestP, EraseSystemOtherAvbFooter) { + constexpr uint64_t file_size = 1_MiB; + static_assert(file_size > AVB_FOOTER_SIZE); + ScopedTempFile system_other; + brillo::Blob original(file_size, 'X'); + ASSERT_TRUE(test_utils::WriteFileVector(system_other.path(), original)); + std::string mnt_path; + ScopedLoopbackDeviceBinder dev(system_other.path(), true, &mnt_path); + ASSERT_TRUE(dev.is_bound()); + + brillo::Blob device_content; + ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content)); + ASSERT_EQ(original, device_content); + + PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), file_size}}; + SetMetadata(source(), sizes); + ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther()) + .WillByDefault(Return(true)); + EXPECT_CALL(dynamicControl(), + GetSystemOtherPath(source(), target(), T("system"), _, _)) + .WillRepeatedly( + Invoke([&](auto, auto, const auto&, auto path, auto should_unmap) { + *path = mnt_path; + *should_unmap = false; + return true; + })); + ASSERT_TRUE( + dynamicControl().RealEraseSystemOtherAvbFooter(source(), target())); + + device_content.clear(); + ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content)); + brillo::Blob new_expected(original); + // Clear the last AVB_FOOTER_SIZE bytes. 
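  // Editorial note (not part of the patch): the two resize() calls below rely
  // on std::vector value-initializing newly appended elements -- shrinking to
  // (file_size - AVB_FOOTER_SIZE) keeps the untouched prefix, and growing back
  // to file_size appends AVB_FOOTER_SIZE zero bytes, which is exactly what the
  // eraser is expected to have written at the end of the device.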
+ new_expected.resize(file_size - AVB_FOOTER_SIZE); + new_expected.resize(file_size, '\0'); + ASSERT_EQ(new_expected, device_content); +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h index 346998fc..70a176b5 100644 --- a/dynamic_partition_test_utils.h +++ b/dynamic_partition_test_utils.h @@ -175,7 +175,7 @@ inline DeltaArchiveManifest PartitionSizesToManifest( } inline std::unique_ptr NewFakeMetadata( - const DeltaArchiveManifest& manifest) { + const DeltaArchiveManifest& manifest, uint32_t partition_attr = 0) { auto builder = MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots); for (const auto& group : manifest.dynamic_partition_metadata().groups()) { @@ -183,7 +183,7 @@ inline std::unique_ptr NewFakeMetadata( for (const auto& partition_name : group.partition_names()) { EXPECT_NE( nullptr, - builder->AddPartition(partition_name, group.name(), 0 /* attr */)); + builder->AddPartition(partition_name, group.name(), partition_attr)); } } for (const auto& partition : manifest.partitions()) { diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 169c2657..1e4e5fd8 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -77,10 +77,34 @@ class MockDynamicPartitionControlAndroid MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); MOCK_METHOD1(FinishUpdate, bool(bool)); + MOCK_METHOD5( + GetSystemOtherPath, + bool(uint32_t, uint32_t, const std::string&, std::string*, bool*)); + MOCK_METHOD2(EraseSystemOtherAvbFooter, bool(uint32_t, uint32_t)); + MOCK_METHOD0(IsAvbEnabledOnSystemOther, std::optional()); void set_fake_mapped_devices(const std::set& fake) override { DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); } + + bool RealGetSystemOtherPath(uint32_t source_slot, + uint32_t target_slot, + const std::string& partition_name_suffix, + std::string* path, + bool* should_unmap) { + return DynamicPartitionControlAndroid::GetSystemOtherPath( + source_slot, target_slot, partition_name_suffix, path, should_unmap); + } + + bool RealEraseSystemOtherAvbFooter(uint32_t source_slot, + uint32_t target_slot) { + return DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter( + source_slot, target_slot); + } + + std::optional RealIsAvbEnabledInFstab(const std::string& path) { + return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path); + } }; } // namespace chromeos_update_engine From 700e6b0b4e9896ada433d48b7a2cb2ad468eb173 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 3 Apr 2020 11:31:50 -0700 Subject: [PATCH 261/624] Allow to skip mounting metadata in recovery. After factory data reset, metadata has no valid ext4 fs, and it is not formatted when recovery is started. Hence, it is possible that recovery can't mount metadata. Use fallback path for sideloading full OTAs on Virtual A/B devices in this case. 
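Put differently, the behaviour this change aims for is roughly the following
(an illustrative sketch of the intent, not the literal code in the diff below):

  if (!EnsureMetadataMounted()) {
    // /metadata may be erased but not yet formatted after a factory data reset.
    if (!IsRecovery()) return false;  // Android mode: still a hard error.
    // Recovery: tolerate it; snapshots are skipped and the snapshot-less
    // PrepareDynamicPartitionsForUpdate() fallback is used instead.
  }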
Test: the following: fastboot reboot fastboot -w fastboot reboot recovery adb root adb shell mount -t ext4 /dev/block/by-name/metadata /metadata # fails adb reboot sideload adb sideload ota.zip # successful Bug: 152352037 Change-Id: I51ae3e5918b0c00054f309832c45823d80e46c69 (cherry picked from commit 77141004ba643a1d6b9d8a663cfb0791c2fe6b92) Merged-In: I51ae3e5918b0c00054f309832c45823d80e46c69 --- cleanup_previous_update_action.cc | 5 +- dynamic_partition_control_android.cc | 86 +++++++++++++++++++++++----- dynamic_partition_control_android.h | 22 +++++++ 3 files changed, 98 insertions(+), 15 deletions(-) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index ee689472..26cc6be9 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -160,7 +160,10 @@ void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() { if (metadata_device_ == nullptr) { LOG(ERROR) << "Failed to mount /metadata."; - processor_->ActionComplete(this, ErrorCode::kError); + // If metadata is erased but not formatted, it is possible to not mount + // it in recovery. It is safe to skip CleanupPreviousUpdateAction. + processor_->ActionComplete( + this, kIsRecovery ? ErrorCode::kSuccess : ErrorCode::kError); return; } diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 1e92f45b..a310f209 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -152,10 +152,12 @@ bool DynamicPartitionControlAndroid::MapPartitionInternal( }; bool success = false; if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_ && - force_writable) { + force_writable && ExpectMetadataMounted()) { // Only target partitions are mapped with force_writable. On Virtual // A/B devices, target partitions may overlap with source partitions, so // they must be mapped with snapshot. + // One exception is when /metadata is not mounted. Fallback to + // CreateLogicalPartition as snapshots are not created in the first place. params.timeout_ms = kMapSnapshotTimeout; success = snapshot_->MapUpdateSnapshot(params, path); } else { @@ -232,8 +234,11 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( // On a Virtual A/B device, |target_partition_name| may be a leftover from // a paused update. Clean up any underlying devices. - if (GetVirtualAbFeatureFlag().IsEnabled()) { + if (ExpectMetadataMounted()) { success &= snapshot_->UnmapUpdateSnapshot(target_partition_name); + } else { + LOG(INFO) << "Skip UnmapUpdateSnapshot(" << target_partition_name + << ") because metadata is not mounted"; } if (!success) { @@ -405,10 +410,10 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( << "run adb enable-verity to deactivate if required and try again."; } - if (GetVirtualAbFeatureFlag().IsEnabled() && metadata_device_ == nullptr) { - metadata_device_ = snapshot_->EnsureMetadataMounted(); - TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); - } + // If metadata is erased but not formatted, it is possible to not mount + // it in recovery. It is acceptable to skip mounting and choose fallback path + // (PrepareDynamicPartitionsForUpdate) when sideloading full OTAs. 
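  // Editorial note (not part of the patch): TEST_AND_RETURN_FALSE(x) is
  // update_engine's log-and-bail macro -- if |x| evaluates to false it logs the
  // failed expression with file and line and returns false from the enclosing
  // function. The check below therefore reads: metadata must mount, unless we
  // are in recovery, where the snapshot-less fallback is acceptable.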
+ TEST_AND_RETURN_FALSE(EnsureMetadataMounted() || IsRecovery()); if (update) { TEST_AND_RETURN_FALSE(EraseSystemOtherAvbFooter(source_slot, target_slot)); @@ -469,9 +474,18 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( << "snapshots."; } - if (!snapshot_->CancelUpdate()) { - LOG(ERROR) << "Cannot cancel previous update."; - return false; + // In recovery, if /metadata is not mounted, it is likely that metadata + // partition is erased and not formatted yet. After sideloading, when + // rebooting into the new version, init will erase metadata partition, + // hence the failure of CancelUpdate() can be ignored here. + // However, if metadata is mounted and CancelUpdate fails, sideloading + // should not proceed because during next boot, snapshots will overlay on + // the devices incorrectly. + if (ExpectMetadataMounted()) { + TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate()); + } else { + LOG(INFO) << "Skip canceling previous update because metadata is not " + << "mounted"; } } @@ -632,6 +646,9 @@ bool DynamicPartitionControlAndroid::GetSystemOtherPath( // Delete any pre-existing device with name |partition_name_suffix| and // also remove it from |mapped_devices_|. + // In recovery, metadata might not be mounted, and + // UnmapPartitionOnDeviceMapper might fail. However, + // it is unusual that system_other has already been mapped. Hence, just skip. TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); // Use CreateLogicalPartition directly to avoid mapping with existing // snapshots. @@ -668,6 +685,10 @@ bool DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter( // Delete |partition_name_suffix| from device mapper and from // |mapped_devices_| again so that it does not interfere with update process. + // In recovery, metadata might not be mounted, and + // UnmapPartitionOnDeviceMapper might fail. However, DestroyLogicalPartition + // should be called. If DestroyLogicalPartition does fail, it is still okay + // to skip the error here and let Prepare*() fail later. 
if (should_unmap) { TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); } @@ -726,6 +747,7 @@ bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate( uint32_t target_slot, const DeltaArchiveManifest& manifest, uint64_t* required_size) { + TEST_AND_RETURN_FALSE(ExpectMetadataMounted()); if (!snapshot_->BeginUpdate()) { LOG(ERROR) << "Cannot begin new update."; return false; @@ -829,10 +851,14 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( } bool DynamicPartitionControlAndroid::FinishUpdate(bool powerwash_required) { - if (GetVirtualAbFeatureFlag().IsEnabled() && - snapshot_->GetUpdateState() == UpdateState::Initiated) { - LOG(INFO) << "Snapshot writes are done."; - return snapshot_->FinishedSnapshotWrites(powerwash_required); + if (ExpectMetadataMounted()) { + if (snapshot_->GetUpdateState() == UpdateState::Initiated) { + LOG(INFO) << "Snapshot writes are done."; + return snapshot_->FinishedSnapshotWrites(powerwash_required); + } + } else { + LOG(INFO) << "Skip FinishedSnapshotWrites() because /metadata is not " + << "mounted"; } return true; } @@ -1006,9 +1032,41 @@ bool DynamicPartitionControlAndroid::ResetUpdate(PrefsInterface* prefs) { TEST_AND_RETURN_FALSE(DeltaPerformer::ResetUpdateProgress( prefs, false /* quick */, false /* skip dynamic partitions metadata */)); - TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate()); + if (ExpectMetadataMounted()) { + TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate()); + } else { + LOG(INFO) << "Skip cancelling update in ResetUpdate because /metadata is " + << "not mounted"; + } return true; } +bool DynamicPartitionControlAndroid::ExpectMetadataMounted() { + // No need to mount metadata for non-Virtual A/B devices. + if (!GetVirtualAbFeatureFlag().IsEnabled()) { + return false; + } + // Intentionally not checking |metadata_device_| in Android mode. + // /metadata should always be mounted in Android mode. If it isn't, let caller + // fails when calling into SnapshotManager. + if (!IsRecovery()) { + return true; + } + // In recovery mode, explicitly check |metadata_device_|. + return metadata_device_ != nullptr; +} + +bool DynamicPartitionControlAndroid::EnsureMetadataMounted() { + // No need to mount metadata for non-Virtual A/B devices. + if (!GetVirtualAbFeatureFlag().IsEnabled()) { + return true; + } + + if (metadata_device_ == nullptr) { + metadata_device_ = snapshot_->EnsureMetadataMounted(); + } + return metadata_device_ != nullptr; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 9dcdcf1a..8ad75933 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -233,6 +233,28 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t source_slot, const DeltaArchiveManifest& manifest); + // Returns true if metadata is expected to be mounted, false otherwise. + // Note that it returns false on non-Virtual A/B devices. + // + // Almost all functions of SnapshotManager depends on metadata being mounted. + // - In Android mode for Virtual A/B devices, assume it is mounted. If not, + // let caller fails when calling into SnapshotManager. + // - In recovery for Virtual A/B devices, it is possible that metadata is not + // formatted, hence it cannot be mounted. Caller should not call into + // SnapshotManager. + // - On non-Virtual A/B devices, updates do not depend on metadata partition. + // Caller should not call into SnapshotManager. 
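  // Editorial note (not part of the patch) -- rough decision table implied by
  // the two implementations earlier in this diff:
  //
  //   Virtual A/B | mode     | metadata_device_ | Expect...() | Ensure...()
  //   ------------+----------+------------------+-------------+----------------
  //   no          | any      | n/a              | false       | true (no-op)
  //   yes         | Android  | ignored          | true        | mounts if needed
  //   yes         | recovery | nullptr          | false       | tries to mount
  //   yes         | recovery | non-null         | true        | true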
+ // + // This function does NOT mount metadata partition. Use EnsureMetadataMounted + // to mount metadata partition. + bool ExpectMetadataMounted(); + + // Ensure /metadata is mounted. Returns true if successful, false otherwise. + // + // Note that this function returns true on non-Virtual A/B devices without + // doing anything. + bool EnsureMetadataMounted(); + std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; From a73445f65eb8b639a279ceb58a067b916f59a1e9 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 6 Apr 2020 15:08:24 -0700 Subject: [PATCH 262/624] sideload: fix duplicated logging Also align logging behavior in sideload and regular Android. - Use logging_android.cc as well - Add a logging handler to libchrome's logging so that it won't explicitly write to stderr after calling __android_log_write. Test: sideload and manually inspect /tmp/recovery.log Fixes: 153355820 Change-Id: Iffaf245e8dec598ae8fd82052e2183a35afd604e (cherry picked from commit e757420cd6042fb68d9a3630a0d00e795c3f2ffe) Merged-In: Iffaf245e8dec598ae8fd82052e2183a35afd604e --- Android.bp | 1 + logging_android.cc | 58 ++++++++++++++++++++++++++++++++++++++++------ sideload_main.cc | 15 ++---------- 3 files changed, 54 insertions(+), 20 deletions(-) diff --git a/Android.bp b/Android.bp index 39a2121a..0d5469c0 100644 --- a/Android.bp +++ b/Android.bp @@ -353,6 +353,7 @@ cc_binary { srcs: [ "hardware_android.cc", + "logging_android.cc", "metrics_reporter_stub.cc", "metrics_utils.cc", "network_selector_stub.cc", diff --git a/logging_android.cc b/logging_android.cc index d5aac6df..88b068bc 100644 --- a/logging_android.cc +++ b/logging_android.cc @@ -39,6 +39,12 @@ using std::string; +#ifdef _UE_SIDELOAD +constexpr bool kSideload = true; +#else +constexpr bool kSideload = false; +#endif + namespace chromeos_update_engine { namespace { @@ -141,13 +147,11 @@ class FileLogger { return; } - // libchrome add a newline character to |message|. Strip it. - std::string_view message_no_newline = + std::string_view message_str = log_message->message != nullptr ? log_message->message : ""; - ignore_result(android::base::ConsumeSuffix(&message_no_newline, "\n")); WriteToFd(GetPrefix(log_message)); - WriteToFd(message_no_newline); + WriteToFd(message_str); WriteToFd("\n"); } @@ -187,7 +191,13 @@ class CombinedLogger { public: CombinedLogger(bool log_to_system, bool log_to_file) { if (log_to_system) { - loggers_.push_back(__android_log_logd_logger); + if (kSideload) { + // No logd in sideload. Use stdout. + // recovery has already redirected stdio properly. + loggers_.push_back(__android_log_stderr_logger); + } else { + loggers_.push_back(__android_log_logd_logger); + } } if (log_to_file) { loggers_.push_back(std::move(FileLogger(SetupLogFile(kSystemLogsRoot)))); @@ -203,6 +213,39 @@ class CombinedLogger { std::vector loggers_; }; +// Redirect all libchrome logs to liblog using our custom handler that does +// not call __android_log_write and explicitly write to stderr at the same +// time. The preset CombinedLogger already writes to stderr properly. +bool RedirectToLiblog(int severity, + const char* file, + int line, + size_t message_start, + const std::string& str_newline) { + android_LogPriority priority = + (severity < 0) ? 
ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN; + switch (severity) { + case logging::LOG_INFO: + priority = ANDROID_LOG_INFO; + break; + case logging::LOG_WARNING: + priority = ANDROID_LOG_WARN; + break; + case logging::LOG_ERROR: + priority = ANDROID_LOG_ERROR; + break; + case logging::LOG_FATAL: + priority = ANDROID_LOG_FATAL; + break; + } + std::string_view sv = str_newline; + ignore_result(android::base::ConsumeSuffix(&sv, "\n")); + std::string str(sv.data(), sv.size()); + // This will eventually be redirected to CombinedLogger. + // |tag| is ignored by CombinedLogger, so just leave it empty. + __android_log_write(priority, "" /* tag */, str.c_str()); + return true; +} + } // namespace void SetupLogging(bool log_to_system, bool log_to_file) { @@ -219,14 +262,15 @@ void SetupLogging(bool log_to_system, bool log_to_file) { // libchrome logging should not log to file. logging::LoggingSettings log_settings; log_settings.lock_log = logging::DONT_LOCK_LOG_FILE; - log_settings.logging_dest = static_cast( - logging::LOG_TO_SYSTEM_DEBUG_LOG); + log_settings.logging_dest = + static_cast(logging::LOG_NONE); log_settings.log_file = nullptr; logging::InitLogging(log_settings); logging::SetLogItems(false /* enable_process_id */, false /* enable_thread_id */, false /* enable_timestamp */, false /* enable_tickcount */); + logging::SetLogMessageHandler(&RedirectToLiblog); } } // namespace chromeos_update_engine diff --git a/sideload_main.cc b/sideload_main.cc index 818fa5c9..27967cda 100644 --- a/sideload_main.cc +++ b/sideload_main.cc @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -36,6 +35,7 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" +#include "update_engine/logging.h" #include "update_engine/update_attempter_android.h" using std::string; @@ -46,17 +46,6 @@ using update_engine::UpdateStatus; namespace chromeos_update_engine { namespace { -void SetupLogging() { - string log_file; - logging::LoggingSettings log_settings; - log_settings.lock_log = logging::DONT_LOCK_LOG_FILE; - log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE; - log_settings.log_file = nullptr; - log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG; - - logging::InitLogging(log_settings); -} - class SideloadDaemonState : public DaemonStateInterface, public ServiceObserverInterface { public: @@ -195,7 +184,7 @@ int main(int argc, char** argv) { DEFINE_int64(status_fd, -1, "A file descriptor to notify the update status."); chromeos_update_engine::Terminator::Init(); - chromeos_update_engine::SetupLogging(); + chromeos_update_engine::SetupLogging(true /* stderr */, false /* file */); brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload"); LOG(INFO) << "Update Engine Sideloading starting"; From 4d7c5eb7a1903278b951069ba278484f8381c728 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 3 Apr 2020 11:31:50 -0700 Subject: [PATCH 263/624] Allow to skip mounting metadata in recovery. After factory data reset, metadata has no valid ext4 fs, and it is not formatted when recovery is started. Hence, it is possible that recovery can't mount metadata. Use fallback path for sideloading full OTAs on Virtual A/B devices in this case. 
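Referring back to the sideload logging change above: a reduced sketch of the
libchrome handler contract it relies on (handler name is hypothetical, severity
mapping omitted; the signature matches the one used in logging_android.cc).
Returning true tells libchrome the message was fully handled, so it skips its
own stderr/file destinations and each line is emitted only once via liblog:

  bool ForwardToLiblog(int severity,
                       const char* file,
                       int line,
                       size_t message_start,
                       const std::string& str_newline) {
    __android_log_write(ANDROID_LOG_INFO, "" /* tag */, str_newline.c_str());
    return true;  // "handled": suppress libchrome's default output paths.
  }

  logging::SetLogMessageHandler(&ForwardToLiblog);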
Test: the following: fastboot reboot fastboot -w fastboot reboot recovery adb root adb shell mount -t ext4 /dev/block/by-name/metadata /metadata # fails adb reboot sideload adb sideload ota.zip # successful Bug: 152352037 Change-Id: I51ae3e5918b0c00054f309832c45823d80e46c69 --- cleanup_previous_update_action.cc | 5 +- dynamic_partition_control_android.cc | 86 +++++++++++++++++++++++----- dynamic_partition_control_android.h | 22 +++++++ 3 files changed, 98 insertions(+), 15 deletions(-) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index ee689472..26cc6be9 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -160,7 +160,10 @@ void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() { if (metadata_device_ == nullptr) { LOG(ERROR) << "Failed to mount /metadata."; - processor_->ActionComplete(this, ErrorCode::kError); + // If metadata is erased but not formatted, it is possible to not mount + // it in recovery. It is safe to skip CleanupPreviousUpdateAction. + processor_->ActionComplete( + this, kIsRecovery ? ErrorCode::kSuccess : ErrorCode::kError); return; } diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 1e92f45b..a310f209 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -152,10 +152,12 @@ bool DynamicPartitionControlAndroid::MapPartitionInternal( }; bool success = false; if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_ && - force_writable) { + force_writable && ExpectMetadataMounted()) { // Only target partitions are mapped with force_writable. On Virtual // A/B devices, target partitions may overlap with source partitions, so // they must be mapped with snapshot. + // One exception is when /metadata is not mounted. Fallback to + // CreateLogicalPartition as snapshots are not created in the first place. params.timeout_ms = kMapSnapshotTimeout; success = snapshot_->MapUpdateSnapshot(params, path); } else { @@ -232,8 +234,11 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( // On a Virtual A/B device, |target_partition_name| may be a leftover from // a paused update. Clean up any underlying devices. - if (GetVirtualAbFeatureFlag().IsEnabled()) { + if (ExpectMetadataMounted()) { success &= snapshot_->UnmapUpdateSnapshot(target_partition_name); + } else { + LOG(INFO) << "Skip UnmapUpdateSnapshot(" << target_partition_name + << ") because metadata is not mounted"; } if (!success) { @@ -405,10 +410,10 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( << "run adb enable-verity to deactivate if required and try again."; } - if (GetVirtualAbFeatureFlag().IsEnabled() && metadata_device_ == nullptr) { - metadata_device_ = snapshot_->EnsureMetadataMounted(); - TEST_AND_RETURN_FALSE(metadata_device_ != nullptr); - } + // If metadata is erased but not formatted, it is possible to not mount + // it in recovery. It is acceptable to skip mounting and choose fallback path + // (PrepareDynamicPartitionsForUpdate) when sideloading full OTAs. 
+ TEST_AND_RETURN_FALSE(EnsureMetadataMounted() || IsRecovery()); if (update) { TEST_AND_RETURN_FALSE(EraseSystemOtherAvbFooter(source_slot, target_slot)); @@ -469,9 +474,18 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( << "snapshots."; } - if (!snapshot_->CancelUpdate()) { - LOG(ERROR) << "Cannot cancel previous update."; - return false; + // In recovery, if /metadata is not mounted, it is likely that metadata + // partition is erased and not formatted yet. After sideloading, when + // rebooting into the new version, init will erase metadata partition, + // hence the failure of CancelUpdate() can be ignored here. + // However, if metadata is mounted and CancelUpdate fails, sideloading + // should not proceed because during next boot, snapshots will overlay on + // the devices incorrectly. + if (ExpectMetadataMounted()) { + TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate()); + } else { + LOG(INFO) << "Skip canceling previous update because metadata is not " + << "mounted"; } } @@ -632,6 +646,9 @@ bool DynamicPartitionControlAndroid::GetSystemOtherPath( // Delete any pre-existing device with name |partition_name_suffix| and // also remove it from |mapped_devices_|. + // In recovery, metadata might not be mounted, and + // UnmapPartitionOnDeviceMapper might fail. However, + // it is unusual that system_other has already been mapped. Hence, just skip. TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); // Use CreateLogicalPartition directly to avoid mapping with existing // snapshots. @@ -668,6 +685,10 @@ bool DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter( // Delete |partition_name_suffix| from device mapper and from // |mapped_devices_| again so that it does not interfere with update process. + // In recovery, metadata might not be mounted, and + // UnmapPartitionOnDeviceMapper might fail. However, DestroyLogicalPartition + // should be called. If DestroyLogicalPartition does fail, it is still okay + // to skip the error here and let Prepare*() fail later. 
if (should_unmap) { TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix)); } @@ -726,6 +747,7 @@ bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate( uint32_t target_slot, const DeltaArchiveManifest& manifest, uint64_t* required_size) { + TEST_AND_RETURN_FALSE(ExpectMetadataMounted()); if (!snapshot_->BeginUpdate()) { LOG(ERROR) << "Cannot begin new update."; return false; @@ -829,10 +851,14 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( } bool DynamicPartitionControlAndroid::FinishUpdate(bool powerwash_required) { - if (GetVirtualAbFeatureFlag().IsEnabled() && - snapshot_->GetUpdateState() == UpdateState::Initiated) { - LOG(INFO) << "Snapshot writes are done."; - return snapshot_->FinishedSnapshotWrites(powerwash_required); + if (ExpectMetadataMounted()) { + if (snapshot_->GetUpdateState() == UpdateState::Initiated) { + LOG(INFO) << "Snapshot writes are done."; + return snapshot_->FinishedSnapshotWrites(powerwash_required); + } + } else { + LOG(INFO) << "Skip FinishedSnapshotWrites() because /metadata is not " + << "mounted"; } return true; } @@ -1006,9 +1032,41 @@ bool DynamicPartitionControlAndroid::ResetUpdate(PrefsInterface* prefs) { TEST_AND_RETURN_FALSE(DeltaPerformer::ResetUpdateProgress( prefs, false /* quick */, false /* skip dynamic partitions metadata */)); - TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate()); + if (ExpectMetadataMounted()) { + TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate()); + } else { + LOG(INFO) << "Skip cancelling update in ResetUpdate because /metadata is " + << "not mounted"; + } return true; } +bool DynamicPartitionControlAndroid::ExpectMetadataMounted() { + // No need to mount metadata for non-Virtual A/B devices. + if (!GetVirtualAbFeatureFlag().IsEnabled()) { + return false; + } + // Intentionally not checking |metadata_device_| in Android mode. + // /metadata should always be mounted in Android mode. If it isn't, let caller + // fails when calling into SnapshotManager. + if (!IsRecovery()) { + return true; + } + // In recovery mode, explicitly check |metadata_device_|. + return metadata_device_ != nullptr; +} + +bool DynamicPartitionControlAndroid::EnsureMetadataMounted() { + // No need to mount metadata for non-Virtual A/B devices. + if (!GetVirtualAbFeatureFlag().IsEnabled()) { + return true; + } + + if (metadata_device_ == nullptr) { + metadata_device_ = snapshot_->EnsureMetadataMounted(); + } + return metadata_device_ != nullptr; +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 9dcdcf1a..8ad75933 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -233,6 +233,28 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t source_slot, const DeltaArchiveManifest& manifest); + // Returns true if metadata is expected to be mounted, false otherwise. + // Note that it returns false on non-Virtual A/B devices. + // + // Almost all functions of SnapshotManager depends on metadata being mounted. + // - In Android mode for Virtual A/B devices, assume it is mounted. If not, + // let caller fails when calling into SnapshotManager. + // - In recovery for Virtual A/B devices, it is possible that metadata is not + // formatted, hence it cannot be mounted. Caller should not call into + // SnapshotManager. + // - On non-Virtual A/B devices, updates do not depend on metadata partition. + // Caller should not call into SnapshotManager. 
+ // + // This function does NOT mount metadata partition. Use EnsureMetadataMounted + // to mount metadata partition. + bool ExpectMetadataMounted(); + + // Ensure /metadata is mounted. Returns true if successful, false otherwise. + // + // Note that this function returns true on non-Virtual A/B devices without + // doing anything. + bool EnsureMetadataMounted(); + std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; From d9b511d4ad5fa47552c035413dcf4b1e538fdba1 Mon Sep 17 00:00:00 2001 From: Andrew Date: Tue, 14 Apr 2020 15:26:32 -0700 Subject: [PATCH 264/624] update_engine: Fix typo in comment BUG=None TEST=None Change-Id: Ied3e106259e87b453cdb12600ea1ab90fb668f55 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2149850 Tested-by: Andrew Lassalle Auto-Submit: Andrew Lassalle Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani --- real_system_state.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/real_system_state.h b/real_system_state.h index 47120088..807a205b 100644 --- a/real_system_state.h +++ b/real_system_state.h @@ -144,7 +144,7 @@ class RealSystemState : public SystemState, public DaemonStateInterface { // Interface for dlcservice. std::unique_ptr dlcservice_; - // Interface for the clock. + // Interface for the bootloader control. std::unique_ptr boot_control_; // Interface for the clock. From 88deeef7772883b13f770231631ca864caf0ccfb Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 16 Apr 2020 11:52:37 -0700 Subject: [PATCH 265/624] update_engine: Disable SortIncludes in .clang-format clang-format does simple alphabetical sorting of #include lines, which conflicts with the fancier sorting used by cpplint.py BUG=none TEST=repo upload Change-Id: Ia053a932f08f38e18a1855302de5b36c80aeb7e5 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2153586 Tested-by: Jae Hoon Kim Commit-Queue: Amin Hassani Reviewed-by: Amin Hassani Auto-Submit: Jae Hoon Kim --- .clang-format | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.clang-format b/.clang-format index c1244fea..aed0ce8c 100644 --- a/.clang-format +++ b/.clang-format @@ -36,3 +36,7 @@ CommentPragmas: NOLINT:.* DerivePointerAlignment: false PointerAlignment: Left TabWidth: 2 + +# cpplint.py does smarter #include sorting than clang-format (the former ignores +# case and changes '-' to '_'). +SortIncludes: false From f9cb4492c2e8309e7c9892fb3ab1273e75b7eb4f Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 15 Apr 2020 13:00:20 -0700 Subject: [PATCH 266/624] Use libsnapshot stub classes ... to avoid crashes in update_engine. The stub classes does nothing but return a failure for all function calls, and act as if Virtual A/B is not enabled. 
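A minimal sketch of the stub pattern described above (illustrative only; the
real SnapshotManagerStub ships with libsnapshot and its interface is larger,
so the signatures here are assumptions): every operation reports failure or
"nothing mounted", so callers behave as if Virtual A/B were disabled without
ever dereferencing a null manager.

  class SnapshotManagerStub : public ISnapshotManager {
   public:
    bool BeginUpdate() override { return false; }
    bool CancelUpdate() override { return false; }
    bool InitiateMerge() override { return false; }
    std::unique_ptr<AutoDevice> EnsureMetadataMounted() override {
      return nullptr;  // Nothing to mount when Virtual A/B is off.
    }
    // ...remaining ISnapshotManager methods fail the same way.
  };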
Fixes: 148956645 Test: update_engine_unittests Change-Id: I48206958aba8d9a71cc7cf49f6b9d5d4f1c8735d --- cleanup_previous_update_action.cc | 12 +++++++----- cleanup_previous_update_action.h | 6 +++--- dynamic_partition_control_android.cc | 6 +++++- dynamic_partition_control_android.h | 2 +- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index 26cc6be9..6490ca8c 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -30,7 +30,7 @@ #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/delta_performer.h" -using android::snapshot::SnapshotManager; +using android::snapshot::ISnapshotManager; using android::snapshot::SnapshotMergeStats; using android::snapshot::UpdateState; using brillo::MessageLoop; @@ -55,7 +55,7 @@ namespace chromeos_update_engine { CleanupPreviousUpdateAction::CleanupPreviousUpdateAction( PrefsInterface* prefs, BootControlInterface* boot_control, - android::snapshot::SnapshotManager* snapshot, + android::snapshot::ISnapshotManager* snapshot, CleanupPreviousUpdateActionDelegateInterface* delegate) : prefs_(prefs), boot_control_(boot_control), @@ -64,7 +64,7 @@ CleanupPreviousUpdateAction::CleanupPreviousUpdateAction( running_(false), cancel_failed_(false), last_percentage_(0), - merge_stats_(SnapshotMergeStats::GetInstance(*snapshot)) {} + merge_stats_(nullptr) {} void CleanupPreviousUpdateAction::PerformAction() { ResumeAction(); @@ -110,8 +110,10 @@ void CleanupPreviousUpdateAction::StartActionInternal() { processor_->ActionComplete(this, ErrorCode::kSuccess); return; } - // SnapshotManager is only available on VAB devices. - CHECK(snapshot_); + // SnapshotManager must be available on VAB devices. 
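  // Editorial note (not part of the patch): previously |merge_stats_| was
  // initialized in the constructor via SnapshotMergeStats::GetInstance(*snapshot),
  // which could dereference a null manager when Virtual A/B is unavailable.
  // Fetching it here -- after the early return above and the CHECK below --
  // avoids that, and the stub manager introduced by this change keeps
  // |snapshot_| non-null in every configuration.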
+ CHECK(snapshot_ != nullptr); + merge_stats_ = snapshot_->GetSnapshotMergeStatsInstance(); + CHECK(merge_stats_ != nullptr); WaitBootCompletedOrSchedule(); } diff --git a/cleanup_previous_update_action.h b/cleanup_previous_update_action.h index 91e08b01..6f6ce078 100644 --- a/cleanup_previous_update_action.h +++ b/cleanup_previous_update_action.h @@ -49,7 +49,7 @@ class CleanupPreviousUpdateAction : public Action { CleanupPreviousUpdateAction( PrefsInterface* prefs, BootControlInterface* boot_control, - android::snapshot::SnapshotManager* snapshot, + android::snapshot::ISnapshotManager* snapshot, CleanupPreviousUpdateActionDelegateInterface* delegate); void PerformAction() override; @@ -67,13 +67,13 @@ class CleanupPreviousUpdateAction : public Action { private: PrefsInterface* prefs_; BootControlInterface* boot_control_; - android::snapshot::SnapshotManager* snapshot_; + android::snapshot::ISnapshotManager* snapshot_; CleanupPreviousUpdateActionDelegateInterface* delegate_; std::unique_ptr metadata_device_; bool running_{false}; bool cancel_failed_{false}; unsigned int last_percentage_{0}; - android::snapshot::SnapshotMergeStats* merge_stats_; + android::snapshot::ISnapshotMergeStats* merge_stats_; void StartActionInternal(); void ScheduleWaitBootCompleted(); diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index a310f209..bf31244d 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -35,6 +35,7 @@ #include #include #include +#include #include "update_engine/cleanup_previous_update_action.h" #include "update_engine/common/boot_control_interface.h" @@ -58,6 +59,7 @@ using android::fs_mgr::SlotSuffixForSlotNumber; using android::snapshot::OptimizeSourceCopyOperation; using android::snapshot::Return; using android::snapshot::SnapshotManager; +using android::snapshot::SnapshotManagerStub; using android::snapshot::UpdateState; namespace chromeos_update_engine { @@ -108,8 +110,10 @@ DynamicPartitionControlAndroid::DynamicPartitionControlAndroid() virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) { if (GetVirtualAbFeatureFlag().IsEnabled()) { snapshot_ = SnapshotManager::New(); - CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager."; + } else { + snapshot_ = SnapshotManagerStub::New(); } + CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager."; } FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() { diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 8ad75933..4918ea64 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -258,7 +258,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; - std::unique_ptr snapshot_; + std::unique_ptr snapshot_; std::unique_ptr metadata_device_; bool target_supports_snapshot_ = false; // Whether the target partitions should be loaded as dynamic partitions. Set From 59e52b25104ed228322a8eaca310228a6b16e598 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 15 Apr 2020 18:20:34 -0700 Subject: [PATCH 267/624] Delete MockDynamicPartitionControl. Obsolete. The class was used for BootControlAndroidTest. The test was later absorbed into DynamicPartitionControlAndroidTest. 
Test: make Change-Id: I1bcfabd357ffbed94945b55031741d831f1010a6 --- mock_dynamic_partition_control.h | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 1e4e5fd8..d015409a 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -28,29 +28,6 @@ namespace chromeos_update_engine { -class MockDynamicPartitionControl : public DynamicPartitionControlInterface { - public: - MOCK_METHOD5(MapPartitionOnDeviceMapper, - bool(const std::string&, - const std::string&, - uint32_t, - bool, - std::string*)); - MOCK_METHOD0(Cleanup, void()); - MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); - MOCK_METHOD5( - PreparePartitionsForUpdate, - bool(uint32_t, uint32_t, const DeltaArchiveManifest&, bool, uint64_t*)); - MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); - MOCK_METHOD1(FinishUpdate, bool(bool)); - MOCK_METHOD0(CleanupSuccessfulUpdate, ErrorCode()); - MOCK_METHOD3(GetCleanupPreviousUpdateAction, - std::unique_ptr( - BootControlInterface*, - PrefsInterface*, - CleanupPreviousUpdateActionDelegateInterface*)); -}; - class MockDynamicPartitionControlAndroid : public DynamicPartitionControlAndroid { public: From 5f32ee2b9f2a6c188258d2163f554b52d2dfad09 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 15 Apr 2020 18:22:39 -0700 Subject: [PATCH 268/624] MockDynamicPartitionControlAndroid: Use new MOCK_METHOD style. Test: update_engine_unittests Change-Id: Ia0e9f2a49049c91a8e27b94199fac6f3fdd42f76 --- mock_dynamic_partition_control.h | 69 +++++++++++++++++++------------- 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index d015409a..c933e872 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -31,34 +31,47 @@ namespace chromeos_update_engine { class MockDynamicPartitionControlAndroid : public DynamicPartitionControlAndroid { public: - MOCK_METHOD5(MapPartitionOnDeviceMapper, - bool(const std::string&, - const std::string&, - uint32_t, - bool, - std::string*)); - MOCK_METHOD1(UnmapPartitionOnDeviceMapper, bool(const std::string&)); - MOCK_METHOD0(Cleanup, void()); - MOCK_METHOD1(DeviceExists, bool(const std::string&)); - MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&)); - MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*)); - MOCK_METHOD3(LoadMetadataBuilder, - std::unique_ptr<::android::fs_mgr::MetadataBuilder>( - const std::string&, uint32_t, uint32_t)); - MOCK_METHOD3(StoreMetadata, - bool(const std::string&, - android::fs_mgr::MetadataBuilder*, - uint32_t)); - MOCK_METHOD1(GetDeviceDir, bool(std::string*)); - MOCK_METHOD0(GetDynamicPartitionsFeatureFlag, FeatureFlag()); - MOCK_METHOD1(GetSuperPartitionName, std::string(uint32_t)); - MOCK_METHOD0(GetVirtualAbFeatureFlag, FeatureFlag()); - MOCK_METHOD1(FinishUpdate, bool(bool)); - MOCK_METHOD5( - GetSystemOtherPath, - bool(uint32_t, uint32_t, const std::string&, std::string*, bool*)); - MOCK_METHOD2(EraseSystemOtherAvbFooter, bool(uint32_t, uint32_t)); - MOCK_METHOD0(IsAvbEnabledOnSystemOther, std::optional()); + MOCK_METHOD( + bool, + MapPartitionOnDeviceMapper, + (const std::string&, const std::string&, uint32_t, bool, std::string*), + (override)); + MOCK_METHOD(bool, + UnmapPartitionOnDeviceMapper, + (const std::string&), + (override)); + MOCK_METHOD(void, Cleanup, (), (override)); + MOCK_METHOD(bool, DeviceExists, (const std::string&), 
(override)); + MOCK_METHOD(::android::dm::DmDeviceState, + GetState, + (const std::string&), + (override)); + MOCK_METHOD(bool, + GetDmDevicePathByName, + (const std::string&, std::string*), + (override)); + MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>, + LoadMetadataBuilder, + (const std::string&, uint32_t, uint32_t), + (override)); + MOCK_METHOD(bool, + StoreMetadata, + (const std::string&, android::fs_mgr::MetadataBuilder*, uint32_t), + (override)); + MOCK_METHOD(bool, GetDeviceDir, (std::string*), (override)); + MOCK_METHOD(FeatureFlag, GetDynamicPartitionsFeatureFlag, (), (override)); + MOCK_METHOD(std::string, GetSuperPartitionName, (uint32_t), (override)); + MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override)); + MOCK_METHOD(bool, FinishUpdate, (bool), (override)); + MOCK_METHOD(bool, + GetSystemOtherPath, + (uint32_t, uint32_t, const std::string&, std::string*, bool*), + (override)); + MOCK_METHOD(bool, + EraseSystemOtherAvbFooter, + (uint32_t, uint32_t), + (override)); + MOCK_METHOD(std::optional, IsAvbEnabledOnSystemOther, (), (override)); void set_fake_mapped_devices(const std::set& fake) override { DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); From 5c5743f6c6e1e3a7f3928265fe46ad73e132f098 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 16 Apr 2020 12:59:07 -0700 Subject: [PATCH 269/624] Also clear required_size if successful On a VAB device, when sideloading a full package, if the device ended up using the semi-flashing path (i.e. source partitions are overwritten), clear required_size in the successful case as well. Test: update_engine_unittests Bug: 153555889 Change-Id: I881f578806b4bd5486f9d0d3b3cd631b7a752cc9 --- dynamic_partition_control_android.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index bf31244d..20e02ac8 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -493,8 +493,13 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( } } - return PrepareDynamicPartitionsForUpdate( - source_slot, target_slot, manifest, delete_source); + TEST_AND_RETURN_FALSE(PrepareDynamicPartitionsForUpdate( + source_slot, target_slot, manifest, delete_source)); + + if (required_size != nullptr) { + *required_size = 0; + } + return true; } namespace { From 302fa70565228bd5aeb6c0aab87b522d60a26008 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 16 Apr 2020 09:48:29 -0700 Subject: [PATCH 270/624] Add DynamicPartitionControlAndroid unittest for Virtual A/B Test: run it Fixes: 153555889 Change-Id: I8f938987721c09ec263a82f223e4920365d203fc --- dynamic_partition_control_android.h | 20 +-- dynamic_partition_control_android_unittest.cc | 121 ++++++++++++++++++ mock_dynamic_partition_control.h | 14 ++ 3 files changed, 146 insertions(+), 9 deletions(-) diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 4918ea64..18a05fb2 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -173,8 +173,19 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { virtual bool EraseSystemOtherAvbFooter(uint32_t source_slot, uint32_t target_slot); + // Helper for PreparePartitionsForUpdate. Used for devices with dynamic + // partitions updating without snapshots. 
+ // If |delete_source| is set, source partitions are deleted before resizing + // target partitions (using DeleteSourcePartitions). + virtual bool PrepareDynamicPartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool delete_source); + private: friend class DynamicPartitionControlAndroidTest; + friend class SnapshotPartitionTestP; void UnmapAllPartitions(); bool MapPartitionInternal(const std::string& super_device, @@ -189,15 +200,6 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t target_slot, const DeltaArchiveManifest& manifest); - // Helper for PreparePartitionsForUpdate. Used for devices with dynamic - // partitions updating without snapshots. - // If |delete_source| is set, source partitions are deleted before resizing - // target partitions (using DeleteSourcePartitions). - bool PrepareDynamicPartitionsForUpdate(uint32_t source_slot, - uint32_t target_slot, - const DeltaArchiveManifest& manifest, - bool delete_source); - // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions for // Virtual A/B update. bool PrepareSnapshotPartitionsForUpdate(uint32_t source_slot, diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 20819182..37381708 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -24,6 +24,7 @@ #include #include #include +#include #include "update_engine/common/mock_prefs.h" #include "update_engine/common/test_utils.h" @@ -31,6 +32,7 @@ #include "update_engine/mock_dynamic_partition_control.h" using android::dm::DmDeviceState; +using android::snapshot::MockSnapshotManager; using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder; using chromeos_update_engine::test_utils::ScopedTempFile; using std::string; @@ -72,6 +74,17 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { ON_CALL(dynamicControl(), EraseSystemOtherAvbFooter(_, _)) .WillByDefault(Return(true)); + + ON_CALL(dynamicControl(), IsRecovery()).WillByDefault(Return(false)); + + ON_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _)) + .WillByDefault(Invoke([&](uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool delete_source) { + return dynamicControl().RealPrepareDynamicPartitionsForUpdate( + source_slot, target_slot, manifest, delete_source); + })); } // Return the mocked DynamicPartitionControlInterface. 
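Editorial note (not part of the patch): the SetUp() additions above follow a
common gmock pattern -- keep the method mocked so individual tests can still
override it, but make the default action call back into the real
implementation. Assuming gmock's pointer-to-member Invoke overload, the lambda
can also be written as:

  ON_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
      .WillByDefault(Invoke(
          &dynamicControl(),
          &MockDynamicPartitionControlAndroid::
              RealPrepareDynamicPartitionsForUpdate));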
@@ -892,4 +905,112 @@ TEST_P(DynamicPartitionControlAndroidTestP, EraseSystemOtherAvbFooter) { ASSERT_EQ(new_expected, device_content); } +class FakeAutoDevice : public android::snapshot::AutoDevice { + public: + FakeAutoDevice() : AutoDevice("") {} +}; + +class SnapshotPartitionTestP : public DynamicPartitionControlAndroidTestP { + public: + void SetUp() override { + DynamicPartitionControlAndroidTestP::SetUp(); + ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag()) + .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH))); + + snapshot_ = new NiceMock(); + dynamicControl().snapshot_.reset(snapshot_); // takes ownership + EXPECT_CALL(*snapshot_, BeginUpdate()).WillOnce(Return(true)); + EXPECT_CALL(*snapshot_, EnsureMetadataMounted()) + .WillRepeatedly( + Invoke([]() { return std::make_unique(); })); + + manifest_ = + PartitionSizesToManifest({{"system", 3_GiB}, {"vendor", 1_GiB}}); + } + void ExpectCreateUpdateSnapshots(android::snapshot::Return val) { + manifest_.mutable_dynamic_partition_metadata()->set_snapshot_enabled(true); + EXPECT_CALL(*snapshot_, CreateUpdateSnapshots(_)) + .WillRepeatedly(Invoke([&, val](const auto& manifest) { + // Deep comparison requires full protobuf library. Comparing the + // pointers are sufficient. + EXPECT_EQ(&manifest_, &manifest); + LOG(WARNING) << "CreateUpdateSnapshots returning " << val.string(); + return val; + })); + } + bool PreparePartitionsForUpdate(uint64_t* required_size) { + return dynamicControl().PreparePartitionsForUpdate( + source(), target(), manifest_, true /* update */, required_size); + } + MockSnapshotManager* snapshot_ = nullptr; + DeltaArchiveManifest manifest_; +}; + +// Test happy path of PreparePartitionsForUpdate on a Virtual A/B device. +TEST_P(SnapshotPartitionTestP, PreparePartitions) { + ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok()); + uint64_t required_size = 0; + EXPECT_TRUE(PreparePartitionsForUpdate(&required_size)); + EXPECT_EQ(0u, required_size); +} + +// Test that if not enough space, required size returned by SnapshotManager is +// passed up. +TEST_P(SnapshotPartitionTestP, PreparePartitionsNoSpace) { + ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB)); + uint64_t required_size = 0; + EXPECT_FALSE(PreparePartitionsForUpdate(&required_size)); + EXPECT_EQ(1_GiB, required_size); +} + +// Test that in recovery, use empty space in super partition for a snapshot +// update first. +TEST_P(SnapshotPartitionTestP, RecoveryUseSuperEmpty) { + ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok()); + EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true)); + // Must not call PrepareDynamicPartitionsForUpdate if + // PrepareSnapshotPartitionsForUpdate succeeds. + EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _)) + .Times(0); + uint64_t required_size = 0; + EXPECT_TRUE(PreparePartitionsForUpdate(&required_size)); + EXPECT_EQ(0u, required_size); +} + +// Test that in recovery, if CreateUpdateSnapshots throws an error, try +// the flashing path for full updates. 
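// Editorial note (not part of the patch): "the flashing path" above refers to
// the snapshot-less fallback used when sideloading full OTAs in recovery --
// source partitions are dropped from the target super metadata and the target
// partitions are written in place (delete_source == true), much like flashing
// the device, instead of staging the update in Virtual A/B snapshots.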
+TEST_P(SnapshotPartitionTestP, RecoveryErrorShouldDeleteSource) { + // Expectation on PreparePartitionsForUpdate + ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB)); + EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true)); + EXPECT_CALL(*snapshot_, CancelUpdate()).WillOnce(Return(true)); + EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _)) + .WillRepeatedly(Invoke([&](auto source_slot, + auto target_slot, + const auto& manifest, + auto delete_source) { + EXPECT_EQ(source(), source_slot); + EXPECT_EQ(target(), target_slot); + // Deep comparison requires full protobuf library. Comparing the + // pointers are sufficient. + EXPECT_EQ(&manifest_, &manifest); + EXPECT_TRUE(delete_source); + return dynamicControl().RealPrepareDynamicPartitionsForUpdate( + source_slot, target_slot, manifest, delete_source); + })); + // Expectation on PrepareDynamicPartitionsForUpdate + SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}); + ExpectUnmap({T("system"), T("vendor")}); + // Expect that the source partitions aren't present in target super metadata. + ExpectStoreMetadata({{T("system"), 3_GiB}, {T("vendor"), 1_GiB}}); + + uint64_t required_size = 0; + EXPECT_TRUE(PreparePartitionsForUpdate(&required_size)); + EXPECT_EQ(0u, required_size); +} + +INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest, + SnapshotPartitionTestP, + testing::Values(TestParam{0, 1}, TestParam{1, 0})); + } // namespace chromeos_update_engine diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index c933e872..1aaebd8b 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -72,6 +72,11 @@ class MockDynamicPartitionControlAndroid (uint32_t, uint32_t), (override)); MOCK_METHOD(std::optional, IsAvbEnabledOnSystemOther, (), (override)); + MOCK_METHOD(bool, IsRecovery, (), (override)); + MOCK_METHOD(bool, + PrepareDynamicPartitionsForUpdate, + (uint32_t, uint32_t, const DeltaArchiveManifest&, bool), + (override)); void set_fake_mapped_devices(const std::set& fake) override { DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); @@ -95,6 +100,15 @@ class MockDynamicPartitionControlAndroid std::optional RealIsAvbEnabledInFstab(const std::string& path) { return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path); } + + bool RealPrepareDynamicPartitionsForUpdate( + uint32_t source_slot, + uint32_t target_slot, + const DeltaArchiveManifest& manifest, + bool delete_source) { + return DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate( + source_slot, target_slot, manifest, delete_source); + } }; } // namespace chromeos_update_engine From 224aed945deb997fc6bcf38a8e25b8885833732f Mon Sep 17 00:00:00 2001 From: Howard Chen Date: Fri, 17 Apr 2020 11:22:13 +0800 Subject: [PATCH 271/624] Avoid VAB merge when running a DSU Bug: 147071959 Test: ota_e2etest.py Change-Id: I9c8dcb4a2648d814672a966ce951516c1682d5f2 --- cleanup_previous_update_action.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index 6490ca8c..dd9a1cad 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -30,6 +30,7 @@ #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/delta_performer.h" +using android::base::GetBoolProperty; using android::snapshot::ISnapshotManager; using android::snapshot::SnapshotMergeStats; using android::snapshot::UpdateState; @@ -337,6 
+338,12 @@ bool CleanupPreviousUpdateAction::BeforeCancel() { void CleanupPreviousUpdateAction::InitiateMergeAndWait() { TEST_AND_RETURN(running_); LOG(INFO) << "Attempting to initiate merge."; + // suspend the VAB merge when running a DSU + if (GetBoolProperty("ro.gsid.image_running", false)) { + LOG(WARNING) << "Suspend the VAB merge when running a DSU."; + processor_->ActionComplete(this, ErrorCode::kError); + return; + } if (snapshot_->InitiateMerge()) { WaitForMergeOrSchedule(); From 23279926785d1933954ca0cb65e9d99c5e793059 Mon Sep 17 00:00:00 2001 From: Howard Chen Date: Fri, 17 Apr 2020 11:22:13 +0800 Subject: [PATCH 272/624] Avoid VAB merge when running a DSU Bug: 147071959 Test: ota_e2etest.py Merged-In: I9c8dcb4a2648d814672a966ce951516c1682d5f2 Change-Id: I9c8dcb4a2648d814672a966ce951516c1682d5f2 --- cleanup_previous_update_action.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index 26cc6be9..e43730d3 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -30,6 +30,7 @@ #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/delta_performer.h" +using android::base::GetBoolProperty; using android::snapshot::SnapshotManager; using android::snapshot::SnapshotMergeStats; using android::snapshot::UpdateState; @@ -335,6 +336,12 @@ bool CleanupPreviousUpdateAction::BeforeCancel() { void CleanupPreviousUpdateAction::InitiateMergeAndWait() { TEST_AND_RETURN(running_); LOG(INFO) << "Attempting to initiate merge."; + // suspend the VAB merge when running a DSU + if (GetBoolProperty("ro.gsid.image_running", false)) { + LOG(WARNING) << "Suspend the VAB merge when running a DSU."; + processor_->ActionComplete(this, ErrorCode::kError); + return; + } if (snapshot_->InitiateMerge()) { WaitForMergeOrSchedule(); From 7fdfbf14bb1183ae17bd74b703ad0d409e955f27 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Fri, 10 Apr 2020 18:15:50 -0700 Subject: [PATCH 273/624] update_engine: Call dlcservice's Install/Update Completion DBus APIs When an Install/Update completes, update_engine will now let dlcservice know all the DLCs that were installed/updated + verified. Update_engine will also track during install/update for DLCs which did not install/update so dlcservice receives the correct list of DLC IDs. BUG=chromium:1059126 TEST=FEATURES=test emerge-$B update_engine update_engine-client Cq-Depend: chromium:2141191 Change-Id: Id57f66c7c6957d34870d27119d9a6482fe902503 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2146104 Tested-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Andrew Lassalle Auto-Submit: Jae Hoon Kim --- common/dlcservice_interface.h | 8 ++++ common/dlcservice_stub.cc | 7 ++++ common/dlcservice_stub.h | 2 + dlcservice_chromeos.cc | 32 ++++++++++++-- dlcservice_chromeos.h | 9 +++- omaha_request_action.cc | 1 + omaha_request_params.cc | 7 ++++ omaha_request_params.h | 6 +++ update_attempter.cc | 12 ++++++ update_attempter.h | 5 +++ update_attempter_unittest.cc | 79 +++++++++++++++++++++++++++++++++++ 11 files changed, 163 insertions(+), 5 deletions(-) diff --git a/common/dlcservice_interface.h b/common/dlcservice_interface.h index aa241053..3524d500 100644 --- a/common/dlcservice_interface.h +++ b/common/dlcservice_interface.h @@ -34,6 +34,14 @@ class DlcServiceInterface { // On failure it returns false. 
virtual bool GetInstalled(std::vector* dlc_module_ids) = 0; + // Returns true if dlcservice successfully handled the install completion + // method call, otherwise false. + virtual bool InstallCompleted(const std::vector& ids) = 0; + + // Returns true if dlcservice successfully handled the update completion + // method call, otherwise false. + virtual bool UpdateCompleted(const std::vector& ids) = 0; + protected: DlcServiceInterface() = default; diff --git a/common/dlcservice_stub.cc b/common/dlcservice_stub.cc index c5f9306e..3dcb2e03 100644 --- a/common/dlcservice_stub.cc +++ b/common/dlcservice_stub.cc @@ -33,4 +33,11 @@ bool DlcServiceStub::GetInstalled(std::vector* dlc_module_ids) { return true; } +bool DlcServiceStub::InstallCompleted(const vector& ids) { + return true; +} +bool DlcServiceStub::UpdateCompleted(const vector& ids) { + return true; +} + } // namespace chromeos_update_engine diff --git a/common/dlcservice_stub.h b/common/dlcservice_stub.h index 4e12c113..9b27971c 100644 --- a/common/dlcservice_stub.h +++ b/common/dlcservice_stub.h @@ -32,6 +32,8 @@ class DlcServiceStub : public DlcServiceInterface { // BootControlInterface overrides. bool GetInstalled(std::vector* dlc_module_ids) override; + bool InstallCompleted(const std::vector& ids) override; + bool UpdateCompleted(const std::vector& ids) override; private: DISALLOW_COPY_AND_ASSIGN(DlcServiceStub); diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc index ad5806ac..3c76b2ad 100644 --- a/dlcservice_chromeos.cc +++ b/dlcservice_chromeos.cc @@ -16,6 +16,7 @@ #include "update_engine/dlcservice_chromeos.h" +#include #include // NOLINTNEXTLINE(build/include_alpha) "dbus-proxies.h" needs "dlcservice.pb.h" #include @@ -28,6 +29,12 @@ using std::vector; namespace chromeos_update_engine { +namespace { +org::chromium::DlcServiceInterfaceProxy GetDlcServiceProxy() { + return {DBusConnection::Get()->GetDBus()}; +} +} // namespace + std::unique_ptr CreateDlcService() { return std::make_unique(); } @@ -37,11 +44,8 @@ bool DlcServiceChromeOS::GetInstalled(vector* dlc_module_ids) { return false; dlc_module_ids->clear(); - org::chromium::DlcServiceInterfaceProxy dlcservice_proxy( - DBusConnection::Get()->GetDBus()); - dlcservice::DlcModuleList dlc_module_list; - if (!dlcservice_proxy.GetInstalled(&dlc_module_list, nullptr)) { + if (!GetDlcServiceProxy().GetInstalled(&dlc_module_list, nullptr)) { LOG(ERROR) << "dlcservice does not return installed DLC module list."; return false; } @@ -51,4 +55,24 @@ bool DlcServiceChromeOS::GetInstalled(vector* dlc_module_ids) { return true; } +bool DlcServiceChromeOS::InstallCompleted(const std::vector& ids) { + brillo::ErrorPtr err; + if (!GetDlcServiceProxy().InstallCompleted(ids, &err)) { + LOG(ERROR) << "dlcservice failed to complete install. ErrCode=" + << err->GetCode() << ", ErrMsg=" << err->GetMessage(); + return false; + } + return true; +} + +bool DlcServiceChromeOS::UpdateCompleted(const std::vector& ids) { + brillo::ErrorPtr err; + if (!GetDlcServiceProxy().UpdateCompleted(ids, &err)) { + LOG(ERROR) << "dlcservice failed to complete updated. 
ErrCode=" + << err->GetCode() << ", ErrMsg=" << err->GetMessage(); + return false; + } + return true; +} + } // namespace chromeos_update_engine diff --git a/dlcservice_chromeos.h b/dlcservice_chromeos.h index 73442e62..b56b4950 100644 --- a/dlcservice_chromeos.h +++ b/dlcservice_chromeos.h @@ -32,13 +32,20 @@ class DlcServiceChromeOS : public DlcServiceInterface { DlcServiceChromeOS() = default; ~DlcServiceChromeOS() = default; - // BootControlInterface overrides. + // DlcServiceInterface overrides. + // Will clear the |dlc_module_ids|, passed to be modified. Clearing by // default has the added benefit of avoiding indeterminate behavior in the // case that |dlc_module_ids| wasn't empty to begin which would lead to // possible duplicates and cases when error was not checked it's still safe. bool GetInstalled(std::vector* dlc_module_ids) override; + // Call into dlcservice for it to mark the DLC IDs as being installed. + bool InstallCompleted(const std::vector& ids) override; + + // Call into dlcservice for it to mark the DLC IDs as being updated. + bool UpdateCompleted(const std::vector& ids) override; + private: DISALLOW_COPY_AND_ASSIGN(DlcServiceChromeOS); }; diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 50fe3cc9..85699d8e 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -782,6 +782,7 @@ bool OmahaRequestAction::ParseStatus(OmahaParserData* parser_data, if (params_->IsDlcAppId(app.id)) { LOG(INFO) << "No update for " << app.id << " but update continuing since a DLC."; + params_->SetDlcNoUpdate(app.id); continue; } // Don't update if any app has status="noupdate". diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 88633926..52675980 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -258,4 +258,11 @@ bool OmahaRequestParams::IsDlcAppId(const std::string& app_id) const { return dlc_apps_params().find(app_id) != dlc_apps_params().end(); } +void OmahaRequestParams::SetDlcNoUpdate(const string& app_id) { + auto itr = dlc_apps_params_.find(app_id); + if (itr == dlc_apps_params_.end()) + return; + itr->second.updated = false; +} + } // namespace chromeos_update_engine diff --git a/omaha_request_params.h b/omaha_request_params.h index 14f3eaf6..b33d0b16 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -76,6 +76,9 @@ class OmahaRequestParams { int64_t ping_date_last_active; int64_t ping_date_last_rollcall; bool send_ping; + // |updated| is only used for DLCs to decide sending DBus message to + // dlcservice on an install/update completion. + bool updated = true; }; // Setters and getters for the various properties. @@ -232,6 +235,9 @@ class OmahaRequestParams { // request parameters. virtual bool IsDlcAppId(const std::string& app_id) const; + // If the App ID is a DLC App ID will set to no update. 
+ void SetDlcNoUpdate(const std::string& app_id); + // Suggested defaults static const char kOsVersion[]; static const int64_t kDefaultMinUpdateChecks = 0; diff --git a/update_attempter.cc b/update_attempter.cc index 29d256cb..c45fe4f9 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1219,8 +1219,18 @@ void UpdateAttempter::ProcessingDoneInternal(const ActionProcessor* processor, } } +vector UpdateAttempter::GetSuccessfulDlcIds() { + vector dlc_ids; + for (const auto& pr : omaha_request_params_->dlc_apps_params()) + if (pr.second.updated) + dlc_ids.push_back(pr.second.name); + return dlc_ids; +} + void UpdateAttempter::ProcessingDoneInstall(const ActionProcessor* processor, ErrorCode code) { + if (!system_state_->dlcservice()->InstallCompleted(GetSuccessfulDlcIds())) + LOG(WARNING) << "dlcservice didn't successfully handle install completion."; SetStatusAndNotify(UpdateStatus::IDLE); ScheduleUpdates(); LOG(INFO) << "DLC successfully installed, no reboot needed."; @@ -1230,6 +1240,8 @@ void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor, ErrorCode code) { WriteUpdateCompletedMarker(); + if (!system_state_->dlcservice()->UpdateCompleted(GetSuccessfulDlcIds())) + LOG(WARNING) << "dlcservice didn't successfully handle update completion."; SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT); ScheduleUpdates(); LOG(INFO) << "Update successfully applied, waiting to reboot."; diff --git a/update_attempter.h b/update_attempter.h index c364de36..3c6f4a13 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -292,6 +292,7 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart); FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest); FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable); + FRIEND_TEST(UpdateAttempterTest, GetSuccessfulDlcIds); // Returns the special flags to be added to ErrorCode values based on the // parameters used in the current update attempt. @@ -450,6 +451,10 @@ class UpdateAttempter : public ActionProcessorDelegate, // parameter on the |omaha_request_params_| object. void CalculateDlcParams(); + // Returns the list of DLC IDs that were installed/updated, excluding the ones + // which had "noupdate" in the Omaha response. + std::vector GetSuccessfulDlcIds(); + // Last status notification timestamp used for throttling. Use monotonic // TimeTicks to ensure that notifications are sent even if the system clock is // set back in the middle of an update. 
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index d65a5563..5849c383 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -19,7 +19,9 @@ #include #include +#include #include +#include #include #include @@ -64,12 +66,14 @@ using chromeos_update_manager::MockUpdateManager; using chromeos_update_manager::StagingSchedule; using chromeos_update_manager::UpdateCheckParams; using policy::DevicePolicy; +using std::map; using std::string; using std::unique_ptr; using std::unordered_set; using std::vector; using testing::_; using testing::DoAll; +using testing::ElementsAre; using testing::Field; using testing::InSequence; using testing::Invoke; @@ -135,16 +139,23 @@ struct ProcessingDoneTestParams { UpdateStatus status = UpdateStatus::CHECKING_FOR_UPDATE; ActionProcessor* processor = nullptr; ErrorCode code = ErrorCode::kSuccess; + map dlc_apps_params; // Expects: const bool kExpectedIsInstall = false; bool should_schedule_updates_be_called = true; UpdateStatus expected_exit_status = UpdateStatus::IDLE; + bool should_install_completed_be_called = false; + bool should_update_completed_be_called = false; + vector args_to_install_completed; + vector args_to_update_completed; }; class MockDlcService : public DlcServiceInterface { public: MOCK_METHOD1(GetInstalled, bool(vector*)); + MOCK_METHOD1(InstallCompleted, bool(const vector&)); + MOCK_METHOD1(UpdateCompleted, bool(const vector&)); }; } // namespace @@ -376,6 +387,22 @@ void UpdateAttempterTest::TestProcessingDone() { attempter_.DisableScheduleUpdates(); attempter_.is_install_ = pd_params_.is_install; attempter_.status_ = pd_params_.status; + attempter_.omaha_request_params_->set_dlc_apps_params( + pd_params_.dlc_apps_params); + + // Expects + if (pd_params_.should_install_completed_be_called) + EXPECT_CALL(mock_dlcservice_, + InstallCompleted(pd_params_.args_to_install_completed)) + .WillOnce(Return(true)); + else + EXPECT_CALL(mock_dlcservice_, InstallCompleted(_)).Times(0); + if (pd_params_.should_update_completed_be_called) + EXPECT_CALL(mock_dlcservice_, + UpdateCompleted(pd_params_.args_to_update_completed)) + .WillOnce(Return(true)); + else + EXPECT_CALL(mock_dlcservice_, UpdateCompleted(_)).Times(0); // Invocation attempter_.ProcessingDone(pd_params_.processor, pd_params_.code); @@ -2001,6 +2028,25 @@ TEST_F(UpdateAttempterTest, TEST_F(UpdateAttempterTest, ProcessingDoneUpdated) { // GIVEN an update finished. + // THEN update_engine should call update completion. + pd_params_.should_update_completed_be_called = true; + // THEN need reboot since update applied. + pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT; + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneUpdatedDlcFilter) { + // GIVEN an update finished. + // GIVEN DLC |AppParams| list. + auto dlc_1 = "dlc_1", dlc_2 = "dlc_2"; + pd_params_.dlc_apps_params = {{dlc_1, {.name = dlc_1, .updated = false}}, + {dlc_2, {.name = dlc_2}}}; + + // THEN update_engine should call update completion. + pd_params_.should_update_completed_be_called = true; + pd_params_.args_to_update_completed = {dlc_2}; // THEN need reboot since update applied. pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT; // THEN install indication should be false. @@ -2012,6 +2058,25 @@ TEST_F(UpdateAttempterTest, ProcessingDoneInstalled) { // GIVEN an install finished. pd_params_.is_install = true; + // THEN update_engine should call install completion. 
+ pd_params_.should_install_completed_be_called = true; + // THEN go idle. + // THEN install indication should be false. + + TestProcessingDone(); +} + +TEST_F(UpdateAttempterTest, ProcessingDoneInstalledDlcFilter) { + // GIVEN an install finished. + pd_params_.is_install = true; + // GIVEN DLC |AppParams| list. + auto dlc_1 = "dlc_1", dlc_2 = "dlc_2"; + pd_params_.dlc_apps_params = {{dlc_1, {.name = dlc_1, .updated = false}}, + {dlc_2, {.name = dlc_2}}}; + + // THEN update_engine should call install completion. + pd_params_.should_install_completed_be_called = true; + pd_params_.args_to_install_completed = {dlc_2}; // THEN go idle. // THEN install indication should be false. @@ -2024,6 +2089,7 @@ TEST_F(UpdateAttempterTest, ProcessingDoneInstallReportingError) { // GIVEN a reporting error occurred. pd_params_.status = UpdateStatus::REPORTING_ERROR_EVENT; + // THEN update_engine should not call install completion. // THEN go idle. // THEN install indication should be false. @@ -2035,6 +2101,7 @@ TEST_F(UpdateAttempterTest, ProcessingDoneNoUpdate) { // GIVEN an action error occured. pd_params_.code = ErrorCode::kNoUpdate; + // THEN update_engine should not call update completion. // THEN go idle. // THEN install indication should be false. @@ -2047,6 +2114,7 @@ TEST_F(UpdateAttempterTest, ProcessingDoneNoInstall) { // GIVEN an action error occured. pd_params_.code = ErrorCode::kNoUpdate; + // THEN update_engine should not call install completion. // THEN go idle. // THEN install indication should be false. @@ -2066,6 +2134,7 @@ TEST_F(UpdateAttempterTest, ProcessingDoneUpdateError) { pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT; // THEN install indication should be false. + // THEN update_engine should not call update completion. // THEN expect critical actions of |ScheduleErrorEventAction()|. EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1); EXPECT_CALL(*processor_, StartProcessing()).Times(1); @@ -2089,6 +2158,7 @@ TEST_F(UpdateAttempterTest, ProcessingDoneInstallError) { pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT; // THEN install indication should be false. + // THEN update_engine should not call install completion. // THEN expect critical actions of |ScheduleErrorEventAction()|. EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1); EXPECT_CALL(*processor_, StartProcessing()).Times(1); @@ -2457,4 +2527,13 @@ TEST_F(UpdateAttempterTest, SetDlcInactive) { EXPECT_FALSE(base::PathExists(metadata_path_dlc0)); } +TEST_F(UpdateAttempterTest, GetSuccessfulDlcIds) { + auto dlc_1 = "1", dlc_2 = "2", dlc_3 = "3"; + attempter_.omaha_request_params_->set_dlc_apps_params( + {{dlc_1, {.name = dlc_1, .updated = false}}, + {dlc_2, {.name = dlc_2}}, + {dlc_3, {.name = dlc_3, .updated = false}}}); + EXPECT_THAT(attempter_.GetSuccessfulDlcIds(), ElementsAre(dlc_2)); +} + } // namespace chromeos_update_engine From 914f554843936ac67ff53f41d24ebfee276e09aa Mon Sep 17 00:00:00 2001 From: Andrew Date: Tue, 21 Apr 2020 10:56:33 -0700 Subject: [PATCH 274/624] update_engine: Fix bug in MemoryPrefs::MemoryStorage::DeleteKey() PrefsInterface specifies that Delete(key) will delete the key if it exists, and do nothing if it doesn't. The behaviour in MemoryPrefs is not consistent with this, and behaves differently than Prefs. 
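For illustration, the intended contract looks roughly like this (a minimal sketch only; the key name is arbitrary):

    MemoryPrefs prefs;
    prefs.SetInt64("some-key", 1);
    EXPECT_TRUE(prefs.Delete("some-key"));   // Key existed and is removed.
    EXPECT_FALSE(prefs.Exists("some-key"));
    EXPECT_TRUE(prefs.Delete("some-key"));   // Key absent: no-op, still reports success.
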
BUG=None TEST=cros_workon_make update_engine --test Change-Id: I664eba56d7d365307ce420ed48f1cf0a4c6beeda Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2159295 Tested-by: Andrew Lassalle Commit-Queue: Amin Hassani Auto-Submit: Andrew Lassalle Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani --- common/prefs.cc | 5 ++--- common/prefs_unittest.cc | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/common/prefs.cc b/common/prefs.cc index 194bbd8b..6d86a504 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -186,9 +186,8 @@ bool MemoryPrefs::MemoryStorage::KeyExists(const string& key) const { bool MemoryPrefs::MemoryStorage::DeleteKey(const string& key) { auto it = values_.find(key); - if (it == values_.end()) - return false; - values_.erase(it); + if (it != values_.end()) + values_.erase(it); return true; } diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc index cb6fc709..3f293199 100644 --- a/common/prefs_unittest.cc +++ b/common/prefs_unittest.cc @@ -358,7 +358,7 @@ TEST_F(MemoryPrefsTest, BasicTest) { EXPECT_TRUE(prefs_.Delete(kKey)); EXPECT_FALSE(prefs_.Exists(kKey)); - EXPECT_FALSE(prefs_.Delete(kKey)); + EXPECT_TRUE(prefs_.Delete(kKey)); } } // namespace chromeos_update_engine From 50504d6ab786972146fbc43ac433f7e4b301f66b Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 23 Apr 2020 14:32:38 -0700 Subject: [PATCH 275/624] update_engine: Dump InstallPlan with Partitions Improve visibility into the |InstallPlan| after the |InstallPlan::Partition|s are populated by |DeltaPerformer|. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine update_engine-client TEST=# logs on DUT for dump Change-Id: I3f53cca7bd006a2d4d01ad738850ebb954cbd73f Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2163175 Tested-by: Jae Hoon Kim Reviewed-by: Andrew Lassalle Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani --- payload_consumer/filesystem_verifier_action.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc index 36e5a35b..f9e7f81c 100644 --- a/payload_consumer/filesystem_verifier_action.cc +++ b/payload_consumer/filesystem_verifier_action.cc @@ -57,6 +57,7 @@ void FilesystemVerifierAction::PerformAction() { abort_action_completer.set_code(ErrorCode::kSuccess); return; } + install_plan_.Dump(); StartPartitionHashing(); abort_action_completer.set_should_complete(false); From 93cde30635c844c9a78b78dcea283af0e70767ee Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 27 Apr 2020 12:59:29 -0700 Subject: [PATCH 276/624] Allow non-existing fstab.postinstall This file is only installed when cppreopts.sh is installed. However, the latter might not be installed if system_other is not used. 
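The caller-visible effect, roughly (illustrative sketch; the path shown is hypothetical):

    // Missing fstab file:
    //   before this change: returns std::nullopt, which callers treat as an error;
    //   after this change:  returns false, i.e. AVB is simply not enabled there.
    std::optional<bool> enabled = IsAvbEnabledInFstab("/missing/fstab.postinstall");
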
Fixes: 155053192 Test: pass Change-Id: I8569d72c03a260bd1be9dcccb5b9ae670117179c --- dynamic_partition_control_android.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 20e02ac8..7486d095 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -556,7 +556,10 @@ std::optional DynamicPartitionControlAndroid::IsAvbEnabledInFstab( const std::string& path) { Fstab fstab; if (!ReadFstabFromFile(path, &fstab)) { - LOG(WARNING) << "Cannot read fstab from " << path; + PLOG(WARNING) << "Cannot read fstab from " << path; + if (errno == ENOENT) { + return false; + } return std::nullopt; } for (const auto& entry : fstab) { From 4b28a53926e27d47fac7784be553c494ae44753f Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 27 Apr 2020 12:59:29 -0700 Subject: [PATCH 277/624] Allow non-existing fstab.postinstall This file is only installed when cppreopts.sh is installed. However, the latter might not be installed if system_other is not used. Bug: 155053192 Test: pass Change-Id: I8569d72c03a260bd1be9dcccb5b9ae670117179c (cherry picked from commit 93cde30635c844c9a78b78dcea283af0e70767ee) Merged-In: I8569d72c03a260bd1be9dcccb5b9ae670117179c --- dynamic_partition_control_android.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index a310f209..48ad5f03 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -547,7 +547,10 @@ std::optional DynamicPartitionControlAndroid::IsAvbEnabledInFstab( const std::string& path) { Fstab fstab; if (!ReadFstabFromFile(path, &fstab)) { - LOG(WARNING) << "Cannot read fstab from " << path; + PLOG(WARNING) << "Cannot read fstab from " << path; + if (errno == ENOENT) { + return false; + } return std::nullopt; } for (const auto& entry : fstab) { From 7d674af4716f1b39e107dd84c616291b026e96dd Mon Sep 17 00:00:00 2001 From: Bob Badour Date: Mon, 27 Apr 2020 19:12:28 -0700 Subject: [PATCH 278/624] Add METADATA to update_engine: Apache2+BSD = NOTICE Bug: 68860345 Bug: 69058154 Bug: 151953481 Test: no code changes Change-Id: I652e6dde5be3eb84a2336f1767d0ddc7601b0901 --- METADATA | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 METADATA diff --git a/METADATA b/METADATA new file mode 100644 index 00000000..d97975ca --- /dev/null +++ b/METADATA @@ -0,0 +1,3 @@ +third_party { + license_type: NOTICE +} From 2b68e6b41a78adcaa36805ec024700985d9ecde7 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 17 Apr 2020 10:49:12 -0700 Subject: [PATCH 279/624] update_engine: Use GetDlcsToUpdate() instead of GetInstalled() The meaning of the GetInstalled() DBus method in dlcservice has changed. So we need to get the list of DLCs that ought to be updated from the new DBus GetDlcsToUpdate(). Also rename all dlc_module_ids to dlc_ids. 
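The new call-site shape is roughly (illustrative sketch):

    std::vector<std::string> dlc_ids;
    if (!system_state_->dlcservice()->GetDlcsToUpdate(&dlc_ids)) {
      // Failure is logged and treated as "no DLCs to update";
      // the platform update itself still proceeds.
    }
    // |dlc_ids| replaces the old |dlc_module_ids| naming throughout.
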
BUG=chromium:1071654 TEST=cros_workon_make --board reef --test update_engine Cq-Depend: chromium:2157669 Change-Id: I02e450a1fd75f8b387eb8a107c9c8a32f3e01e6e Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2163441 Tested-by: Amin Hassani Reviewed-by: Andrew Lassalle Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- client_library/client_dbus.h | 2 +- client_library/include/update_engine/client.h | 7 +++-- common/dlcservice_interface.h | 8 +++--- common/dlcservice_stub.cc | 10 +++---- common/dlcservice_stub.h | 6 ++--- common_service.cc | 5 ++-- common_service.h | 4 +-- dlcservice_chromeos.cc | 27 +++++++++---------- dlcservice_chromeos.h | 14 +++++----- mock_update_attempter.h | 2 +- omaha_request_builder_xml.cc | 4 +-- omaha_request_builder_xml_unittest.cc | 14 +++++----- update_attempter.cc | 13 +++++---- update_attempter.h | 6 ++--- update_attempter_unittest.cc | 12 ++++----- 15 files changed, 65 insertions(+), 69 deletions(-) diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index a032d214..74fcce32 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -44,7 +44,7 @@ class DBusUpdateEngineClient : public UpdateEngineClient { bool at_user_request) override; bool AttemptInstall(const std::string& omaha_url, - const std::vector& dlc_module_ids) override; + const std::vector& dlc_ids) override; bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) override; diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h index 9bda0b9b..f7347339 100644 --- a/client_library/include/update_engine/client.h +++ b/client_library/include/update_engine/client.h @@ -54,11 +54,10 @@ class UpdateEngineClient { // empty indicates update_engine should use its default value. Note that // update_engine will ignore this parameter in production mode to avoid // pulling untrusted updates. - // |dlc_module_ids| + // |dlc_ids| // A list of DLC module IDs. - virtual bool AttemptInstall( - const std::string& omaha_url, - const std::vector& dlc_module_ids) = 0; + virtual bool AttemptInstall(const std::string& omaha_url, + const std::vector& dlc_ids) = 0; // Same as above but return the entire struct instead. virtual bool GetStatus(UpdateEngineStatus* out_status) const = 0; diff --git a/common/dlcservice_interface.h b/common/dlcservice_interface.h index 3524d500..70b74ab8 100644 --- a/common/dlcservice_interface.h +++ b/common/dlcservice_interface.h @@ -30,17 +30,17 @@ class DlcServiceInterface { public: virtual ~DlcServiceInterface() = default; - // Returns true and a list of installed DLC module ids in |dlc_module_ids|. + // Returns true and a list of installed DLC ids in |dlc_ids|. // On failure it returns false. - virtual bool GetInstalled(std::vector* dlc_module_ids) = 0; + virtual bool GetDlcsToUpdate(std::vector* dlc_ids) = 0; // Returns true if dlcservice successfully handled the install completion // method call, otherwise false. - virtual bool InstallCompleted(const std::vector& ids) = 0; + virtual bool InstallCompleted(const std::vector& dlc_ids) = 0; // Returns true if dlcservice successfully handled the update completion // method call, otherwise false. 
- virtual bool UpdateCompleted(const std::vector& ids) = 0; + virtual bool UpdateCompleted(const std::vector& dlc_ids) = 0; protected: DlcServiceInterface() = default; diff --git a/common/dlcservice_stub.cc b/common/dlcservice_stub.cc index 3dcb2e03..24471470 100644 --- a/common/dlcservice_stub.cc +++ b/common/dlcservice_stub.cc @@ -27,16 +27,16 @@ std::unique_ptr CreateDlcService() { return std::make_unique(); } -bool DlcServiceStub::GetInstalled(std::vector* dlc_module_ids) { - if (dlc_module_ids) - dlc_module_ids->clear(); +bool DlcServiceStub::GetDlcsToUpdate(vector* dlc_ids) { + if (dlc_ids) + dlc_ids->clear(); return true; } -bool DlcServiceStub::InstallCompleted(const vector& ids) { +bool DlcServiceStub::InstallCompleted(const vector& dlc_ids) { return true; } -bool DlcServiceStub::UpdateCompleted(const vector& ids) { +bool DlcServiceStub::UpdateCompleted(const vector& dlc_ids) { return true; } diff --git a/common/dlcservice_stub.h b/common/dlcservice_stub.h index 9b27971c..bc803e80 100644 --- a/common/dlcservice_stub.h +++ b/common/dlcservice_stub.h @@ -31,9 +31,9 @@ class DlcServiceStub : public DlcServiceInterface { ~DlcServiceStub() = default; // BootControlInterface overrides. - bool GetInstalled(std::vector* dlc_module_ids) override; - bool InstallCompleted(const std::vector& ids) override; - bool UpdateCompleted(const std::vector& ids) override; + bool GetDlcsToUpdate(std::vector* dlc_ids) override; + bool InstallCompleted(const std::vector& dlc_ids) override; + bool UpdateCompleted(const std::vector& dlc_ids) override; private: DISALLOW_COPY_AND_ASSIGN(DlcServiceStub); diff --git a/common_service.cc b/common_service.cc index 347833b4..85fb9e4f 100644 --- a/common_service.cc +++ b/common_service.cc @@ -105,9 +105,8 @@ bool UpdateEngineService::AttemptUpdate(ErrorPtr* /* error */, bool UpdateEngineService::AttemptInstall(brillo::ErrorPtr* error, const string& omaha_url, - const vector& dlc_module_ids) { - if (!system_state_->update_attempter()->CheckForInstall(dlc_module_ids, - omaha_url)) { + const vector& dlc_ids) { + if (!system_state_->update_attempter()->CheckForInstall(dlc_ids, omaha_url)) { // TODO(xiaochu): support more detailed error messages. LogAndSetError(error, FROM_HERE, "Could not schedule install operation."); return false; diff --git a/common_service.h b/common_service.h index 6c742a5d..cfcece51 100644 --- a/common_service.h +++ b/common_service.h @@ -55,10 +55,10 @@ class UpdateEngineService { // Attempts a DLC module install operation. // |omaha_url|: the URL to query for update. - // |dlc_module_ids|: a list of DLC module IDs. + // |dlc_ids|: a list of DLC module IDs. 
bool AttemptInstall(brillo::ErrorPtr* error, const std::string& omaha_url, - const std::vector& dlc_module_ids); + const std::vector& dlc_ids); bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash); diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc index 3c76b2ad..08482ee5 100644 --- a/dlcservice_chromeos.cc +++ b/dlcservice_chromeos.cc @@ -23,7 +23,6 @@ #include "update_engine/dbus_connection.h" -using dlcservice::DlcModuleList; using std::string; using std::vector; @@ -39,25 +38,25 @@ std::unique_ptr CreateDlcService() { return std::make_unique(); } -bool DlcServiceChromeOS::GetInstalled(vector* dlc_module_ids) { - if (!dlc_module_ids) +bool DlcServiceChromeOS::GetDlcsToUpdate(vector* dlc_ids) { + if (!dlc_ids) return false; - dlc_module_ids->clear(); + dlc_ids->clear(); - dlcservice::DlcModuleList dlc_module_list; - if (!GetDlcServiceProxy().GetInstalled(&dlc_module_list, nullptr)) { - LOG(ERROR) << "dlcservice does not return installed DLC module list."; + brillo::ErrorPtr err; + if (!GetDlcServiceProxy().GetDlcsToUpdate(dlc_ids, &err)) { + LOG(ERROR) << "dlcservice failed to return DLCs that need to be updated. " + << "ErrorCode=" << err->GetCode() + << ", ErrMsg=" << err->GetMessage(); + dlc_ids->clear(); return false; } - for (const auto& dlc_module_info : dlc_module_list.dlc_module_infos()) { - dlc_module_ids->emplace_back(dlc_module_info.dlc_id()); - } return true; } -bool DlcServiceChromeOS::InstallCompleted(const std::vector& ids) { +bool DlcServiceChromeOS::InstallCompleted(const vector& dlc_ids) { brillo::ErrorPtr err; - if (!GetDlcServiceProxy().InstallCompleted(ids, &err)) { + if (!GetDlcServiceProxy().InstallCompleted(dlc_ids, &err)) { LOG(ERROR) << "dlcservice failed to complete install. ErrCode=" << err->GetCode() << ", ErrMsg=" << err->GetMessage(); return false; @@ -65,9 +64,9 @@ bool DlcServiceChromeOS::InstallCompleted(const std::vector& ids) { return true; } -bool DlcServiceChromeOS::UpdateCompleted(const std::vector& ids) { +bool DlcServiceChromeOS::UpdateCompleted(const vector& dlc_ids) { brillo::ErrorPtr err; - if (!GetDlcServiceProxy().UpdateCompleted(ids, &err)) { + if (!GetDlcServiceProxy().UpdateCompleted(dlc_ids, &err)) { LOG(ERROR) << "dlcservice failed to complete updated. ErrCode=" << err->GetCode() << ", ErrMsg=" << err->GetMessage(); return false; diff --git a/dlcservice_chromeos.h b/dlcservice_chromeos.h index b56b4950..8828e1af 100644 --- a/dlcservice_chromeos.h +++ b/dlcservice_chromeos.h @@ -34,17 +34,17 @@ class DlcServiceChromeOS : public DlcServiceInterface { // DlcServiceInterface overrides. - // Will clear the |dlc_module_ids|, passed to be modified. Clearing by - // default has the added benefit of avoiding indeterminate behavior in the - // case that |dlc_module_ids| wasn't empty to begin which would lead to - // possible duplicates and cases when error was not checked it's still safe. - bool GetInstalled(std::vector* dlc_module_ids) override; + // Will clear the |dlc_ids|, passed to be modified. Clearing by default has + // the added benefit of avoiding indeterminate behavior in the case that + // |dlc_ids| wasn't empty to begin which would lead to possible duplicates and + // cases when error was not checked it's still safe. + bool GetDlcsToUpdate(std::vector* dlc_ids) override; // Call into dlcservice for it to mark the DLC IDs as being installed. 
- bool InstallCompleted(const std::vector& ids) override; + bool InstallCompleted(const std::vector& dlc_ids) override; // Call into dlcservice for it to mark the DLC IDs as being updated. - bool UpdateCompleted(const std::vector& ids) override; + bool UpdateCompleted(const std::vector& dlc_ids) override; private: DISALLOW_COPY_AND_ASSIGN(DlcServiceChromeOS); diff --git a/mock_update_attempter.h b/mock_update_attempter.h index 9d966d73..fdeba524 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -55,7 +55,7 @@ class MockUpdateAttempter : public UpdateAttempter { UpdateAttemptFlags flags)); MOCK_METHOD2(CheckForInstall, - bool(const std::vector& dlc_module_ids, + bool(const std::vector& dlc_ids, const std::string& omaha_url)); MOCK_METHOD2(SetDlcActiveValue, bool(bool, const std::string&)); diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index e2bf307a..097b9f1e 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -421,13 +421,13 @@ string OmahaRequestBuilderXml::GetApps() const { app_xml += GetApp(system_app); } for (const auto& it : params_->dlc_apps_params()) { - OmahaAppData dlc_module_app = { + OmahaAppData dlc_app_data = { .id = it.first, .version = params_->is_install() ? kNoVersion : params_->app_version(), .skip_update = false, .is_dlc = true, .app_params = it.second}; - app_xml += GetApp(dlc_module_app); + app_xml += GetApp(dlc_app_data); } return app_xml; } diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 3cf5cc06..017acecf 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -99,14 +99,14 @@ TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) { 0, fake_system_state_.prefs(), ""}; - OmahaAppData dlc_module_app = {.id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", - .version = "", - .skip_update = false, - .is_dlc = false}; + OmahaAppData dlc_app_data = {.id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + .version = "", + .skip_update = false, + .is_dlc = false}; // Verify that the attributes that shouldn't be missing for Platform AppID are // in fact present in the . - const string app = omaha_request.GetApp(dlc_module_app); + const string app = omaha_request.GetApp(dlc_app_data); EXPECT_NE(string::npos, app.find("lang=")); EXPECT_NE(string::npos, app.find("fw_version=")); EXPECT_NE(string::npos, app.find("ec_version=")); @@ -125,12 +125,12 @@ TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { 0, fake_system_state_.prefs(), ""}; - OmahaAppData dlc_module_app = { + OmahaAppData dlc_app_data = { .id = "_dlc_id", .version = "", .skip_update = false, .is_dlc = true}; // Verify that the attributes that should be missing for DLC AppIDs are in // fact not present in the . - const string app = omaha_request.GetApp(dlc_module_app); + const string app = omaha_request.GetApp(dlc_app_data); EXPECT_EQ(string::npos, app.find("lang=")); EXPECT_EQ(string::npos, app.find("fw_version=")); EXPECT_EQ(string::npos, app.find("ec_version=")); diff --git a/update_attempter.cc b/update_attempter.cc index c45fe4f9..ae7f71eb 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -717,10 +717,10 @@ int64_t UpdateAttempter::GetPingMetadata( } void UpdateAttempter::CalculateDlcParams() { - // Set the |dlc_module_ids_| only for an update. This is required to get the + // Set the |dlc_ids_| only for an update. This is required to get the // currently installed DLC(s). 
if (!is_install_ && - !system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) { + !system_state_->dlcservice()->GetDlcsToUpdate(&dlc_ids_)) { LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the " "state of dlcservice, will not update DLC modules."; } @@ -730,8 +730,7 @@ void UpdateAttempter::CalculateDlcParams() { base::FileEnumerator dir_enum(metadata_root_path, false /* recursive */, base::FileEnumerator::DIRECTORIES); - std::unordered_set dlc_ids(dlc_module_ids_.begin(), - dlc_module_ids_.end()); + std::unordered_set dlc_ids(dlc_ids_.begin(), dlc_ids_.end()); for (base::FilePath name = dir_enum.Next(); !name.empty(); name = dir_enum.Next()) { string id = name.BaseName().value(); @@ -742,7 +741,7 @@ void UpdateAttempter::CalculateDlcParams() { } } std::map dlc_apps_params; - for (const auto& dlc_id : dlc_module_ids_) { + for (const auto& dlc_id : dlc_ids_) { OmahaRequestParams::AppParams dlc_params{ .active_counting_type = OmahaRequestParams::kDateBased, .name = dlc_id, @@ -1012,7 +1011,7 @@ bool UpdateAttempter::CheckForUpdate(const string& app_version, return true; } -bool UpdateAttempter::CheckForInstall(const vector& dlc_module_ids, +bool UpdateAttempter::CheckForInstall(const vector& dlc_ids, const string& omaha_url) { if (status_ != UpdateStatus::IDLE) { LOG(INFO) << "Refusing to do an install as there is an " @@ -1021,7 +1020,7 @@ bool UpdateAttempter::CheckForInstall(const vector& dlc_module_ids, return false; } - dlc_module_ids_ = dlc_module_ids; + dlc_ids_ = dlc_ids; is_install_ = true; forced_omaha_url_.clear(); diff --git a/update_attempter.h b/update_attempter.h index 3c6f4a13..9e481792 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -141,7 +141,7 @@ class UpdateAttempter : public ActionProcessorDelegate, UpdateAttemptFlags flags); // This is the version of CheckForUpdate called by AttemptInstall API. - virtual bool CheckForInstall(const std::vector& dlc_module_ids, + virtual bool CheckForInstall(const std::vector& dlc_ids, const std::string& omaha_url); // This is the internal entry point for going through a rollback. This will @@ -447,7 +447,7 @@ class UpdateAttempter : public ActionProcessorDelegate, int64_t GetPingMetadata(const PrefsInterface& prefs, const std::string& metadata_name) const; - // Calculates the update parameters for DLCs. Sets the |dlc_modules_| + // Calculates the update parameters for DLCs. Sets the |dlc_ids_| // parameter on the |omaha_request_params_| object. void CalculateDlcParams(); @@ -555,7 +555,7 @@ class UpdateAttempter : public ActionProcessorDelegate, std::string forced_omaha_url_; // A list of DLC module IDs. - std::vector dlc_module_ids_; + std::vector dlc_ids_; // Whether the operation is install (write to the current slot not the // inactive slot). 
bool is_install_; diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 5849c383..56665ad3 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -153,7 +153,7 @@ struct ProcessingDoneTestParams { class MockDlcService : public DlcServiceInterface { public: - MOCK_METHOD1(GetInstalled, bool(vector*)); + MOCK_METHOD1(GetDlcsToUpdate, bool(vector*)); MOCK_METHOD1(InstallCompleted, bool(const vector&)); MOCK_METHOD1(UpdateCompleted, bool(const vector&)); }; @@ -2360,7 +2360,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); attempter_.is_install_ = true; - attempter_.dlc_module_ids_ = {dlc_id}; + attempter_.dlc_ids_ = {dlc_id}; attempter_.CalculateDlcParams(); OmahaRequestParams* params = fake_system_state_.request_params(); @@ -2387,7 +2387,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) .Append(dlc_id); ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); - EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2416,7 +2416,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) .Append(dlc_id); ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); - EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2449,7 +2449,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) .Append(dlc_id); ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); - EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2485,7 +2485,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) { base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) .Append("stale"); ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc_stale)); - EXPECT_CALL(mock_dlcservice_, GetInstalled(_)) + EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); From fab175a3db9ddd9898cbe89c24d2796a2985363b Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 17 Apr 2020 11:20:50 -0700 Subject: [PATCH 280/624] update_engine: Change parameters of AttemptInstall() DBus Currently, the update_engine is using the dlcservice's protobuf as its own input. This is a bad API because anyone dependent on the update_engine's DBus needs to be dependent on the dlcservice too. This CL changes that so we only pass the arguments we want (DLC IDs and the omaha url). 
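Client-side, the call changes roughly like this (illustrative sketch):

    // Before: a dlcservice::DlcModuleList proto was built and passed over DBus.
    // After:  the plain arguments are passed directly.
    proxy_->AttemptInstall(omaha_url, dlc_ids, nullptr /* brillo::ErrorPtr* */);
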
BUG=chromium:1071654 TEST=cros_workon_make --board reef --test update_engine Cq-Depend: chromium:2157670 Change-Id: Id7f7a86d8b3e3194d4d7697a8ad26ed4bcc4ba2c Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2163442 Tested-by: Amin Hassani Reviewed-by: Andrew Lassalle Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- client_library/client_dbus.cc | 12 +----------- client_library/client_dbus.h | 1 - .../org.chromium.UpdateEngineInterface.dbus-xml | 7 +++---- dbus_service.cc | 16 +++------------- dbus_service.h | 5 +++-- 5 files changed, 10 insertions(+), 31 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 4ec76c5b..5ca519a1 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -21,7 +21,6 @@ #include #include -#include #include #include "update_engine/update_status_utils.h" @@ -86,16 +85,7 @@ bool DBusUpdateEngineClient::AttemptUpdate(const string& in_app_version, bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url, const vector& dlc_ids) { - // Convert parameters into protobuf. - dlcservice::DlcModuleList dlc_parameters; - dlc_parameters.set_omaha_url(omaha_url); - for (const auto& dlc_id : dlc_ids) { - dlcservice::DlcModuleInfo* dlc_module_info = - dlc_parameters.add_dlc_module_infos(); - dlc_module_info->set_dlc_id(dlc_id); - } - return proxy_->AttemptInstall(dlc_parameters, - nullptr /* brillo::ErrorPtr* */); + return proxy_->AttemptInstall(omaha_url, dlc_ids, nullptr); } bool DBusUpdateEngineClient::SetDlcActiveValue(bool is_active, diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h index 74fcce32..f19555fc 100644 --- a/client_library/client_dbus.h +++ b/client_library/client_dbus.h @@ -23,7 +23,6 @@ #include #include -#include #include #include "update_engine/client_library/include/update_engine/client.h" diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml index 51457e5e..ac2f0211 100644 --- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml +++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml @@ -35,12 +35,11 @@ - + + - The information about DLC modules that needs to be installed. + The list of DLC IDs that needs to be installed. 
- diff --git a/dbus_service.cc b/dbus_service.cc index a5fec740..46ac1d1a 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -19,7 +19,6 @@ #include #include -#include #include #include "update_engine/dbus_connection.h" @@ -30,7 +29,6 @@ namespace chromeos_update_engine { using brillo::ErrorPtr; using chromeos_update_engine::UpdateEngineService; -using dlcservice::DlcModuleList; using std::string; using std::vector; using update_engine::Operation; @@ -82,17 +80,9 @@ bool DBusUpdateEngineService::AttemptUpdateWithFlags( } bool DBusUpdateEngineService::AttemptInstall(ErrorPtr* error, - const DlcModuleList& request) { - vector dlc_ids; - for (const auto& dlc_module_info : request.dlc_module_infos()) { - if (dlc_module_info.dlc_id().empty()) { - *error = brillo::Error::Create( - FROM_HERE, "update_engine", "INTERNAL", "Empty DLC ID passed."); - return false; - } - dlc_ids.push_back(dlc_module_info.dlc_id()); - } - return common_->AttemptInstall(error, request.omaha_url(), dlc_ids); + const string& in_omaha_url, + const vector& dlc_ids) { + return common_->AttemptInstall(error, in_omaha_url, dlc_ids); } bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error, diff --git a/dbus_service.h b/dbus_service.h index 86f5b93c..873909ee 100644 --- a/dbus_service.h +++ b/dbus_service.h @@ -21,10 +21,10 @@ #include #include +#include #include #include -#include #include #include "update_engine/common_service.h" @@ -52,7 +52,8 @@ class DBusUpdateEngineService int32_t in_flags_as_int) override; bool AttemptInstall(brillo::ErrorPtr* error, - const dlcservice::DlcModuleList& request) override; + const std::string& in_omaha_url, + const std::vector& dlc_ids) override; bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash) override; From ebea33916754d5522ce6489a910b990b119b7174 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 27 Apr 2020 13:27:35 -0700 Subject: [PATCH 281/624] update_engine: Utilize Optional for OmahaParserData |OmahaParserData|'s cohort* fields utilize the |base::Optional| class to indicate whether a value was assigned to the fields. This atomically holds the set value for cohort related fields. Also, the |HTTPResponseCode|, when converted to a sane value, is logged so the previous error code value can be tracked. 
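The resulting parse-and-persist pattern is roughly (illustrative sketch; assumes the cohort field is an optional string):

    base::Optional<std::string> cohort;  // Unset unless the response carried the attribute.
    if (attrs.find(kAttrCohort) != attrs.end())
      cohort = attrs[kAttrCohort];
    // Later, only persist what was actually present:
    if (cohort)
      PersistCohortData(kPrefsOmahaCohort, cohort.value());
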
BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine update_engine-client # filter Change-Id: I5dbf59965538dc6c1eab052677a8d607423a34db Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2168611 Tested-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim Reviewed-by: Amin Hassani --- omaha_request_action.cc | 42 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 85699d8e..81abb3e9 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -55,6 +56,7 @@ #include "update_engine/p2p_manager.h" #include "update_engine/payload_state_interface.h" +using base::Optional; using base::Time; using base::TimeDelta; using chromeos_update_manager::kRollforwardInfinity; @@ -139,12 +141,9 @@ struct OmahaParserData { string manifest_version; map action_postinstall_attrs; string updatecheck_status; - string cohort; - string cohorthint; - string cohortname; - bool cohort_set = false; - bool cohorthint_set = false; - bool cohortname_set = false; + Optional cohort; + Optional cohorthint; + Optional cohortname; struct Package { string name; @@ -180,21 +179,14 @@ void ParserHandlerStart(void* user_data, if (data->current_path == "/response/app") { OmahaParserData::App app; - if (attrs.find(kAttrAppId) != attrs.end()) { + if (attrs.find(kAttrAppId) != attrs.end()) app.id = attrs[kAttrAppId]; - } - if (attrs.find(kAttrCohort) != attrs.end()) { - app.cohort_set = true; + if (attrs.find(kAttrCohort) != attrs.end()) app.cohort = attrs[kAttrCohort]; - } - if (attrs.find(kAttrCohortHint) != attrs.end()) { - app.cohorthint_set = true; + if (attrs.find(kAttrCohortHint) != attrs.end()) app.cohorthint = attrs[kAttrCohortHint]; - } - if (attrs.find(kAttrCohortName) != attrs.end()) { - app.cohortname_set = true; + if (attrs.find(kAttrCohortName) != attrs.end()) app.cohortname = attrs[kAttrCohortName]; - } data->apps.push_back(std::move(app)); } else if (data->current_path == "/response/app/updatecheck") { if (!data->apps.empty()) @@ -733,12 +725,12 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, // We persist the cohorts sent by omaha even if the status is "noupdate". for (const auto& app : parser_data->apps) { if (app.id == params_->GetAppId()) { - if (app.cohort_set) - PersistCohortData(kPrefsOmahaCohort, app.cohort); - if (app.cohorthint_set) - PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint); - if (app.cohortname_set) - PersistCohortData(kPrefsOmahaCohortName, app.cohortname); + if (app.cohort) + PersistCohortData(kPrefsOmahaCohort, app.cohort.value()); + if (app.cohorthint) + PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint.value()); + if (app.cohortname) + PersistCohortData(kPrefsOmahaCohortName, app.cohortname.value()); break; } } @@ -916,11 +908,13 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, } if (!successful) { - LOG(ERROR) << "Omaha request network transfer failed."; int code = GetHTTPResponseCode(); + LOG(ERROR) << "Omaha request network transfer failed with HTTPResponseCode=" + << code; // Makes sure we send sane error values. 
if (code < 0 || code >= 1000) { code = 999; + LOG(WARNING) << "Converting to sane HTTPResponseCode=" << code; } completer.set_code(static_cast( static_cast(ErrorCode::kOmahaRequestHTTPResponseBase) + code)); From 065d78d6963ca13a38ad305bf751b09ec929cf51 Mon Sep 17 00:00:00 2001 From: Andrew Date: Tue, 7 Apr 2020 15:43:07 -0700 Subject: [PATCH 282/624] update_engine: Change DLC metadata path Change the location of the DLC metadata from /var/lib/dlc to /var/lib/update_engine/dlc_prefs/ to make update_engine the owner of metadata. BUG=chromium:912666 TEST=cros_workon_make update_engine --test TEST=install and uninstall DLCs on DUT. Check new prefs path. Change-Id: I75f5506eee1abc834ad89a7cf363f42e384b695b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2140007 Tested-by: Andrew Lassalle Commit-Queue: Amin Hassani Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani --- common/constants.cc | 3 +- common/constants.h | 5 +- common/prefs.cc | 33 ++++++- common/prefs_interface.h | 5 + common/prefs_unittest.cc | 69 ++++++++++++++ omaha_request_action.cc | 36 ++++--- omaha_request_action_unittest.cc | 137 ++++++++++++--------------- omaha_request_params.cc | 1 - omaha_request_params.h | 6 -- update_attempter.cc | 120 +++++++++++------------- update_attempter.h | 10 +- update_attempter_unittest.cc | 155 +++++++++++++++---------------- 12 files changed, 321 insertions(+), 259 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index 793ce97c..25aa9a8a 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -18,8 +18,7 @@ namespace chromeos_update_engine { -// TODO(andrewlassalle): Move this to the prefs directory. -const char kDlcMetadataRootpath[] = "/var/lib/dlc/"; +const char kDlcPrefsSubDir[] = "dlc"; const char kPowerwashSafePrefsSubDirectory[] = "update_engine/prefs"; diff --git a/common/constants.h b/common/constants.h index 44b20b0f..67519bdd 100644 --- a/common/constants.h +++ b/common/constants.h @@ -19,9 +19,8 @@ namespace chromeos_update_engine { -// The root path of all DLC modules metadata. -// Keep this in sync with the one in dlcservice. -extern const char kDlcMetadataRootpath[]; +// The root path of all DLC metadata. +extern const char kDlcPrefsSubDir[]; // Directory for AU prefs that are preserved across powerwash. extern const char kPowerwashSafePrefsSubDirectory[]; diff --git a/common/prefs.cc b/common/prefs.cc index 6d86a504..6a330378 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -18,9 +18,11 @@ #include +#include #include #include #include +#include #include #include "update_engine/common/utils.h" @@ -29,6 +31,8 @@ using std::string; namespace chromeos_update_engine { +const char kKeySeparator = '/'; + bool PrefsBase::GetString(const string& key, string* value) const { return storage_->GetKey(key, value); } @@ -104,6 +108,13 @@ void PrefsBase::RemoveObserver(const string& key, ObserverInterface* observer) { observers_for_key.erase(observer_it); } +string PrefsInterface::CreateSubKey(const string& name_space, + const string& sub_pref, + const string& key) { + return base::JoinString({name_space, sub_pref, key}, + string(1, kKeySeparator)); +} + // Prefs bool Prefs::Init(const base::FilePath& prefs_dir) { @@ -112,6 +123,24 @@ bool Prefs::Init(const base::FilePath& prefs_dir) { bool Prefs::FileStorage::Init(const base::FilePath& prefs_dir) { prefs_dir_ = prefs_dir; + // Delete empty directories. Ignore errors when deleting empty directories. 
+ base::FileEnumerator namespace_enum( + prefs_dir_, false /* recursive */, base::FileEnumerator::DIRECTORIES); + for (base::FilePath namespace_path = namespace_enum.Next(); + !namespace_path.empty(); + namespace_path = namespace_enum.Next()) { + base::FileEnumerator sub_pref_enum(namespace_path, + false /* recursive */, + base::FileEnumerator::DIRECTORIES); + for (base::FilePath sub_pref_path = sub_pref_enum.Next(); + !sub_pref_path.empty(); + sub_pref_path = sub_pref_enum.Next()) { + if (base::IsDirectoryEmpty(sub_pref_path)) + base::DeleteFile(sub_pref_path, false); + } + if (base::IsDirectoryEmpty(namespace_path)) + base::DeleteFile(namespace_path, false); + } return true; } @@ -146,7 +175,7 @@ bool Prefs::FileStorage::KeyExists(const string& key) const { bool Prefs::FileStorage::DeleteKey(const string& key) { base::FilePath filename; TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename)); - TEST_AND_RETURN_FALSE(base::DeleteFile(filename, false)); + TEST_AND_RETURN_FALSE(base::DeleteFile(filename, true)); return true; } @@ -157,7 +186,7 @@ bool Prefs::FileStorage::GetFileNameForKey(const string& key, for (size_t i = 0; i < key.size(); ++i) { char c = key.at(i); TEST_AND_RETURN_FALSE(base::IsAsciiAlpha(c) || base::IsAsciiDigit(c) || - c == '_' || c == '-'); + c == '_' || c == '-' || c == kKeySeparator); } *filename = prefs_dir_.Append(key); return true; diff --git a/common/prefs_interface.h b/common/prefs_interface.h index 03ae3ecd..b5596974 100644 --- a/common/prefs_interface.h +++ b/common/prefs_interface.h @@ -79,6 +79,11 @@ class PrefsInterface { // this key. Calling with non-existent keys does nothing. virtual bool Delete(const std::string& key) = 0; + // Creates a key which is part of a sub preference. + static std::string CreateSubKey(const std::string& name_space, + const std::string& sub_pref, + const std::string& key); + // Add an observer to watch whenever the given |key| is modified. 
The // OnPrefSet() and OnPrefDelete() methods will be called whenever any of the // Set*() methods or the Delete() method are called on the given key, diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc index 3f293199..f226949c 100644 --- a/common/prefs_unittest.cc +++ b/common/prefs_unittest.cc @@ -31,6 +31,7 @@ using std::string; using testing::_; +using testing::ElementsAre; using testing::Eq; namespace { @@ -59,6 +60,21 @@ class PrefsTest : public ::testing::Test { Prefs prefs_; }; +TEST(Prefs, Init) { + Prefs prefs; + const string name_space = "ns"; + const string sub_pref = "sp"; + + base::ScopedTempDir temp_dir; + ASSERT_TRUE(temp_dir.CreateUniqueTempDir()); + base::FilePath namespace_path = temp_dir.GetPath().Append(name_space); + + EXPECT_TRUE(base::CreateDirectory(namespace_path.Append(sub_pref))); + EXPECT_TRUE(base::PathExists(namespace_path.Append(sub_pref))); + ASSERT_TRUE(prefs.Init(temp_dir.GetPath())); + EXPECT_FALSE(base::PathExists(namespace_path)); +} + TEST_F(PrefsTest, GetFileNameForKey) { const char kAllvalidCharsKey[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_-"; @@ -77,6 +93,18 @@ TEST_F(PrefsTest, GetFileNameForKeyEmpty) { EXPECT_FALSE(prefs_.file_storage_.GetFileNameForKey("", &path)); } +TEST_F(PrefsTest, CreateSubKey) { + const string name_space = "ns"; + const string sub_pref1 = "sp1"; + const string sub_pref2 = "sp2"; + const string sub_key = "sk"; + + EXPECT_EQ(PrefsInterface::CreateSubKey(name_space, sub_pref1, sub_key), + "ns/sp1/sk"); + EXPECT_EQ(PrefsInterface::CreateSubKey(name_space, sub_pref2, sub_key), + "ns/sp2/sk"); +} + TEST_F(PrefsTest, GetString) { const string test_data = "test data"; ASSERT_TRUE(SetValue(kKey, test_data)); @@ -279,6 +307,29 @@ TEST_F(PrefsTest, DeleteWorks) { EXPECT_FALSE(prefs_.Exists(kKey)); } +TEST_F(PrefsTest, SetDeleteSubKey) { + const string name_space = "ns"; + const string sub_pref = "sp"; + const string sub_key1 = "sk1"; + const string sub_key2 = "sk2"; + auto key1 = prefs_.CreateSubKey(name_space, sub_pref, sub_key1); + auto key2 = prefs_.CreateSubKey(name_space, sub_pref, sub_key2); + base::FilePath sub_pref_path = prefs_dir_.Append(name_space).Append(sub_pref); + + ASSERT_TRUE(prefs_.SetInt64(key1, 0)); + ASSERT_TRUE(prefs_.SetInt64(key2, 0)); + EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key1))); + EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key2))); + + ASSERT_TRUE(prefs_.Delete(key1)); + EXPECT_FALSE(base::PathExists(sub_pref_path.Append(sub_key1))); + EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key2))); + ASSERT_TRUE(prefs_.Delete(key2)); + EXPECT_FALSE(base::PathExists(sub_pref_path.Append(sub_key2))); + prefs_.Init(prefs_dir_); + EXPECT_FALSE(base::PathExists(prefs_dir_.Append(name_space))); +} + class MockPrefsObserver : public PrefsInterface::ObserverInterface { public: MOCK_METHOD1(OnPrefSet, void(const string&)); @@ -299,6 +350,19 @@ TEST_F(PrefsTest, ObserversCalled) { prefs_.Delete(kKey); testing::Mock::VerifyAndClearExpectations(&mock_obserser); + auto key1 = prefs_.CreateSubKey("ns", "sp1", "key1"); + prefs_.AddObserver(key1, &mock_obserser); + + EXPECT_CALL(mock_obserser, OnPrefSet(key1)); + EXPECT_CALL(mock_obserser, OnPrefDeleted(_)).Times(0); + prefs_.SetString(key1, "value"); + testing::Mock::VerifyAndClearExpectations(&mock_obserser); + + EXPECT_CALL(mock_obserser, OnPrefSet(_)).Times(0); + EXPECT_CALL(mock_obserser, OnPrefDeleted(Eq(key1))); + prefs_.Delete(key1); + testing::Mock::VerifyAndClearExpectations(&mock_obserser); + 
prefs_.RemoveObserver(kKey, &mock_obserser); } @@ -359,6 +423,11 @@ TEST_F(MemoryPrefsTest, BasicTest) { EXPECT_TRUE(prefs_.Delete(kKey)); EXPECT_FALSE(prefs_.Exists(kKey)); EXPECT_TRUE(prefs_.Delete(kKey)); + + auto key = prefs_.CreateSubKey("ns", "sp", "sk"); + ASSERT_TRUE(prefs_.SetInt64(key, 0)); + EXPECT_TRUE(prefs_.Exists(key)); + EXPECT_TRUE(prefs_.Delete(kKey)); } } // namespace chromeos_update_engine diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 81abb3e9..c9b8aa04 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -413,37 +413,33 @@ void OmahaRequestAction::StorePingReply( continue; const OmahaRequestParams::AppParams& dlc_params = it->second; - + const string& dlc_id = dlc_params.name; // Skip if the ping for this DLC was not sent. if (!dlc_params.send_ping) continue; - base::FilePath metadata_path = - base::FilePath(params_->dlc_prefs_root()).Append(dlc_params.name); - - Prefs prefs; - if (!base::CreateDirectory(metadata_path) || !prefs.Init(metadata_path)) { - LOG(ERROR) << "Failed to initialize the preferences path:" - << metadata_path.value() << "."; - continue; - } + PrefsInterface* prefs = system_state_->prefs(); // Reset the active metadata value to |kPingInactiveValue|. - if (!prefs.SetInt64(kPrefsPingActive, kPingInactiveValue)) - LOG(ERROR) << "Failed to set the value of ping metadata '" - << kPrefsPingActive << "'."; - - if (!prefs.SetString(kPrefsPingLastRollcall, - parser_data.daystart_elapsed_days)) + auto active_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + if (!prefs->SetInt64(active_key, kPingInactiveValue)) + LOG(ERROR) << "Failed to set the value of ping metadata '" << active_key + << "'."; + + auto last_rollcall_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + if (!prefs->SetString(last_rollcall_key, parser_data.daystart_elapsed_days)) LOG(ERROR) << "Failed to set the value of ping metadata '" - << kPrefsPingLastRollcall << "'."; + << last_rollcall_key << "'."; if (dlc_params.ping_active) { // Write the value of elapsed_days into |kPrefsPingLastActive| only if // the previous ping was an active one. - if (!prefs.SetString(kPrefsPingLastActive, - parser_data.daystart_elapsed_days)) + auto last_active_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + if (!prefs->SetString(last_active_key, parser_data.daystart_elapsed_days)) LOG(ERROR) << "Failed to set the value of ping metadata '" - << kPrefsPingLastActive << "'."; + << last_active_key << "'."; } } } diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 2528f7b7..e1f5ef90 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -429,12 +429,6 @@ class OmahaRequestActionTest : public ::testing::Test { bool expected_allow_p2p_for_sharing, const string& expected_p2p_url); - // Helper function used to test the Ping request. - // Create the test directory and setup the Omaha response. - void SetUpStorePingReply(const string& dlc_id, - base::FilePath* metadata_path_dlc, - base::ScopedTempDir* tempdir); - FakeSystemState fake_system_state_; FakeUpdateResponse fake_update_response_; // Used by all tests. 
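To make the new key layout in StorePingReply() above concrete, here is a minimal sketch of how a per-DLC ping pref is addressed once everything goes through the shared prefs store instead of per-DLC metadata directories. The prefs pointer, the "dlc0" id, and the log line are illustrative only; CreateSubKey(), kDlcPrefsSubDir, kPrefsPingActive, and kPingInactiveValue are the names introduced or used by this patch. CreateSubKey() joins its parts with '/', so the key resolves to a file under <prefs_dir>/dlc/<dlc_id>/.

  // Assumes an initialized PrefsInterface* prefs (e.g. obtained from
  // SystemState::prefs()) and a DLC id such as "dlc0".
  const std::string dlc_id = "dlc0";
  auto active_key =
      PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive);
  // Writes <prefs_dir>/dlc/dlc0/<kPrefsPingActive> through the normal API.
  prefs->SetInt64(active_key, kPingInactiveValue);
  int64_t value;
  if (prefs->GetInt64(active_key, &value))
    LOG(INFO) << "Ping state for " << dlc_id << ": " << value;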
@@ -453,6 +447,36 @@ class OmahaRequestActionTest : public ::testing::Test { string post_str; }; +class OmahaRequestActionDlcPingTest : public OmahaRequestActionTest { + protected: + void SetUp() override { + OmahaRequestActionTest::SetUp(); + dlc_id_ = "dlc0"; + active_key_ = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id_, kPrefsPingActive); + last_active_key_ = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id_, kPrefsPingLastActive); + last_rollcall_key_ = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id_, kPrefsPingLastRollcall); + + tuc_params_.http_response = + "\"" + "" + ""; + tuc_params_.expected_check_result = + metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + } + + std::string dlc_id_; + std::string active_key_; + std::string last_active_key_; + std::string last_rollcall_key_; +}; bool OmahaRequestActionTest::TestUpdateCheck() { brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); @@ -2904,106 +2928,67 @@ TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) { EXPECT_EQ(kEolDateInvalid, StringToEolDate(eol_date)); } -void OmahaRequestActionTest::SetUpStorePingReply( - const string& dlc_id, - base::FilePath* metadata_path_dlc, - base::ScopedTempDir* tempdir) { - // Create a uniquely named test directory. - ASSERT_TRUE(tempdir->CreateUniqueTempDir()); - request_params_.set_root(tempdir->GetPath().value()); - *metadata_path_dlc = - base::FilePath(request_params_.dlc_prefs_root()).Append(dlc_id); - ASSERT_TRUE(base::CreateDirectory(*metadata_path_dlc)); - - tuc_params_.http_response = - "\"" - "" - ""; - tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; - tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; -} - -TEST_F(OmahaRequestActionTest, StorePingReplyNoPing) { - string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0; - base::ScopedTempDir tempdir; - SetUpStorePingReply(dlc_id, &metadata_path_dlc0, &tempdir); - int64_t temp_int; - Prefs prefs; - ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); - - OmahaRequestParams::AppParams app_param = {.name = dlc_id}; +TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyNoPing) { + OmahaRequestParams::AppParams app_param = {.name = dlc_id_}; request_params_.set_dlc_apps_params( - {{request_params_.GetDlcAppId(dlc_id), app_param}}); + {{request_params_.GetDlcAppId(dlc_id_), app_param}}); ASSERT_TRUE(TestUpdateCheck()); + + int64_t temp_int; // If there was no ping, the metadata files shouldn't exist yet. 
- EXPECT_FALSE(prefs.GetInt64(kPrefsPingActive, &temp_int)); - EXPECT_FALSE(prefs.GetInt64(kPrefsPingLastActive, &temp_int)); - EXPECT_FALSE(prefs.GetInt64(kPrefsPingLastRollcall, &temp_int)); + EXPECT_FALSE(fake_prefs_.GetInt64(active_key_, &temp_int)); + EXPECT_FALSE(fake_prefs_.GetInt64(last_active_key_, &temp_int)); + EXPECT_FALSE(fake_prefs_.GetInt64(last_rollcall_key_, &temp_int)); } -TEST_F(OmahaRequestActionTest, StorePingReplyActiveTest) { - string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0; - base::ScopedTempDir tempdir; - SetUpStorePingReply(dlc_id, &metadata_path_dlc0, &tempdir); - int64_t temp_int; - Prefs prefs; - ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); +TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyActiveTest) { // Create Active value - prefs.SetInt64(kPrefsPingActive, 0); + fake_prefs_.SetInt64(active_key_, 0); OmahaRequestParams::AppParams app_param = { .active_counting_type = OmahaRequestParams::kDateBased, - .name = dlc_id, + .name = dlc_id_, .ping_active = 1, .send_ping = true}; request_params_.set_dlc_apps_params( - {{request_params_.GetDlcAppId(dlc_id), app_param}}); + {{request_params_.GetDlcAppId(dlc_id_), app_param}}); + int64_t temp_int; + string temp_str; ASSERT_TRUE(TestUpdateCheck()); - EXPECT_TRUE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + EXPECT_TRUE(fake_prefs_.GetInt64(active_key_, &temp_int)); EXPECT_EQ(temp_int, kPingInactiveValue); - EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastActive, &temp_int)); - EXPECT_EQ(temp_int, 4763); - EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastRollcall, &temp_int)); - EXPECT_EQ(temp_int, 4763); + EXPECT_TRUE(fake_prefs_.GetString(last_active_key_, &temp_str)); + EXPECT_EQ(temp_str, "4763"); + EXPECT_TRUE(fake_prefs_.GetString(last_rollcall_key_, &temp_str)); + EXPECT_EQ(temp_str, "4763"); } -TEST_F(OmahaRequestActionTest, StorePingReplyInactiveTest) { - string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0; - base::ScopedTempDir tempdir; - SetUpStorePingReply(dlc_id, &metadata_path_dlc0, &tempdir); - int64_t temp_int; - Prefs prefs; - ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); +TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyInactiveTest) { // Create Active value - prefs.SetInt64(kPrefsPingActive, 0); + fake_prefs_.SetInt64(active_key_, 0); OmahaRequestParams::AppParams app_param = { .active_counting_type = OmahaRequestParams::kDateBased, - .name = dlc_id, + .name = dlc_id_, .ping_active = 0, .send_ping = true}; request_params_.set_dlc_apps_params( - {{request_params_.GetDlcAppId(dlc_id), app_param}}); + {{request_params_.GetDlcAppId(dlc_id_), app_param}}); // Set the previous active value to an older value than 4763. 
- prefs.SetInt64(kPrefsPingLastActive, 555); + fake_prefs_.SetString(last_active_key_, "555"); + int64_t temp_int; ASSERT_TRUE(TestUpdateCheck()); - ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); - EXPECT_TRUE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + EXPECT_TRUE(fake_prefs_.GetInt64(active_key_, &temp_int)); EXPECT_EQ(temp_int, kPingInactiveValue); - EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastActive, &temp_int)); - EXPECT_EQ(temp_int, 555); - EXPECT_TRUE(prefs.GetInt64(kPrefsPingLastRollcall, &temp_int)); - EXPECT_EQ(temp_int, 4763); + string temp_str; + EXPECT_TRUE(fake_prefs_.GetString(last_active_key_, &temp_str)); + EXPECT_EQ(temp_str, "555"); + EXPECT_TRUE(fake_prefs_.GetString(last_rollcall_key_, &temp_str)); + EXPECT_EQ(temp_str, "4763"); } } // namespace chromeos_update_engine diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 52675980..d4b8d649 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -214,7 +214,6 @@ bool OmahaRequestParams::IsValidChannel(const string& channel, void OmahaRequestParams::set_root(const string& root) { root_ = root; test::SetImagePropertiesRootPrefix(root_.c_str()); - dlc_prefs_root_ = root + kDlcMetadataRootpath; } int OmahaRequestParams::GetChannelIndex(const string& channel) const { diff --git a/omaha_request_params.h b/omaha_request_params.h index b33d0b16..d29ce70f 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -58,7 +58,6 @@ class OmahaRequestParams { update_check_count_wait_enabled_(false), min_update_checks_needed_(kDefaultMinUpdateChecks), max_update_checks_allowed_(kDefaultMaxUpdateChecks), - dlc_prefs_root_(kDlcMetadataRootpath), is_install_(false) {} virtual ~OmahaRequestParams(); @@ -222,8 +221,6 @@ class OmahaRequestParams { return autoupdate_token_; } - inline std::string dlc_prefs_root() const { return dlc_prefs_root_; } - // Returns the App ID corresponding to the current value of the // download channel. virtual std::string GetAppId() const; @@ -410,9 +407,6 @@ class OmahaRequestParams { // When reading files, prepend root_ to the paths. Useful for testing. std::string root_; - // The metadata/prefs root path for DLCs. - std::string dlc_prefs_root_; - // A list of DLC modules to install. A mapping from DLC App ID to |AppParams|. 
std::map dlc_apps_params_; diff --git a/update_attempter.cc b/update_attempter.cc index ae7f71eb..0ead18ae 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -28,7 +28,6 @@ #include #include -#include #include #include #include @@ -85,6 +84,7 @@ using chromeos_update_manager::EvalStatus; using chromeos_update_manager::Policy; using chromeos_update_manager::StagingCase; using chromeos_update_manager::UpdateCheckParams; +using std::map; using std::string; using std::vector; using update_engine::UpdateAttemptFlags; @@ -658,6 +658,22 @@ void UpdateAttempter::CalculateStagingParams(bool interactive) { } } +bool UpdateAttempter::ResetDlcPrefs(const string& dlc_id) { + vector failures; + PrefsInterface* prefs = system_state_->prefs(); + for (auto& sub_key : + {kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}) { + auto key = prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, sub_key); + if (!prefs->Delete(key)) + failures.emplace_back(sub_key); + } + if (failures.size() != 0) + PLOG(ERROR) << "Failed to delete prefs (" << base::JoinString(failures, ",") + << " for DLC (" << dlc_id << ")."; + + return failures.size() == 0; +} + bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) { if (dlc_id.empty()) { LOG(ERROR) << "Empty DLC ID passed."; @@ -665,50 +681,30 @@ bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) { } LOG(INFO) << "Set DLC (" << dlc_id << ") to " << (is_active ? "Active" : "Inactive"); - // TODO(andrewlassalle): Should dlc_prefs_root be in systemstate instead of - // omaha_request_params_? - base::FilePath metadata_path = - base::FilePath(omaha_request_params_->dlc_prefs_root()).Append(dlc_id); + PrefsInterface* prefs = system_state_->prefs(); if (is_active) { - base::File::Error error; - if (!base::CreateDirectoryAndGetError(metadata_path, &error)) { - PLOG(ERROR) << "Failed to create metadata directory for DLC (" << dlc_id - << "). Error:" << error; - return false; - } - - Prefs prefs; - if (!prefs.Init(metadata_path)) { - LOG(ERROR) << "Failed to initialize the preferences path:" - << metadata_path.value() << "."; - return false; - } - - if (!prefs.SetInt64(kPrefsPingActive, kPingActiveValue)) { + auto ping_active_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + if (!prefs->SetInt64(ping_active_key, kPingActiveValue)) { LOG(ERROR) << "Failed to set the value of ping metadata '" << kPrefsPingActive << "'."; return false; } } else { - if (!base::DeleteFile(metadata_path, true)) { - PLOG(ERROR) << "Failed to delete metadata directory(" - << metadata_path.value() << ") for DLC (" << dlc_id << ")."; - return false; - } + return ResetDlcPrefs(dlc_id); } return true; } -int64_t UpdateAttempter::GetPingMetadata( - const PrefsInterface& prefs, const std::string& metadata_name) const { +int64_t UpdateAttempter::GetPingMetadata(const string& metadata_key) const { // The first time a ping is sent, the metadata files containing the values // sent back by the server still don't exist. A value of -1 is used to // indicate this. - if (!prefs.Exists(metadata_name)) + if (!system_state_->prefs()->Exists(metadata_key)) return kPingNeverPinged; int64_t value; - if (prefs.GetInt64(metadata_name, &value)) + if (system_state_->prefs()->GetInt64(metadata_key, &value)) return value; // Return -2 when the file exists and there is a problem reading from it, or @@ -724,49 +720,41 @@ void UpdateAttempter::CalculateDlcParams() { LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. 
Check the " "state of dlcservice, will not update DLC modules."; } - base::FilePath metadata_root_path = - base::FilePath(omaha_request_params_->dlc_prefs_root()); - // Cleanup any leftover metadata for DLCs which don't exist. - base::FileEnumerator dir_enum(metadata_root_path, - false /* recursive */, - base::FileEnumerator::DIRECTORIES); - std::unordered_set dlc_ids(dlc_ids_.begin(), dlc_ids_.end()); - for (base::FilePath name = dir_enum.Next(); !name.empty(); - name = dir_enum.Next()) { - string id = name.BaseName().value(); - if (dlc_ids.find(id) == dlc_ids.end()) { - LOG(INFO) << "Deleting stale metadata for DLC:" << id; - if (!base::DeleteFile(name, true)) - PLOG(WARNING) << "Failed to delete DLC prefs path:" << name.value(); - } - } - std::map dlc_apps_params; + PrefsInterface* prefs = system_state_->prefs(); + map dlc_apps_params; for (const auto& dlc_id : dlc_ids_) { OmahaRequestParams::AppParams dlc_params{ .active_counting_type = OmahaRequestParams::kDateBased, .name = dlc_id, .send_ping = false}; - // Only send the ping when the request is to update DLCs. When installing - // DLCs, we don't want to send the ping yet, since the DLCs might fail to - // install or might not really be active yet. - if (!is_install_) { - base::FilePath metadata_path = metadata_root_path.Append(dlc_id); - Prefs prefs; - if (!base::CreateDirectory(metadata_path) || !prefs.Init(metadata_path)) { - LOG(ERROR) << "Failed to initialize the preferences path:" - << metadata_path.value() << "."; - } else { - dlc_params.ping_active = kPingActiveValue; - if (!prefs.GetInt64(kPrefsPingActive, &dlc_params.ping_active) || - dlc_params.ping_active != kPingActiveValue) { - dlc_params.ping_active = kPingInactiveValue; - } - dlc_params.ping_date_last_active = - GetPingMetadata(prefs, kPrefsPingLastActive); - dlc_params.ping_date_last_rollcall = - GetPingMetadata(prefs, kPrefsPingLastRollcall); - dlc_params.send_ping = true; + if (is_install_) { + // In some cases, |SetDlcActiveValue| might fail to reset the DLC prefs + // when a DLC is uninstalled. To avoid having stale values from that + // scenario, we reset the metadata values on a new install request. + // Ignore failure to delete stale prefs. + ResetDlcPrefs(dlc_id); + SetDlcActiveValue(true, dlc_id); + } else { + // Only send the ping when the request is to update DLCs. When installing + // DLCs, we don't want to send the ping yet, since the DLCs might fail to + // install or might not really be active yet. + dlc_params.ping_active = kPingActiveValue; + auto ping_active_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + if (!prefs->GetInt64(ping_active_key, &dlc_params.ping_active) || + dlc_params.ping_active != kPingActiveValue) { + dlc_params.ping_active = kPingInactiveValue; } + auto ping_last_active_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + dlc_params.ping_date_last_active = GetPingMetadata(ping_last_active_key); + + auto ping_last_rollcall_key = + prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + dlc_params.ping_date_last_rollcall = + GetPingMetadata(ping_last_rollcall_key); + + dlc_params.send_ping = true; } dlc_apps_params[omaha_request_params_->GetDlcAppId(dlc_id)] = dlc_params; } diff --git a/update_attempter.h b/update_attempter.h index 9e481792..e270b598 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -439,13 +439,15 @@ class UpdateAttempter : public ActionProcessorDelegate, // Resets interactivity and forced update flags. 
void ResetInteractivityFlags(); - // Get the integer values from the metadata directory set in |prefs| for - // |kPrefsPingLastActive| or |kPrefsPingLastRollcall|. + // Resets all the DLC prefs. + bool ResetDlcPrefs(const std::string& dlc_id); + + // Get the integer values from the DLC metadata for |kPrefsPingLastActive| + // or |kPrefsPingLastRollcall|. // The value is equal to -2 when the value cannot be read or is not numeric. // The value is equal to -1 the first time it is being sent, which is // when the metadata file doesn't exist. - int64_t GetPingMetadata(const PrefsInterface& prefs, - const std::string& metadata_name) const; + int64_t GetPingMetadata(const std::string& metadata_key) const; // Calculates the update parameters for DLCs. Sets the |dlc_ids_| // parameter on the |omaha_request_params_| object. diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 56665ad3..5a6a23e9 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -72,6 +72,7 @@ using std::unique_ptr; using std::unordered_set; using std::vector; using testing::_; +using testing::Contains; using testing::DoAll; using testing::ElementsAre; using testing::Field; @@ -2349,16 +2350,9 @@ TEST_F(UpdateAttempterTest, FailedEolTest) { } TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { - // Create a uniquely named test directory. - base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); - - ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); attempter_.is_install_ = true; attempter_.dlc_ids_ = {dlc_id}; attempter_.CalculateDlcParams(); @@ -2371,22 +2365,18 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { EXPECT_EQ(false, dlc_app_params.send_ping); // When the DLC gets installed, a ping is not sent, therefore we don't store // the values sent by Omaha. - EXPECT_FALSE( - base::PathExists(metadata_path_dlc0.Append(kPrefsPingLastActive))); - EXPECT_FALSE( - base::PathExists(metadata_path_dlc0.Append(kPrefsPingLastRollcall))); + auto last_active_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key)); + auto last_rollcall_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key)); } TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { - // Create a uniquely named test directory. - base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); - ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2407,23 +2397,23 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { } TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { - // Create a uniquely named test directory. 
- base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); - ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + MemoryPrefs prefs; + fake_system_state_.set_prefs(&prefs); EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); // Write non numeric values in the metadata files. - base::WriteFile(metadata_path_dlc0.Append(kPrefsPingActive), "z2yz", 4); - base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastActive), "z2yz", 4); - base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastRollcall), "z2yz", 4); + auto active_key = + PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + auto last_active_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + auto last_rollcall_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + fake_system_state_.prefs()->SetString(active_key, "z2yz"); + fake_system_state_.prefs()->SetString(last_active_key, "z2yz"); + fake_system_state_.prefs()->SetString(last_rollcall_key, "z2yz"); attempter_.is_install_ = false; attempter_.CalculateDlcParams(); @@ -2440,23 +2430,24 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { } TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { - // Create a uniquely named test directory. - base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); - ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); + MemoryPrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); // Write numeric values in the metadata files. 
- base::WriteFile(metadata_path_dlc0.Append(kPrefsPingActive), "1", 1); - base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastActive), "78", 2); - base::WriteFile(metadata_path_dlc0.Append(kPrefsPingLastRollcall), "99", 2); + auto active_key = + PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + auto last_active_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + auto last_rollcall_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + + fake_system_state_.prefs()->SetInt64(active_key, 1); + fake_system_state_.prefs()->SetInt64(last_active_key, 78); + fake_system_state_.prefs()->SetInt64(last_rollcall_key, 99); attempter_.is_install_ = false; attempter_.CalculateDlcParams(); @@ -2473,58 +2464,64 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { } TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) { - base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); - ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc0)); - base::FilePath metadata_path_dlc_stale = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append("stale"); - ASSERT_TRUE(base::CreateDirectory(metadata_path_dlc_stale)); - EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) - .WillOnce( - DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); + auto active_key = + PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + auto last_active_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + auto last_rollcall_key = PrefsInterface::CreateSubKey( + kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + fake_system_state_.prefs()->SetInt64(active_key, kPingInactiveValue); + fake_system_state_.prefs()->SetInt64(last_active_key, 0); + fake_system_state_.prefs()->SetInt64(last_rollcall_key, 0); + EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); + EXPECT_TRUE(fake_system_state_.prefs()->Exists(last_active_key)); + EXPECT_TRUE(fake_system_state_.prefs()->Exists(last_rollcall_key)); - attempter_.is_install_ = false; + attempter_.dlc_ids_ = {dlc_id}; + attempter_.is_install_ = true; attempter_.CalculateDlcParams(); - EXPECT_TRUE(base::PathExists(metadata_path_dlc0)); - EXPECT_FALSE(base::PathExists(metadata_path_dlc_stale)); + EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key)); + EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key)); + // Active key is set on install. 
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); + int64_t temp_int; + EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int)); + EXPECT_EQ(temp_int, kPingActiveValue); } TEST_F(UpdateAttempterTest, SetDlcActiveValue) { - base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); attempter_.SetDlcActiveValue(true, dlc_id); - Prefs prefs; - ASSERT_TRUE(base::PathExists(metadata_path_dlc0)); - ASSERT_TRUE(prefs.Init(metadata_path_dlc0)); int64_t temp_int; - EXPECT_TRUE(prefs.GetInt64(kPrefsPingActive, &temp_int)); + auto active_key = + PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); + EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int)); EXPECT_EQ(temp_int, kPingActiveValue); } TEST_F(UpdateAttempterTest, SetDlcInactive) { - base::ScopedTempDir tempdir; - ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); string dlc_id = "dlc0"; - base::FilePath metadata_path_dlc0 = - base::FilePath(fake_system_state_.request_params()->dlc_prefs_root()) - .Append(dlc_id); - base::CreateDirectory(metadata_path_dlc0); - EXPECT_TRUE(base::PathExists(metadata_path_dlc0)); + MemoryPrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); + auto sub_keys = { + kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}; + for (auto& sub_key : sub_keys) { + auto key = PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, sub_key); + fake_system_state_.prefs()->SetInt64(key, 1); + EXPECT_TRUE(fake_system_state_.prefs()->Exists(key)); + } attempter_.SetDlcActiveValue(false, dlc_id); - EXPECT_FALSE(base::PathExists(metadata_path_dlc0)); + for (auto& sub_key : sub_keys) { + auto key = PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, sub_key); + EXPECT_FALSE(fake_system_state_.prefs()->Exists(key)); + } } TEST_F(UpdateAttempterTest, GetSuccessfulDlcIds) { From a3210e6c3b2a78dffdb8f5544ed80819b538a302 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 7 May 2020 11:32:44 -0700 Subject: [PATCH 283/624] update_engine: NextPayload() resets URL index This change fixes a bug/assumption that |PayloadState| used to make in regards to URL index related to payloads. When a URL index is incremented, there is no gurauntee that subsequent payloads will have the same number of candidate URLs, hence it is critical to reset the URL index back to 0 for subsequent payloads. This fix also allows candidate URLs to not be skipped over for multi-package/payload request/responses. The max number of times a URL is allowed to fail is reduced from 10 to 3 to allow preferred URLs to always be used as the intial URL for payloads. 
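As a minimal, self-contained sketch of the indexing problem being fixed (the URLs below are made up and not from this CL): each payload carries its own candidate URL list, so an index advanced while downloading one payload can point past the end of, or at a non-preferred entry of, the next payload's list unless it is reset to 0.

  #include <iostream>
  #include <string>
  #include <vector>

  int main() {
    // Hypothetical per-payload candidate URLs: payload 0 has a fallback URL,
    // payload 1 does not.
    std::vector<std::vector<std::string>> candidate_urls = {
        {"http://primary/p0", "http://fallback/p0"}, {"http://primary/p1"}};
    size_t payload_index = 0;
    size_t url_index = 1;  // Payload 0 already fell back to its second URL.

    // Advancing to the next payload without resetting would index past the
    // end of payload 1's single-entry list; resetting keeps the preferred URL.
    ++payload_index;
    url_index = 0;
    std::cout << candidate_urls[payload_index][url_index] << std::endl;
    return 0;
  }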
BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine # filter PayloadStateTest Change-Id: I67732b2b7da08f580d1b554fd85eb06b3bf1f761 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2188552 Tested-by: Jae Hoon Kim Commit-Queue: Andrew Lassalle Reviewed-by: Amin Hassani --- omaha_request_action.h | 10 +++++----- payload_state.cc | 5 ++--- payload_state.h | 1 + payload_state_unittest.cc | 27 +++++++++++++++++++++++++++ 4 files changed, 35 insertions(+), 8 deletions(-) diff --git a/omaha_request_action.h b/omaha_request_action.h index 623a704e..30b3d227 100644 --- a/omaha_request_action.h +++ b/omaha_request_action.h @@ -68,12 +68,12 @@ class OmahaRequestAction : public Action, public HttpFetcherDelegate { public: static const int kPingTimeJump = -2; - // We choose this value of 10 as a heuristic for a work day in trying + // We choose this value of 3 as a heuristic for a work day in trying // each URL, assuming we check roughly every 45 mins. This is a good time to - // wait - neither too long nor too little - so we don't give up the preferred - // URLs that appear earlier in list too quickly before moving on to the - // fallback ones. - static const int kDefaultMaxFailureCountPerUrl = 10; + // wait so we don't give up the preferred URLs, but allow using the URL that + // appears earlier in list for every payload before resorting to the fallback + // URLs in the candiate URL list. + static const int kDefaultMaxFailureCountPerUrl = 3; // If staging is enabled, set the maximum wait time to 28 days, since that is // the predetermined wait time for staging. diff --git a/payload_state.cc b/payload_state.cc index 355552ec..5facdff1 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -469,9 +469,7 @@ void PayloadState::IncrementFullPayloadAttemptNumber() { void PayloadState::IncrementUrlIndex() { size_t next_url_index = url_index_ + 1; - size_t max_url_size = 0; - for (const auto& urls : candidate_urls_) - max_url_size = std::max(max_url_size, urls.size()); + size_t max_url_size = candidate_urls_[payload_index_].size(); if (next_url_index < max_url_size) { LOG(INFO) << "Incrementing the URL index for next attempt"; SetUrlIndex(next_url_index); @@ -902,6 +900,7 @@ void PayloadState::SetPayloadIndex(size_t payload_index) { bool PayloadState::NextPayload() { if (payload_index_ + 1 >= candidate_urls_.size()) return false; + SetUrlIndex(0); SetPayloadIndex(payload_index_ + 1); return true; } diff --git a/payload_state.h b/payload_state.h index 5ef12202..bfe2cf0b 100644 --- a/payload_state.h +++ b/payload_state.h @@ -156,6 +156,7 @@ class PayloadState : public PayloadStateInterface { FRIEND_TEST(PayloadStateTest, RollbackHappened); FRIEND_TEST(PayloadStateTest, RollbackVersion); FRIEND_TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs); + FRIEND_TEST(PayloadStateTest, NextPayloadResetsUrlIndex); // Helper called when an attempt has begun, is called by // UpdateResumed(), UpdateRestarted() and Rollback(). 
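For a rough sense of scale behind the 10 to 3 change in kDefaultMaxFailureCountPerUrl above: assuming the roughly 45-minute check cadence cited in that comment, 10 failures per URL kept a payload on one URL for about 10 x 45 min = 7.5 hours (roughly a work day), whereas 3 failures corresponds to about 2.25 hours before falling back, so the preferred URL is retried much sooner for subsequent payloads.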
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index 869c24eb..4a0afcfe 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -1655,4 +1655,31 @@ TEST(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { EXPECT_EQ(null_time, payload_state.GetP2PFirstAttemptTimestamp()); } +TEST(PayloadStateTest, NextPayloadResetsUrlIndex) { + PayloadState payload_state; + FakeSystemState fake_system_state; + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + OmahaResponse response; + response.packages.push_back( + {.payload_urls = {"http://test1a", "http://test2a"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash"}); + response.packages.push_back({.payload_urls = {"http://test1b"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash"}); + payload_state.SetResponse(response); + + EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1a"); + payload_state.IncrementUrlIndex(); + EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test2a"); + + EXPECT_TRUE(payload_state.NextPayload()); + EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1b"); +} + } // namespace chromeos_update_engine From 64331b37cb9e57e4f3378e4ccbc3ac0af6fbd3ce Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 13 May 2020 16:50:40 -0700 Subject: [PATCH 284/624] Do not erase system_other if zero length. To be precise, do not erase it if the partition size is smaller than the AVB_FOOTER_SIZE, because it doesn't make sense to erase. Test: pass Fixes: 155263137 Change-Id: I20778a8965526beb7ece1d79100646e770d0aa2e --- dynamic_partition_control_android.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 7486d095..a9c2bb3b 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -656,6 +656,13 @@ bool DynamicPartitionControlAndroid::GetSystemOtherPath( return true; } + if (p->size() < AVB_FOOTER_SIZE) { + LOG(INFO) << partition_name_suffix << " has length " << p->size() + << "( < AVB_FOOTER_SIZE " << AVB_FOOTER_SIZE + << "), skip erasing."; + return true; + } + // Delete any pre-existing device with name |partition_name_suffix| and // also remove it from |mapped_devices_|. // In recovery, metadata might not be mounted, and From a74d2f0ef5a9431b6833c60ee618a7135eb9328e Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Wed, 13 May 2020 16:50:40 -0700 Subject: [PATCH 285/624] Do not erase system_other if zero length. To be precise, do not erase it if the partition size is smaller than the AVB_FOOTER_SIZE, because it doesn't make sense to erase. Test: pass Bug: 155263137 Change-Id: I20778a8965526beb7ece1d79100646e770d0aa2e Merged-In: I20778a8965526beb7ece1d79100646e770d0aa2e --- dynamic_partition_control_android.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index a310f209..ee9c6e3f 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -644,6 +644,13 @@ bool DynamicPartitionControlAndroid::GetSystemOtherPath( return true; } + if (p->size() < AVB_FOOTER_SIZE) { + LOG(INFO) << partition_name_suffix << " has length " << p->size() + << "( < AVB_FOOTER_SIZE " << AVB_FOOTER_SIZE + << "), skip erasing."; + return true; + } + // Delete any pre-existing device with name |partition_name_suffix| and // also remove it from |mapped_devices_|. 
// In recovery, metadata might not be mounted, and From 50f267388cdfeae3e2195bebd4851893c162efd4 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Thu, 14 May 2020 10:46:15 -0700 Subject: [PATCH 286/624] update_engine: Place DlcserviceInterface factory in interface header Cleanup file separation for DlcserviceInterface factory as a completely separate header is not necesary. BUG=none TEST=FEATURES=test emerge-$B update_engine TEST=USE="${USE} -dlc" FEATURES=test emerge-$B update_engine Change-Id: I6c9cfcac1b7d0079e9b0cc4d4e984fafc5f1923f Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2202617 Tested-by: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Andrew Lassalle Commit-Queue: Andrew Lassalle Commit-Queue: Amin Hassani --- common/dlcservice.h | 32 -------------------------------- common/dlcservice_interface.h | 5 +++++ real_system_state.cc | 2 +- 3 files changed, 6 insertions(+), 33 deletions(-) delete mode 100644 common/dlcservice.h diff --git a/common/dlcservice.h b/common/dlcservice.h deleted file mode 100644 index 9dae5607..00000000 --- a/common/dlcservice.h +++ /dev/null @@ -1,32 +0,0 @@ -// -// Copyright (C) 2018 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_H_ -#define UPDATE_ENGINE_COMMON_DLCSERVICE_H_ - -#include - -#include "update_engine/common/dlcservice_interface.h" - -namespace chromeos_update_engine { - -// This factory function creates a new DlcServiceInterface instance for the -// current platform. -std::unique_ptr CreateDlcService(); - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_COMMON_DLCSERVICE_H_ diff --git a/common/dlcservice_interface.h b/common/dlcservice_interface.h index 70b74ab8..7b577104 100644 --- a/common/dlcservice_interface.h +++ b/common/dlcservice_interface.h @@ -17,6 +17,7 @@ #ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_ #define UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_ +#include #include #include @@ -49,6 +50,10 @@ class DlcServiceInterface { DISALLOW_COPY_AND_ASSIGN(DlcServiceInterface); }; +// This factory function creates a new DlcServiceInterface instance for the +// current platform. 
+std::unique_ptr CreateDlcService(); + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_ diff --git a/real_system_state.cc b/real_system_state.cc index cc030431..74a37f35 100644 --- a/real_system_state.cc +++ b/real_system_state.cc @@ -32,7 +32,7 @@ #include "update_engine/common/boot_control.h" #include "update_engine/common/boot_control_stub.h" #include "update_engine/common/constants.h" -#include "update_engine/common/dlcservice.h" +#include "update_engine/common/dlcservice_interface.h" #include "update_engine/common/hardware.h" #include "update_engine/common/utils.h" #include "update_engine/metrics_reporter_omaha.h" From e6b888c9c3c5be497b3bb57946f73daba8a21eea Mon Sep 17 00:00:00 2001 From: Clark Chung Date: Mon, 20 Apr 2020 15:50:37 +0800 Subject: [PATCH 287/624] update_engine: run gn format BUILD.gn and *.gni Run gn format **/BUILD.gn and **/*.gni to apply new format of uprev'd gn of version "1733 (82d673ac)". BUG=chromium:1072321 TEST=cros tryjob -g 2156088 -g 2126033 hatch-full-tryjob Cq-Depend: chromium:2126033 Change-Id: Ib42a47d6e1d8c564e1f08e1caa16fbb7d0620030 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2156088 Tested-by: Clark Chung Commit-Queue: Clark Chung Reviewed-by: Amin Hassani --- BUILD.gn | 60 +++++++++++++---------------------------- client-headers/BUILD.gn | 8 ++---- tar_bunzip2.gni | 4 +-- 3 files changed, 21 insertions(+), 51 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index d416b948..204e2d37 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -88,6 +88,8 @@ pkg_config("target_defaults") { "${platform2_root}", "${platform2_root}/update_engine/client_library/include", ] + + # NOSORT pkg_deps = [ "libbrillo", "libchrome-${libbase_ver}", @@ -108,25 +110,19 @@ pkg_config("target_defaults") { proto_library("update_metadata-protos") { proto_in_dir = "." proto_out_dir = "include/update_engine" - sources = [ - "update_metadata.proto", - ] + sources = [ "update_metadata.proto" ] } # Chrome D-Bus bindings. generate_dbus_adaptors("update_engine-dbus-adaptor") { dbus_adaptors_out_dir = "include/dbus_bindings" - sources = [ - "dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml", - ] + sources = [ "dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml" ] } generate_dbus_proxies("update_engine-dbus-kiosk-app-client") { mock_output_file = "include/kiosk-app/dbus-proxy-mocks.h" proxy_output_file = "include/kiosk-app/dbus-proxies.h" - sources = [ - "dbus_bindings/org.chromium.KioskAppService.dbus-xml", - ] + sources = [ "dbus_bindings/org.chromium.KioskAppService.dbus-xml" ] } # The payload application component and common dependencies. @@ -177,14 +173,12 @@ static_library("libpayload_consumer") { # TODO(deymo): Remove unused dependencies once we stop including files # from the root directory. all_dependent_pkg_deps = [ - "libcrypto", - "xz-embedded", "libbspatch", + "libcrypto", "libpuffpatch", + "xz-embedded", ] - deps = [ - ":update_metadata-protos", - ] + deps = [ ":update_metadata-protos" ] } # The main daemon static_library with all the code used to check for updates @@ -289,13 +283,9 @@ static_library("libupdate_engine") { # update_engine daemon. executable("update_engine") { - sources = [ - "main.cc", - ] + sources = [ "main.cc" ] configs += [ ":target_defaults" ] - deps = [ - ":libupdate_engine", - ] + deps = [ ":libupdate_engine" ] } # update_engine client library. 
@@ -320,9 +310,7 @@ executable("update_engine_client") { "update_engine_client.cc", ] configs += [ ":target_defaults" ] - deps = [ - ":libupdate_engine_client", - ] + deps = [ ":libupdate_engine_client" ] } # server-side code. This is used for delta_generator and unittests but not @@ -357,8 +345,8 @@ static_library("libpayload_generator") { all_dependent_pkg_deps = [ "ext2fs", "libbsdiff", - "libpuffdiff", "liblzma", + "libpuffdiff", ] deps = [ ":libpayload_consumer", @@ -368,9 +356,7 @@ static_library("libpayload_generator") { # server-side delta generator. executable("delta_generator") { - sources = [ - "payload_generator/generate_delta_main.cc", - ] + sources = [ "payload_generator/generate_delta_main.cc" ] configs += [ ":target_defaults" ] configs -= [ "//common-mk:pie" ] deps = [ @@ -400,9 +386,7 @@ if (use.test || use.fuzzer) { ":target_defaults", ] pkg_deps = [ "libshill-client-test" ] - deps = [ - ":libupdate_engine", - ] + deps = [ ":libupdate_engine" ] } } @@ -420,9 +404,7 @@ if (use.test) { # Unpacks sample images used for testing. tar_bunzip2("update_engine-test_images") { image_out_dir = "." - sources = [ - "sample_images/sample_images.tar.bz2", - ] + sources = [ "sample_images/sample_images.tar.bz2" ] } # Test HTTP Server. @@ -442,9 +424,7 @@ if (use.test) { # Test subprocess helper. executable("test_subprocess") { - sources = [ - "test_subprocess.cc", - ] + sources = [ "test_subprocess.cc" ] # //common-mk:test should be on the top. # TODO(crbug.com/887845): Remove this after library odering issue is fixed. @@ -561,9 +541,7 @@ if (use.test) { # Fuzzer target. if (use.fuzzer) { executable("update_engine_delta_performer_fuzzer") { - sources = [ - "payload_consumer/delta_performer_fuzzer.cc", - ] + sources = [ "payload_consumer/delta_performer_fuzzer.cc" ] configs += [ "//common-mk/common_fuzzer", ":target_defaults", @@ -578,9 +556,7 @@ if (use.fuzzer) { ] } executable("update_engine_omaha_request_action_fuzzer") { - sources = [ - "omaha_request_action_fuzzer.cc", - ] + sources = [ "omaha_request_action_fuzzer.cc" ] configs += [ "//common-mk/common_fuzzer", ":target_defaults", diff --git a/client-headers/BUILD.gn b/client-headers/BUILD.gn index 88f8bb9c..8c1a17ea 100644 --- a/client-headers/BUILD.gn +++ b/client-headers/BUILD.gn @@ -17,17 +17,13 @@ import("//common-mk/generate-dbus-proxies.gni") group("all") { - deps = [ - ":libupdate_engine-client-headers", - ] + deps = [ ":libupdate_engine-client-headers" ] } # update_engine client library generated headers. Used by other daemons and # by the update_engine_client console program to interact with update_engine. 
generate_dbus_proxies("libupdate_engine-client-headers") { - sources = [ - "../dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml", - ] + sources = [ "../dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml" ] dbus_service_config = "../dbus_bindings/dbus-service-config.json" mock_output_file = "include/update_engine/dbus-proxy-mocks.h" proxy_output_file = "include/update_engine/dbus-proxies.h" diff --git a/tar_bunzip2.gni b/tar_bunzip2.gni index 0a178992..5d901675 100644 --- a/tar_bunzip2.gni +++ b/tar_bunzip2.gni @@ -21,9 +21,7 @@ template("tar_bunzip2") { action_foreach(target_name) { sources = invoker.sources script = "//common-mk/file_generator_wrapper.py" - outputs = [ - "${out_dir}/{{source_name_part}}.flag", - ] + outputs = [ "${out_dir}/{{source_name_part}}.flag" ] args = [ "sh", "-c", From 0cf1acbbdc2d8b75704f5799713f81b33ff00e3c Mon Sep 17 00:00:00 2001 From: Miriam Polzer Date: Wed, 29 Apr 2020 17:39:51 +0200 Subject: [PATCH 288/624] update_engine: Add powerwash flag to update status Add a powerwash flag to the update status which is set to true if and only if a powerwash takes place. This will ensure that the user is informed of a pending powerwash exactly when it is going to happen. BUG=chromium:1070563 TEST=FEATURES=test emerge-amd64-generic update_engine channel change and update on test device Cq-Depend: chromium:2187671 Change-Id: I58314ecc7c9c2e64c906ef5b31cb780948196296 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2187672 Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani Tested-by: Miriam Polzer Commit-Queue: Miriam Polzer --- client_library/client_dbus.cc | 2 ++ .../include/update_engine/update_status.h | 2 ++ dbus_service.cc | 2 ++ update_attempter.cc | 6 +++++ update_attempter.h | 4 +++- update_attempter_unittest.cc | 24 +++++++++++++++++++ update_status_utils.cc | 3 +++ update_status_utils_unittest.cc | 2 ++ 8 files changed, 44 insertions(+), 1 deletion(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 5ca519a1..8e9a7fd1 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -56,6 +56,8 @@ void ConvertToUpdateEngineStatus(const StatusResult& status, out_status->is_enterprise_rollback = status.is_enterprise_rollback(); out_status->is_install = status.is_install(); out_status->eol_date = status.eol_date(); + out_status->will_powerwash_after_reboot = + status.will_powerwash_after_reboot(); } } // namespace diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h index c1d0968f..b1cf1f85 100644 --- a/client_library/include/update_engine/update_status.h +++ b/client_library/include/update_engine/update_status.h @@ -90,6 +90,8 @@ struct UpdateEngineStatus { bool is_install; // The end-of-life date of the device in the number of days since Unix Epoch. int64_t eol_date; + // The system will powerwash once the update is applied. 
+ bool will_powerwash_after_reboot; }; } // namespace update_engine diff --git a/dbus_service.cc b/dbus_service.cc index 46ac1d1a..a282d1e3 100644 --- a/dbus_service.cc +++ b/dbus_service.cc @@ -47,6 +47,8 @@ void ConvertToStatusResult(const UpdateEngineStatus& ue_status, out_status->set_is_enterprise_rollback(ue_status.is_enterprise_rollback); out_status->set_is_install(ue_status.is_install); out_status->set_eol_date(ue_status.eol_date); + out_status->set_will_powerwash_after_reboot( + ue_status.will_powerwash_after_reboot); } } // namespace diff --git a/update_attempter.cc b/update_attempter.cc index 0ead18ae..6324a482 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1507,6 +1507,12 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { system_state_->prefs()->GetString(kPrefsOmahaEolDate, &str_eol_date); out_status->eol_date = StringToEolDate(str_eol_date); + // A powerwash will take place either if the install plan says it is required + // or if an enterprise rollback is happening. + out_status->will_powerwash_after_reboot = + install_plan_ && + (install_plan_->powerwash_required || install_plan_->is_rollback); + return true; } diff --git a/update_attempter.h b/update_attempter.h index e270b598..1bf552b2 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -265,9 +265,11 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest); FRIEND_TEST(UpdateAttempterTest, DownloadProgressAccumulationTest); FRIEND_TEST(UpdateAttempterTest, InstallSetsStatusIdle); - FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusDefault); FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue); FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse); + FRIEND_TEST(UpdateAttempterTest, + PowerwashInGetStatusTrueBecausePowerwashRequired); + FRIEND_TEST(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback); FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest); FRIEND_TEST(UpdateAttempterTest, PingOmahaTest); FRIEND_TEST(UpdateAttempterTest, ProcessingDoneInstallError); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 5a6a23e9..3a1646fd 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -2317,6 +2317,30 @@ TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue) { EXPECT_TRUE(status.is_enterprise_rollback); } +TEST_F(UpdateAttempterTest, PowerwashInGetStatusDefault) { + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_FALSE(status.will_powerwash_after_reboot); +} + +TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecausePowerwashRequired) { + attempter_.install_plan_.reset(new InstallPlan); + attempter_.install_plan_->powerwash_required = true; + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_TRUE(status.will_powerwash_after_reboot); +} + +TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback) { + attempter_.install_plan_.reset(new InstallPlan); + attempter_.install_plan_->is_rollback = true; + + UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_TRUE(status.will_powerwash_after_reboot); +} + TEST_F(UpdateAttempterTest, FutureEolTest) { EolDate eol_date = std::numeric_limits::max(); EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) diff --git a/update_status_utils.cc b/update_status_utils.cc index f88bb1aa..6c618eca 100644 --- a/update_status_utils.cc +++ b/update_status_utils.cc @@ -38,6 +38,7 @@ const char 
kLastCheckedTime[] = "LAST_CHECKED_TIME"; const char kNewSize[] = "NEW_SIZE"; const char kNewVersion[] = "NEW_VERSION"; const char kProgress[] = "PROGRESS"; +const char kWillPowerwashAfterReboot[] = "WILL_POWERWASH_AFTER_REBOOT"; } // namespace @@ -84,6 +85,8 @@ string UpdateEngineStatusToString(const UpdateEngineStatus& status) { key_value_store.SetBoolean(kIsEnterpriseRollback, status.is_enterprise_rollback); key_value_store.SetBoolean(kIsInstall, status.is_install); + key_value_store.SetBoolean(kWillPowerwashAfterReboot, + status.will_powerwash_after_reboot); return key_value_store.SaveToString(); } diff --git a/update_status_utils_unittest.cc b/update_status_utils_unittest.cc index e3dd037c..228201c8 100644 --- a/update_status_utils_unittest.cc +++ b/update_status_utils_unittest.cc @@ -35,6 +35,7 @@ TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) { .new_version = "12345.0.0", .is_enterprise_rollback = true, .is_install = true, + .will_powerwash_after_reboot = true, }; string print = R"(CURRENT_OP=UPDATE_STATUS_CHECKING_FOR_UPDATE @@ -44,6 +45,7 @@ LAST_CHECKED_TIME=156000000 NEW_SIZE=888 NEW_VERSION=12345.0.0 PROGRESS=0.5 +WILL_POWERWASH_AFTER_REBOOT=true )"; EXPECT_EQ(print, UpdateEngineStatusToString(update_engine_status)); } From c1f3692605e422e17a8ef8f3d2148e8e6ef6316e Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 11 May 2020 18:20:18 -0700 Subject: [PATCH 289/624] update_engine: Multi-level |PrefsInterface::CreateSubKey()| Currently, |PrefsInterface::CreateSubKey()| is limited to always provide a namespace and subpref, but this can be generalized to a multi-level namespace alongside a supplied key. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine Change-Id: Ib81e93e8319714caa85cd2fe6495d3cb9b0e82ed Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2195623 Tested-by: Jae Hoon Kim Reviewed-by: Andrew Lassalle Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- common/prefs.cc | 41 +++++++++++++++----------------- common/prefs_interface.h | 5 ++-- common/prefs_unittest.cc | 32 ++++++++++++++++--------- omaha_request_action.cc | 6 ++--- omaha_request_action_unittest.cc | 6 ++--- update_attempter.cc | 12 +++++----- update_attempter_unittest.cc | 28 +++++++++++----------- 7 files changed, 68 insertions(+), 62 deletions(-) diff --git a/common/prefs.cc b/common/prefs.cc index 6a330378..6db01b76 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -28,11 +28,27 @@ #include "update_engine/common/utils.h" using std::string; +using std::vector; namespace chromeos_update_engine { const char kKeySeparator = '/'; +namespace { + +void DeleteEmptyDirectories(const base::FilePath& path) { + base::FileEnumerator path_enum( + path, false /* recursive */, base::FileEnumerator::DIRECTORIES); + for (base::FilePath dir_path = path_enum.Next(); !dir_path.empty(); + dir_path = path_enum.Next()) { + DeleteEmptyDirectories(dir_path); + if (base::IsDirectoryEmpty(dir_path)) + base::DeleteFile(dir_path, false); + } +} + +} // namespace + bool PrefsBase::GetString(const string& key, string* value) const { return storage_->GetKey(key, value); } @@ -108,11 +124,8 @@ void PrefsBase::RemoveObserver(const string& key, ObserverInterface* observer) { observers_for_key.erase(observer_it); } -string PrefsInterface::CreateSubKey(const string& name_space, - const string& sub_pref, - const string& key) { - return base::JoinString({name_space, sub_pref, key}, - string(1, kKeySeparator)); +string PrefsInterface::CreateSubKey(const vector& 
ns_and_key) { + return base::JoinString(ns_and_key, string(1, kKeySeparator)); } // Prefs @@ -124,23 +137,7 @@ bool Prefs::Init(const base::FilePath& prefs_dir) { bool Prefs::FileStorage::Init(const base::FilePath& prefs_dir) { prefs_dir_ = prefs_dir; // Delete empty directories. Ignore errors when deleting empty directories. - base::FileEnumerator namespace_enum( - prefs_dir_, false /* recursive */, base::FileEnumerator::DIRECTORIES); - for (base::FilePath namespace_path = namespace_enum.Next(); - !namespace_path.empty(); - namespace_path = namespace_enum.Next()) { - base::FileEnumerator sub_pref_enum(namespace_path, - false /* recursive */, - base::FileEnumerator::DIRECTORIES); - for (base::FilePath sub_pref_path = sub_pref_enum.Next(); - !sub_pref_path.empty(); - sub_pref_path = sub_pref_enum.Next()) { - if (base::IsDirectoryEmpty(sub_pref_path)) - base::DeleteFile(sub_pref_path, false); - } - if (base::IsDirectoryEmpty(namespace_path)) - base::DeleteFile(namespace_path, false); - } + DeleteEmptyDirectories(prefs_dir_); return true; } diff --git a/common/prefs_interface.h b/common/prefs_interface.h index b5596974..3aad4800 100644 --- a/common/prefs_interface.h +++ b/common/prefs_interface.h @@ -20,6 +20,7 @@ #include #include +#include namespace chromeos_update_engine { @@ -80,9 +81,7 @@ class PrefsInterface { virtual bool Delete(const std::string& key) = 0; // Creates a key which is part of a sub preference. - static std::string CreateSubKey(const std::string& name_space, - const std::string& sub_pref, - const std::string& key); + static std::string CreateSubKey(const std::vector& ns_with_key); // Add an observer to watch whenever the given |key| is modified. The // OnPrefSet() and OnPrefDelete() methods will be called whenever any of the diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc index f226949c..24a62b52 100644 --- a/common/prefs_unittest.cc +++ b/common/prefs_unittest.cc @@ -62,17 +62,27 @@ class PrefsTest : public ::testing::Test { TEST(Prefs, Init) { Prefs prefs; - const string name_space = "ns"; + const string ns1 = "ns1"; + const string ns2A = "ns2A"; + const string ns2B = "ns2B"; const string sub_pref = "sp"; base::ScopedTempDir temp_dir; ASSERT_TRUE(temp_dir.CreateUniqueTempDir()); - base::FilePath namespace_path = temp_dir.GetPath().Append(name_space); + auto ns1_path = temp_dir.GetPath().Append(ns1); + auto ns2A_path = ns1_path.Append(ns2A); + auto ns2B_path = ns1_path.Append(ns2B); + auto sub_pref_path = ns2A_path.Append(sub_pref); + + EXPECT_TRUE(base::CreateDirectory(ns2B_path)); + EXPECT_TRUE(base::PathExists(ns2B_path)); + + EXPECT_TRUE(base::CreateDirectory(sub_pref_path)); + EXPECT_TRUE(base::PathExists(sub_pref_path)); - EXPECT_TRUE(base::CreateDirectory(namespace_path.Append(sub_pref))); - EXPECT_TRUE(base::PathExists(namespace_path.Append(sub_pref))); + EXPECT_TRUE(base::PathExists(ns1_path)); ASSERT_TRUE(prefs.Init(temp_dir.GetPath())); - EXPECT_FALSE(base::PathExists(namespace_path)); + EXPECT_FALSE(base::PathExists(ns1_path)); } TEST_F(PrefsTest, GetFileNameForKey) { @@ -99,9 +109,9 @@ TEST_F(PrefsTest, CreateSubKey) { const string sub_pref2 = "sp2"; const string sub_key = "sk"; - EXPECT_EQ(PrefsInterface::CreateSubKey(name_space, sub_pref1, sub_key), + EXPECT_EQ(PrefsInterface::CreateSubKey({name_space, sub_pref1, sub_key}), "ns/sp1/sk"); - EXPECT_EQ(PrefsInterface::CreateSubKey(name_space, sub_pref2, sub_key), + EXPECT_EQ(PrefsInterface::CreateSubKey({name_space, sub_pref2, sub_key}), "ns/sp2/sk"); } @@ -312,8 +322,8 @@ TEST_F(PrefsTest, 
SetDeleteSubKey) { const string sub_pref = "sp"; const string sub_key1 = "sk1"; const string sub_key2 = "sk2"; - auto key1 = prefs_.CreateSubKey(name_space, sub_pref, sub_key1); - auto key2 = prefs_.CreateSubKey(name_space, sub_pref, sub_key2); + auto key1 = prefs_.CreateSubKey({name_space, sub_pref, sub_key1}); + auto key2 = prefs_.CreateSubKey({name_space, sub_pref, sub_key2}); base::FilePath sub_pref_path = prefs_dir_.Append(name_space).Append(sub_pref); ASSERT_TRUE(prefs_.SetInt64(key1, 0)); @@ -350,7 +360,7 @@ TEST_F(PrefsTest, ObserversCalled) { prefs_.Delete(kKey); testing::Mock::VerifyAndClearExpectations(&mock_obserser); - auto key1 = prefs_.CreateSubKey("ns", "sp1", "key1"); + auto key1 = prefs_.CreateSubKey({"ns", "sp1", "key1"}); prefs_.AddObserver(key1, &mock_obserser); EXPECT_CALL(mock_obserser, OnPrefSet(key1)); @@ -424,7 +434,7 @@ TEST_F(MemoryPrefsTest, BasicTest) { EXPECT_FALSE(prefs_.Exists(kKey)); EXPECT_TRUE(prefs_.Delete(kKey)); - auto key = prefs_.CreateSubKey("ns", "sp", "sk"); + auto key = prefs_.CreateSubKey({"ns", "sp", "sk"}); ASSERT_TRUE(prefs_.SetInt64(key, 0)); EXPECT_TRUE(prefs_.Exists(key)); EXPECT_TRUE(prefs_.Delete(kKey)); diff --git a/omaha_request_action.cc b/omaha_request_action.cc index c9b8aa04..8728f72c 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -421,13 +421,13 @@ void OmahaRequestAction::StorePingReply( PrefsInterface* prefs = system_state_->prefs(); // Reset the active metadata value to |kPingInactiveValue|. auto active_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); if (!prefs->SetInt64(active_key, kPingInactiveValue)) LOG(ERROR) << "Failed to set the value of ping metadata '" << active_key << "'."; auto last_rollcall_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); if (!prefs->SetString(last_rollcall_key, parser_data.daystart_elapsed_days)) LOG(ERROR) << "Failed to set the value of ping metadata '" << last_rollcall_key << "'."; @@ -436,7 +436,7 @@ void OmahaRequestAction::StorePingReply( // Write the value of elapsed_days into |kPrefsPingLastActive| only if // the previous ping was an active one. 
auto last_active_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); if (!prefs->SetString(last_active_key, parser_data.daystart_elapsed_days)) LOG(ERROR) << "Failed to set the value of ping metadata '" << last_active_key << "'."; diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index e1f5ef90..765af4f4 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -453,11 +453,11 @@ class OmahaRequestActionDlcPingTest : public OmahaRequestActionTest { OmahaRequestActionTest::SetUp(); dlc_id_ = "dlc0"; active_key_ = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id_, kPrefsPingActive); + {kDlcPrefsSubDir, dlc_id_, kPrefsPingActive}); last_active_key_ = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id_, kPrefsPingLastActive); + {kDlcPrefsSubDir, dlc_id_, kPrefsPingLastActive}); last_rollcall_key_ = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id_, kPrefsPingLastRollcall); + {kDlcPrefsSubDir, dlc_id_, kPrefsPingLastRollcall}); tuc_params_.http_response = "prefs(); for (auto& sub_key : {kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}) { - auto key = prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, sub_key); + auto key = prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key}); if (!prefs->Delete(key)) failures.emplace_back(sub_key); } @@ -684,7 +684,7 @@ bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) { PrefsInterface* prefs = system_state_->prefs(); if (is_active) { auto ping_active_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); if (!prefs->SetInt64(ping_active_key, kPingActiveValue)) { LOG(ERROR) << "Failed to set the value of ping metadata '" << kPrefsPingActive << "'."; @@ -740,17 +740,17 @@ void UpdateAttempter::CalculateDlcParams() { // install or might not really be active yet. dlc_params.ping_active = kPingActiveValue; auto ping_active_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); if (!prefs->GetInt64(ping_active_key, &dlc_params.ping_active) || dlc_params.ping_active != kPingActiveValue) { dlc_params.ping_active = kPingInactiveValue; } auto ping_last_active_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); dlc_params.ping_date_last_active = GetPingMetadata(ping_last_active_key); - auto ping_last_rollcall_key = - prefs->CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + auto ping_last_rollcall_key = prefs->CreateSubKey( + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); dlc_params.ping_date_last_rollcall = GetPingMetadata(ping_last_rollcall_key); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 3a1646fd..745bcc2f 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -2390,10 +2390,10 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { // When the DLC gets installed, a ping is not sent, therefore we don't store // the values sent by Omaha. 
auto last_active_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key)); auto last_rollcall_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key)); } @@ -2430,11 +2430,11 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { // Write non numeric values in the metadata files. auto active_key = - PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); auto last_active_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); auto last_rollcall_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); fake_system_state_.prefs()->SetString(active_key, "z2yz"); fake_system_state_.prefs()->SetString(last_active_key, "z2yz"); fake_system_state_.prefs()->SetString(last_rollcall_key, "z2yz"); @@ -2463,11 +2463,11 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { // Write numeric values in the metadata files. auto active_key = - PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); auto last_active_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); auto last_rollcall_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); fake_system_state_.prefs()->SetInt64(active_key, 1); fake_system_state_.prefs()->SetInt64(last_active_key, 78); @@ -2492,11 +2492,11 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) { FakePrefs fake_prefs; fake_system_state_.set_prefs(&fake_prefs); auto active_key = - PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); auto last_active_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); auto last_rollcall_key = PrefsInterface::CreateSubKey( - kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall); + {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); fake_system_state_.prefs()->SetInt64(active_key, kPingInactiveValue); fake_system_state_.prefs()->SetInt64(last_active_key, 0); fake_system_state_.prefs()->SetInt64(last_rollcall_key, 0); @@ -2524,7 +2524,7 @@ TEST_F(UpdateAttempterTest, SetDlcActiveValue) { attempter_.SetDlcActiveValue(true, dlc_id); int64_t temp_int; auto active_key = - PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, kPrefsPingActive); + PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int)); EXPECT_EQ(temp_int, kPingActiveValue); @@ -2537,13 +2537,13 @@ TEST_F(UpdateAttempterTest, SetDlcInactive) { auto sub_keys = { kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}; for (auto& sub_key : sub_keys) { - auto key = 
PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, sub_key); + auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key}); fake_system_state_.prefs()->SetInt64(key, 1); EXPECT_TRUE(fake_system_state_.prefs()->Exists(key)); } attempter_.SetDlcActiveValue(false, dlc_id); for (auto& sub_key : sub_keys) { - auto key = PrefsInterface::CreateSubKey(kDlcPrefsSubDir, dlc_id, sub_key); + auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key}); EXPECT_FALSE(fake_system_state_.prefs()->Exists(key)); } } From 29a80e0af8162fce0c79a0187adab2c9d3d9273a Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 11 May 2020 20:18:49 -0700 Subject: [PATCH 290/624] update_engine: Fix [Memory|File]Storage partial inconsistency |Prefs::MemoryStorage| and |Prefs::FileStorage| had inconsistency when dealing with operations through |StorageInterface|, this change keeps the implementations more consistent. This keeps the underlying |StorageInterface| impementations behaving as similar as can be whether |[Memory|File]Storage| is used. Passing a namespace to |Prefs::FileStorage| backed |Pref| is no longer recursive in order to restrict to deleting keys. To delete all keys within a namespace, callers can use |Prefs::GetSubKeys(...)| and |Prefs::Delete(...)| accordingly. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine Change-Id: I3ea8b51e14b1405ca1cdef66f858a18d124ca0aa Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2195624 Tested-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Andrew Lassalle --- common/fake_prefs.cc | 8 ++++ common/fake_prefs.h | 3 ++ common/mock_prefs.h | 4 ++ common/prefs.cc | 52 ++++++++++++++++++--- common/prefs.h | 12 +++++ common/prefs_interface.h | 4 ++ common/prefs_unittest.cc | 97 +++++++++++++++++++++++++++++++++++++++- 7 files changed, 171 insertions(+), 9 deletions(-) diff --git a/common/fake_prefs.cc b/common/fake_prefs.cc index c446e062..73559c52 100644 --- a/common/fake_prefs.cc +++ b/common/fake_prefs.cc @@ -21,6 +21,7 @@ #include using std::string; +using std::vector; using chromeos_update_engine::FakePrefs; @@ -105,6 +106,13 @@ bool FakePrefs::Delete(const string& key) { return true; } +bool FakePrefs::GetSubKeys(const string& ns, vector* keys) const { + for (const auto& pr : values_) + if (pr.first.compare(0, ns.length(), ns) == 0) + keys->push_back(pr.first); + return true; +} + string FakePrefs::GetTypeName(PrefType type) { switch (type) { case PrefType::kString: diff --git a/common/fake_prefs.h b/common/fake_prefs.h index b1c5b712..b24ff4d8 100644 --- a/common/fake_prefs.h +++ b/common/fake_prefs.h @@ -49,6 +49,9 @@ class FakePrefs : public PrefsInterface { bool Exists(const std::string& key) const override; bool Delete(const std::string& key) override; + bool GetSubKeys(const std::string& ns, + std::vector* keys) const override; + void AddObserver(const std::string& key, ObserverInterface* observer) override; void RemoveObserver(const std::string& key, diff --git a/common/mock_prefs.h b/common/mock_prefs.h index 2582e194..62417a8c 100644 --- a/common/mock_prefs.h +++ b/common/mock_prefs.h @@ -18,6 +18,7 @@ #define UPDATE_ENGINE_COMMON_MOCK_PREFS_H_ #include +#include #include @@ -41,6 +42,9 @@ class MockPrefs : public PrefsInterface { MOCK_CONST_METHOD1(Exists, bool(const std::string& key)); MOCK_METHOD1(Delete, bool(const std::string& key)); + MOCK_CONST_METHOD2(GetSubKeys, + bool(const std::string&, std::vector*)); + MOCK_METHOD2(AddObserver, 
void(const std::string& key, ObserverInterface*)); MOCK_METHOD2(RemoveObserver, void(const std::string& key, ObserverInterface*)); diff --git a/common/prefs.cc b/common/prefs.cc index 6db01b76..615014f4 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -32,10 +32,10 @@ using std::vector; namespace chromeos_update_engine { -const char kKeySeparator = '/'; - namespace { +const char kKeySeparator = '/'; + void DeleteEmptyDirectories(const base::FilePath& path) { base::FileEnumerator path_enum( path, false /* recursive */, base::FileEnumerator::DIRECTORIES); @@ -112,6 +112,10 @@ bool PrefsBase::Delete(const string& key) { return true; } +bool PrefsBase::GetSubKeys(const string& ns, vector* keys) const { + return storage_->GetSubKeys(ns, keys); +} + void PrefsBase::AddObserver(const string& key, ObserverInterface* observer) { observers_[key].push_back(observer); } @@ -150,6 +154,24 @@ bool Prefs::FileStorage::GetKey(const string& key, string* value) const { return true; } +bool Prefs::FileStorage::GetSubKeys(const string& ns, + vector* keys) const { + base::FilePath filename; + TEST_AND_RETURN_FALSE(GetFileNameForKey(ns, &filename)); + base::FileEnumerator namespace_enum( + prefs_dir_, true, base::FileEnumerator::FILES); + for (base::FilePath f = namespace_enum.Next(); !f.empty(); + f = namespace_enum.Next()) { + auto filename_str = filename.value(); + if (f.value().compare(0, filename_str.length(), filename_str) == 0) { + // Only return the key portion excluding the |prefs_dir_| with slash. + keys->push_back(f.value().substr( + prefs_dir_.AsEndingWithSeparator().value().length())); + } + } + return true; +} + bool Prefs::FileStorage::SetKey(const string& key, const string& value) { base::FilePath filename; TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename)); @@ -172,19 +194,17 @@ bool Prefs::FileStorage::KeyExists(const string& key) const { bool Prefs::FileStorage::DeleteKey(const string& key) { base::FilePath filename; TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename)); - TEST_AND_RETURN_FALSE(base::DeleteFile(filename, true)); + TEST_AND_RETURN_FALSE(base::DeleteFile(filename, false)); return true; } bool Prefs::FileStorage::GetFileNameForKey(const string& key, base::FilePath* filename) const { - // Allows only non-empty keys containing [A-Za-z0-9_-]. + // Allows only non-empty keys containing [A-Za-z0-9_-/]. 
TEST_AND_RETURN_FALSE(!key.empty()); - for (size_t i = 0; i < key.size(); ++i) { - char c = key.at(i); + for (char c : key) TEST_AND_RETURN_FALSE(base::IsAsciiAlpha(c) || base::IsAsciiDigit(c) || c == '_' || c == '-' || c == kKeySeparator); - } *filename = prefs_dir_.Append(key); return true; } @@ -200,6 +220,24 @@ bool MemoryPrefs::MemoryStorage::GetKey(const string& key, return true; } +bool MemoryPrefs::MemoryStorage::GetSubKeys(const string& ns, + vector* keys) const { + using value_type = decltype(values_)::value_type; + using key_type = decltype(values_)::key_type; + auto lower_comp = [](const value_type& pr, const key_type& ns) { + return pr.first.substr(0, ns.length()) < ns; + }; + auto upper_comp = [](const key_type& ns, const value_type& pr) { + return ns < pr.first.substr(0, ns.length()); + }; + auto lower_it = + std::lower_bound(begin(values_), end(values_), ns, lower_comp); + auto upper_it = std::upper_bound(lower_it, end(values_), ns, upper_comp); + while (lower_it != upper_it) + keys->push_back((lower_it++)->first); + return true; +} + bool MemoryPrefs::MemoryStorage::SetKey(const string& key, const string& value) { values_[key] = value; diff --git a/common/prefs.h b/common/prefs.h index 0116454f..3fc1d891 100644 --- a/common/prefs.h +++ b/common/prefs.h @@ -42,6 +42,11 @@ class PrefsBase : public PrefsInterface { // Returns whether the operation succeeded. virtual bool GetKey(const std::string& key, std::string* value) const = 0; + // Get the keys stored within the namespace. If there are no keys in the + // namespace, |keys| will be empty. Returns whether the operation succeeded. + virtual bool GetSubKeys(const std::string& ns, + std::vector* keys) const = 0; + // Set the value of the key named |key| to |value| regardless of the // previous value. Returns whether the operation succeeded. virtual bool SetKey(const std::string& key, const std::string& value) = 0; @@ -70,6 +75,9 @@ class PrefsBase : public PrefsInterface { bool Exists(const std::string& key) const override; bool Delete(const std::string& key) override; + bool GetSubKeys(const std::string& ns, + std::vector* keys) const override; + void AddObserver(const std::string& key, ObserverInterface* observer) override; void RemoveObserver(const std::string& key, @@ -111,6 +119,8 @@ class Prefs : public PrefsBase { // PrefsBase::StorageInterface overrides. bool GetKey(const std::string& key, std::string* value) const override; + bool GetSubKeys(const std::string& ns, + std::vector* keys) const override; bool SetKey(const std::string& key, const std::string& value) override; bool KeyExists(const std::string& key) const override; bool DeleteKey(const std::string& key) override; @@ -149,6 +159,8 @@ class MemoryPrefs : public PrefsBase { // PrefsBase::StorageInterface overrides. bool GetKey(const std::string& key, std::string* value) const override; + bool GetSubKeys(const std::string& ns, + std::vector* keys) const override; bool SetKey(const std::string& key, const std::string& value) override; bool KeyExists(const std::string& key) const override; bool DeleteKey(const std::string& key) override; diff --git a/common/prefs_interface.h b/common/prefs_interface.h index 3aad4800..1311cb44 100644 --- a/common/prefs_interface.h +++ b/common/prefs_interface.h @@ -83,6 +83,10 @@ class PrefsInterface { // Creates a key which is part of a sub preference. static std::string CreateSubKey(const std::vector& ns_with_key); + // Returns a list of keys within the namespace. 
+ virtual bool GetSubKeys(const std::string& ns, + std::vector* keys) const = 0; + // Add an observer to watch whenever the given |key| is modified. The // OnPrefSet() and OnPrefDelete() methods will be called whenever any of the // Set*() methods or the Delete() method are called on the given key, diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc index 24a62b52..6dd26c09 100644 --- a/common/prefs_unittest.cc +++ b/common/prefs_unittest.cc @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -30,9 +31,11 @@ #include using std::string; +using std::vector; using testing::_; using testing::ElementsAre; using testing::Eq; +using testing::UnorderedElementsAre; namespace { // Test key used along the tests. @@ -41,12 +44,92 @@ const char kKey[] = "test-key"; namespace chromeos_update_engine { -class PrefsTest : public ::testing::Test { +class BasePrefsTest : public ::testing::Test { + protected: + void MultiNamespaceKeyTest() { + ASSERT_TRUE(common_prefs_); + auto key0 = common_prefs_->CreateSubKey({"ns1", "key"}); + // Corner case for "ns1". + auto key0corner = common_prefs_->CreateSubKey({"ns11", "key"}); + auto key1A = common_prefs_->CreateSubKey({"ns1", "nsA", "keyA"}); + auto key1B = common_prefs_->CreateSubKey({"ns1", "nsA", "keyB"}); + auto key2 = common_prefs_->CreateSubKey({"ns1", "nsB", "key"}); + // Corner case for "ns1/nsB". + auto key2corner = common_prefs_->CreateSubKey({"ns1", "nsB1", "key"}); + EXPECT_FALSE(common_prefs_->Exists(key0)); + EXPECT_FALSE(common_prefs_->Exists(key1A)); + EXPECT_FALSE(common_prefs_->Exists(key1B)); + EXPECT_FALSE(common_prefs_->Exists(key2)); + + EXPECT_TRUE(common_prefs_->SetString(key0, "")); + EXPECT_TRUE(common_prefs_->SetString(key0corner, "")); + EXPECT_TRUE(common_prefs_->SetString(key1A, "")); + EXPECT_TRUE(common_prefs_->SetString(key1B, "")); + EXPECT_TRUE(common_prefs_->SetString(key2, "")); + EXPECT_TRUE(common_prefs_->SetString(key2corner, "")); + + EXPECT_TRUE(common_prefs_->Exists(key0)); + EXPECT_TRUE(common_prefs_->Exists(key0corner)); + EXPECT_TRUE(common_prefs_->Exists(key1A)); + EXPECT_TRUE(common_prefs_->Exists(key1B)); + EXPECT_TRUE(common_prefs_->Exists(key2)); + EXPECT_TRUE(common_prefs_->Exists(key2corner)); + + vector keys2; + EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsB/", &keys2)); + EXPECT_THAT(keys2, ElementsAre(key2)); + for (const auto& key : keys2) + EXPECT_TRUE(common_prefs_->Delete(key)); + EXPECT_TRUE(common_prefs_->Exists(key0)); + EXPECT_TRUE(common_prefs_->Exists(key0corner)); + EXPECT_TRUE(common_prefs_->Exists(key1A)); + EXPECT_TRUE(common_prefs_->Exists(key1B)); + EXPECT_FALSE(common_prefs_->Exists(key2)); + EXPECT_TRUE(common_prefs_->Exists(key2corner)); + + vector keys2corner; + EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsB", &keys2corner)); + EXPECT_THAT(keys2corner, ElementsAre(key2corner)); + for (const auto& key : keys2corner) + EXPECT_TRUE(common_prefs_->Delete(key)); + EXPECT_FALSE(common_prefs_->Exists(key2corner)); + + vector keys1; + EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsA/", &keys1)); + EXPECT_THAT(keys1, UnorderedElementsAre(key1A, key1B)); + for (const auto& key : keys1) + EXPECT_TRUE(common_prefs_->Delete(key)); + EXPECT_TRUE(common_prefs_->Exists(key0)); + EXPECT_TRUE(common_prefs_->Exists(key0corner)); + EXPECT_FALSE(common_prefs_->Exists(key1A)); + EXPECT_FALSE(common_prefs_->Exists(key1B)); + + vector keys0; + EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/", &keys0)); + EXPECT_THAT(keys0, ElementsAre(key0)); + for (const auto& key : keys0) + 
EXPECT_TRUE(common_prefs_->Delete(key)); + EXPECT_FALSE(common_prefs_->Exists(key0)); + EXPECT_TRUE(common_prefs_->Exists(key0corner)); + + vector keys0corner; + EXPECT_TRUE(common_prefs_->GetSubKeys("ns1", &keys0corner)); + EXPECT_THAT(keys0corner, ElementsAre(key0corner)); + for (const auto& key : keys0corner) + EXPECT_TRUE(common_prefs_->Delete(key)); + EXPECT_FALSE(common_prefs_->Exists(key0corner)); + } + + PrefsInterface* common_prefs_; +}; + +class PrefsTest : public BasePrefsTest { protected: void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); prefs_dir_ = temp_dir_.GetPath(); ASSERT_TRUE(prefs_.Init(prefs_dir_)); + common_prefs_ = &prefs_; } bool SetValue(const string& key, const string& value) { @@ -415,8 +498,14 @@ TEST_F(PrefsTest, UnsuccessfulCallsNotObserved) { prefs_.RemoveObserver(kInvalidKey, &mock_obserser); } -class MemoryPrefsTest : public ::testing::Test { +TEST_F(PrefsTest, MultiNamespaceKeyTest) { + MultiNamespaceKeyTest(); +} + +class MemoryPrefsTest : public BasePrefsTest { protected: + void SetUp() override { common_prefs_ = &prefs_; } + MemoryPrefs prefs_; }; @@ -440,4 +529,8 @@ TEST_F(MemoryPrefsTest, BasicTest) { EXPECT_TRUE(prefs_.Delete(kKey)); } +TEST_F(MemoryPrefsTest, MultiNamespaceKeyTest) { + MultiNamespaceKeyTest(); +} + } // namespace chromeos_update_engine From 38de3b155e5c3b4739dd68887a1f26ebf8edf645 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 29 Apr 2020 19:41:23 -0700 Subject: [PATCH 291/624] update_engine: Implement Excluder Class + Tests Excluder persists the exclusion state for excluding certain names. This will be used to exclude update Payloads which are contiuously faulty. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine TEST=USE="${USE} -dlc" FEATURES=test emerge-$B update_engine Change-Id: I780a9cf2ad979833382a832e01833211ec2ccf7d Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2172074 Tested-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani --- BUILD.gn | 13 +++++-- common/constants.cc | 2 ++ common/constants.h | 3 ++ common/excluder_interface.h | 60 +++++++++++++++++++++++++++++++ common/excluder_stub.cc | 43 +++++++++++++++++++++++ common/excluder_stub.h | 46 ++++++++++++++++++++++++ common/mock_excluder.h | 37 ++++++++++++++++++++ excluder_chromeos.cc | 63 +++++++++++++++++++++++++++++++++ excluder_chromeos.h | 52 +++++++++++++++++++++++++++ excluder_chromeos_unittest.cc | 66 +++++++++++++++++++++++++++++++++++ 10 files changed, 383 insertions(+), 2 deletions(-) create mode 100644 common/excluder_interface.h create mode 100644 common/excluder_stub.cc create mode 100644 common/excluder_stub.h create mode 100644 common/mock_excluder.h create mode 100644 excluder_chromeos.cc create mode 100644 excluder_chromeos.h create mode 100644 excluder_chromeos_unittest.cc diff --git a/BUILD.gn b/BUILD.gn index 204e2d37..3f2ea445 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -275,9 +275,15 @@ static_library("libupdate_engine") { } if (use.dlc) { - sources += [ "dlcservice_chromeos.cc" ] + sources += [ + "dlcservice_chromeos.cc", + "excluder_chromeos.cc", + ] } else { - sources += [ "common/dlcservice_stub.cc" ] + sources += [ + "common/dlcservice_stub.cc", + "common/excluder_stub.cc", + ] } } @@ -515,6 +521,9 @@ if (use.test) { "update_manager/weekly_time_unittest.cc", "update_status_utils_unittest.cc", ] + if (use.dlc) { + sources += [ "excluder_chromeos_unittest.cc" ] + } # //common-mk:test should be on the top. 
# TODO(crbug.com/887845): Remove this after library odering issue is fixed. diff --git a/common/constants.cc b/common/constants.cc index 25aa9a8a..ac652ea7 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -18,6 +18,8 @@ namespace chromeos_update_engine { +const char kExclusionPrefsSubDir[] = "exclusion"; + const char kDlcPrefsSubDir[] = "dlc"; const char kPowerwashSafePrefsSubDirectory[] = "update_engine/prefs"; diff --git a/common/constants.h b/common/constants.h index 67519bdd..248fd05e 100644 --- a/common/constants.h +++ b/common/constants.h @@ -19,6 +19,9 @@ namespace chromeos_update_engine { +// The root path of all exclusion prefs. +extern const char kExclusionPrefsSubDir[]; + // The root path of all DLC metadata. extern const char kDlcPrefsSubDir[]; diff --git a/common/excluder_interface.h b/common/excluder_interface.h new file mode 100644 index 00000000..3985bba8 --- /dev/null +++ b/common/excluder_interface.h @@ -0,0 +1,60 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_ +#define UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_ + +#include +#include + +#include + +namespace chromeos_update_engine { + +class PrefsInterface; + +class ExcluderInterface { + public: + virtual ~ExcluderInterface() = default; + + // Returns true on successfuly excluding |name|, otherwise false. On a + // successful |Exclude()| the passed in |name| will be considered excluded + // and calls to |IsExcluded()| will return true. The exclusions are persisted. + virtual bool Exclude(const std::string& name) = 0; + + // Returns true if |name| reached the exclusion limit, otherwise false. + virtual bool IsExcluded(const std::string& name) = 0; + + // Returns true on sucessfully reseting the entire exclusion state, otherwise + // false. On a successful |Reset()| there will be no excluded |name| in the + // exclusion state. + virtual bool Reset() = 0; + + // Not copyable or movable + ExcluderInterface(const ExcluderInterface&) = delete; + ExcluderInterface& operator=(const ExcluderInterface&) = delete; + ExcluderInterface(ExcluderInterface&&) = delete; + ExcluderInterface& operator=(ExcluderInterface&&) = delete; + + protected: + ExcluderInterface() = default; +}; + +std::unique_ptr CreateExcluder(PrefsInterface* prefs); + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_ diff --git a/common/excluder_stub.cc b/common/excluder_stub.cc new file mode 100644 index 00000000..a251765e --- /dev/null +++ b/common/excluder_stub.cc @@ -0,0 +1,43 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/common/excluder_stub.h" + +#include + +#include "update_engine/common/prefs_interface.h" + +using std::string; + +namespace chromeos_update_engine { + +std::unique_ptr CreateExcluder(PrefsInterface* prefs) { + return std::make_unique(); +} + +bool ExcluderStub::Exclude(const string& name) { + return true; +} + +bool ExcluderStub::IsExcluded(const string& name) { + return false; +} + +bool ExcluderStub::Reset() { + return true; +} + +} // namespace chromeos_update_engine diff --git a/common/excluder_stub.h b/common/excluder_stub.h new file mode 100644 index 00000000..2d5372a9 --- /dev/null +++ b/common/excluder_stub.h @@ -0,0 +1,46 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_ +#define UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_ + +#include + +#include "update_engine/common/excluder_interface.h" + +namespace chromeos_update_engine { + +// An implementation of the |ExcluderInterface| that does nothing. +class ExcluderStub : public ExcluderInterface { + public: + ExcluderStub() = default; + ~ExcluderStub() = default; + + // |ExcluderInterface| overrides. + bool Exclude(const std::string& name) override; + bool IsExcluded(const std::string& name) override; + bool Reset() override; + + // Not copyable or movable. + ExcluderStub(const ExcluderStub&) = delete; + ExcluderStub& operator=(const ExcluderStub&) = delete; + ExcluderStub(ExcluderStub&&) = delete; + ExcluderStub& operator=(ExcluderStub&&) = delete; +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_ diff --git a/common/mock_excluder.h b/common/mock_excluder.h new file mode 100644 index 00000000..bc547729 --- /dev/null +++ b/common/mock_excluder.h @@ -0,0 +1,37 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_MOCK_APP_EXCLUDER_H_ +#define UPDATE_ENGINE_MOCK_APP_EXCLUDER_H_ + +#include "update_engine/common/excluder_interface.h" + +#include + +#include + +namespace chromeos_update_engine { + +class MockExcluder : public ExcluderInterface { + public: + MOCK_METHOD(bool, Exclude, (const std::string&), (override)); + MOCK_METHOD(bool, IsExcluded, (const std::string&), (override)); + MOCK_METHOD(bool, Reset, (), (override)); +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_MOCK_APP_EXCLUDER_H_ diff --git a/excluder_chromeos.cc b/excluder_chromeos.cc new file mode 100644 index 00000000..bfd6f046 --- /dev/null +++ b/excluder_chromeos.cc @@ -0,0 +1,63 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/excluder_chromeos.h" + +#include +#include + +#include +#include +#include +#include + +#include "update_engine/common/constants.h" +#include "update_engine/common/prefs.h" +#include "update_engine/system_state.h" + +using std::string; +using std::vector; + +namespace chromeos_update_engine { + +std::unique_ptr CreateExcluder(PrefsInterface* prefs) { + return std::make_unique(prefs); +} + +ExcluderChromeOS::ExcluderChromeOS(PrefsInterface* prefs) : prefs_(prefs) {} + +bool ExcluderChromeOS::Exclude(const string& name) { + auto key = prefs_->CreateSubKey({kExclusionPrefsSubDir, name}); + return prefs_->SetString(key, ""); +} + +bool ExcluderChromeOS::IsExcluded(const string& name) { + auto key = prefs_->CreateSubKey({kExclusionPrefsSubDir, name}); + return prefs_->Exists(key); +} + +bool ExcluderChromeOS::Reset() { + bool ret = true; + vector keys; + if (!prefs_->GetSubKeys(kExclusionPrefsSubDir, &keys)) + return false; + for (const auto& key : keys) + if (!(ret &= prefs_->Delete(key))) + LOG(ERROR) << "Failed to delete exclusion pref for " << key; + return ret; +} + +} // namespace chromeos_update_engine diff --git a/excluder_chromeos.h b/excluder_chromeos.h new file mode 100644 index 00000000..e4c1a529 --- /dev/null +++ b/excluder_chromeos.h @@ -0,0 +1,52 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_ +#define UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_ + +#include + +#include "update_engine/common/excluder_interface.h" +#include "update_engine/common/prefs_interface.h" + +namespace chromeos_update_engine { + +class SystemState; + +// The Chrome OS implementation of the |ExcluderInterface|. +class ExcluderChromeOS : public ExcluderInterface { + public: + explicit ExcluderChromeOS(PrefsInterface* prefs); + ~ExcluderChromeOS() = default; + + // |ExcluderInterface| overrides. + bool Exclude(const std::string& name) override; + bool IsExcluded(const std::string& name) override; + bool Reset() override; + + // Not copyable or movable. + ExcluderChromeOS(const ExcluderChromeOS&) = delete; + ExcluderChromeOS& operator=(const ExcluderChromeOS&) = delete; + ExcluderChromeOS(ExcluderChromeOS&&) = delete; + ExcluderChromeOS& operator=(ExcluderChromeOS&&) = delete; + + private: + PrefsInterface* prefs_; +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_ diff --git a/excluder_chromeos_unittest.cc b/excluder_chromeos_unittest.cc new file mode 100644 index 00000000..a8c14b39 --- /dev/null +++ b/excluder_chromeos_unittest.cc @@ -0,0 +1,66 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/excluder_chromeos.h" + +#include + +#include +#include +#include + +#include "update_engine/common/prefs.h" + +using std::string; +using std::unique_ptr; + +namespace chromeos_update_engine { + +constexpr char kDummyHash[] = + "71ff43d76e2488e394e46872f5b066cc25e394c2c3e3790dd319517883b33db1"; + +class ExcluderChromeOSTest : public ::testing::Test { + protected: + void SetUp() override { + ASSERT_TRUE(tempdir_.CreateUniqueTempDir()); + ASSERT_TRUE(base::PathExists(tempdir_.GetPath())); + ASSERT_TRUE(prefs_.Init(tempdir_.GetPath())); + excluder_ = std::make_unique(&prefs_); + } + + base::ScopedTempDir tempdir_; + Prefs prefs_; + unique_ptr excluder_; +}; + +TEST_F(ExcluderChromeOSTest, ExclusionCheck) { + EXPECT_FALSE(excluder_->IsExcluded(kDummyHash)); + EXPECT_TRUE(excluder_->Exclude(kDummyHash)); + EXPECT_TRUE(excluder_->IsExcluded(kDummyHash)); +} + +TEST_F(ExcluderChromeOSTest, ResetFlow) { + EXPECT_TRUE(excluder_->Exclude("abc")); + EXPECT_TRUE(excluder_->Exclude(kDummyHash)); + EXPECT_TRUE(excluder_->IsExcluded("abc")); + EXPECT_TRUE(excluder_->IsExcluded(kDummyHash)); + + EXPECT_TRUE(excluder_->Reset()); + EXPECT_FALSE(excluder_->IsExcluded("abc")); + EXPECT_FALSE(excluder_->IsExcluded(kDummyHash)); +} + +} // namespace chromeos_update_engine From 5e8e30b7e3aa04ee852ac79b9bd6fddb63200928 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 6 May 2020 14:59:06 -0700 Subject: [PATCH 292/624] update_engine: UpdateAttempter manages lifetime of Excluder Prior to adding the exclusion logic within various |Action|s, the |UpdateAttempter| provides a way to access the |Excluder| encapsulated within the |UpdateAttempter| singleton. 
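A condensed, hypothetical sketch of that ownership pattern (stand-in class names, not the real update_engine types; the actual wiring is in the hunks below): the attempter creates the excluder once from prefs and only hands out a non-owning pointer.

#include <memory>

#include "update_engine/common/excluder_interface.h"

namespace chromeos_update_engine {

// Stand-in for UpdateAttempter: sole owner of the excluder.
class AttempterLikeOwner {
 public:
  explicit AttempterLikeOwner(PrefsInterface* prefs)
      : excluder_(CreateExcluder(prefs)) {}

  ExcluderInterface* GetExcluder() const { return excluder_.get(); }

 private:
  std::unique_ptr<ExcluderInterface> excluder_;
};

// Stand-in for PayloadState: borrows the pointer, never owns it.
class PayloadStateLikeUser {
 public:
  void Initialize(const AttempterLikeOwner& attempter) {
    excluder_ = attempter.GetExcluder();
  }

 private:
  ExcluderInterface* excluder_ = nullptr;
};

}  // namespace chromeos_update_engine
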
|PayloadState| uses |Excluder| from |UpdateAttempter| as a member. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine TEST=USE="${USE} -dlc" FEATURES=test emerge-$B update_engine Change-Id: I63ace436e8aacd349e13004fe1e2f4dd37479978 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2190236 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim Auto-Submit: Jae Hoon Kim --- mock_update_attempter.h | 2 ++ payload_state.cc | 4 ++++ payload_state.h | 6 ++++++ update_attempter.cc | 3 +++ update_attempter.h | 7 +++++++ 5 files changed, 22 insertions(+) diff --git a/mock_update_attempter.h b/mock_update_attempter.h index fdeba524..ad348028 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -60,6 +60,8 @@ class MockUpdateAttempter : public UpdateAttempter { MOCK_METHOD2(SetDlcActiveValue, bool(bool, const std::string&)); + MOCK_CONST_METHOD0(GetExcluder, ExcluderInterface*(void)); + MOCK_METHOD0(RefreshDevicePolicy, void(void)); MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void)); diff --git a/payload_state.cc b/payload_state.cc index 5facdff1..2e07ad97 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -37,6 +37,7 @@ #include "update_engine/omaha_request_params.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/system_state.h" +#include "update_engine/update_attempter.h" using base::Time; using base::TimeDelta; @@ -60,6 +61,8 @@ static const uint64_t kUptimeResolution = 1; PayloadState::PayloadState() : prefs_(nullptr), + powerwash_safe_prefs_(nullptr), + excluder_(nullptr), using_p2p_for_downloading_(false), p2p_num_attempts_(0), payload_attempt_number_(0), @@ -79,6 +82,7 @@ bool PayloadState::Initialize(SystemState* system_state) { system_state_ = system_state; prefs_ = system_state_->prefs(); powerwash_safe_prefs_ = system_state_->powerwash_safe_prefs(); + excluder_ = system_state_->update_attempter()->GetExcluder(); LoadResponseSignature(); LoadPayloadAttemptNumber(); LoadFullPayloadAttemptNumber(); diff --git a/payload_state.h b/payload_state.h index bfe2cf0b..bc4bf0dd 100644 --- a/payload_state.h +++ b/payload_state.h @@ -24,6 +24,7 @@ #include #include // for FRIEND_TEST +#include "update_engine/common/excluder_interface.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/metrics_constants.h" #include "update_engine/payload_state_interface.h" @@ -429,6 +430,11 @@ class PayloadState : public PayloadStateInterface { // This object persists across powerwashes. PrefsInterface* powerwash_safe_prefs_; + // Interface object with which we determine exclusion decisions for + // payloads/partitions during the update. This must be set by calling the + // Initialize method before calling any other method. + ExcluderInterface* excluder_; + // This is the current response object from Omaha. 
OmahaResponse response_; diff --git a/update_attempter.cc b/update_attempter.cc index 7479134b..52561929 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -47,6 +47,7 @@ #include "update_engine/common/clock_interface.h" #include "update_engine/common/constants.h" #include "update_engine/common/dlcservice_interface.h" +#include "update_engine/common/excluder_interface.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" @@ -1762,6 +1763,8 @@ void UpdateAttempter::UpdateEngineStarted() { system_state_->payload_state()->UpdateEngineStarted(); StartP2PAtStartup(); + + excluder_ = CreateExcluder(system_state_->prefs()); } bool UpdateAttempter::StartP2PAtStartup() { diff --git a/update_attempter.h b/update_attempter.h index 1bf552b2..dd958f54 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -37,6 +37,7 @@ #include "update_engine/client_library/include/update_engine/update_status.h" #include "update_engine/common/action_processor.h" #include "update_engine/common/cpu_limiter.h" +#include "update_engine/common/excluder_interface.h" #include "update_engine/common/proxy_resolver.h" #include "update_engine/omaha_request_builder_xml.h" #include "update_engine/omaha_request_params.h" @@ -184,6 +185,9 @@ class UpdateAttempter : public ActionProcessorDelegate, // Called at update_engine startup to do various house-keeping. void UpdateEngineStarted(); + // Returns the |Excluder| that is currently held onto. + virtual ExcluderInterface* GetExcluder() const { return excluder_.get(); } + // Reloads the device policy from libbrillo. Note: This method doesn't // cause a real-time policy fetch from the policy server. It just reloads the // latest value that libbrillo has cached. libbrillo fetches the policies @@ -571,6 +575,9 @@ class UpdateAttempter : public ActionProcessorDelegate, // This is the session ID used to track update flow to Omaha. std::string session_id_; + // Interface for excluder. + std::unique_ptr excluder_; + DISALLOW_COPY_AND_ASSIGN(UpdateAttempter); }; From 0b6501c204f7d61ffeb0a110f6f9b311b337bf3d Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Thu, 21 May 2020 05:19:12 +0900 Subject: [PATCH 293/624] update_engine: use base::NumberToString base::XXXToString for number-string conversion has been unified to base::NumberToString. BUG=chromium:1054279 TEST=unittest Change-Id: Ie47b1257df3492de660ea6d1c3fdf6e5cc8af791 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2210081 Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Qijiang Fan Tested-by: Qijiang Fan --- omaha_request_action_unittest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 765af4f4..e530de46 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -156,7 +156,7 @@ struct FakeUpdateResponse { version + "\">" "" + (multi_package ? "" From 975e86643939308559293ee1562a654cf874f259 Mon Sep 17 00:00:00 2001 From: Allen Webb Date: Thu, 14 May 2020 14:33:13 -0700 Subject: [PATCH 294/624] update_engine: Ignore Wdeprecated-declarations. Ignore Wdepercated-declarations for payload_consumer/delta_performer.cc and payload_consumer/payload_constants.cc until the usages of the deprecated declarations are removed. 
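For context, a self-contained illustration (hypothetical type, not from the tree) of the warning class being downgraded here; under -Werror any remaining call site like this would otherwise fail the build:

// Marking a declaration deprecated makes every use emit
// -Wdeprecated-declarations, which -Werror turns into a hard error.
struct [[deprecated("use the replacement API instead")]] LegacyHelper {
  int value = 0;
};

int UseLegacyHelper() {
  LegacyHelper helper;  // warning: 'LegacyHelper' is deprecated
  return helper.value;
}
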
BUG=chromium:1082352 TEST=emerge update_engine with CL:2197715 Change-Id: I7d9ec55faaff169d50a51547e189c558edd21e21 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2203106 Tested-by: Allen Webb Commit-Queue: Allen Webb Reviewed-by: Amin Hassani --- BUILD.gn | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/BUILD.gn b/BUILD.gn index 3f2ea445..7961fe18 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -170,6 +170,10 @@ static_library("libpayload_consumer") { "rt", ] + # TODO(crbug.com/1082873): Remove after fixing usage of deprecated + # declarations. + cflags_cc = [ "-Wno-error=deprecated-declarations" ] + # TODO(deymo): Remove unused dependencies once we stop including files # from the root directory. all_dependent_pkg_deps = [ @@ -358,6 +362,10 @@ static_library("libpayload_generator") { ":libpayload_consumer", ":update_metadata-protos", ] + + # TODO(crbug.com/1082873): Remove after fixing usage of deprecated + # declarations. + cflags_cc = [ "-Wno-error=deprecated-declarations" ] } # server-side delta generator. From 3f894a81f130a9ba37e4bc710f5214b65981c6d2 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 20 May 2020 19:26:19 -0700 Subject: [PATCH 295/624] update_engine: Fix ubsan error for undefined-behavior The error: "runtime error: applying non-zero offset 224 to null pointer" Incrementing past the |std::vector::end()| is undefined behavior, hence we must check before assigning the |second| variable based off of |first| because |first| may be |std::vector::end()|. BUG=chromium:1067975 TEST=USE="${USE} ubsan" FEATURES=test P2_TEST_FILTER=*EmptyFilesystemTest*-*RunAsRoot* emerge-$B update_engine Change-Id: I9fb2a213464755c4cf115dc5ba3e658a927d0262 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2210979 Tested-by: Jae Hoon Kim Reviewed-by: Manoj Gupta Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- payload_generator/squashfs_filesystem.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc index 234a5878..eb4fda37 100644 --- a/payload_generator/squashfs_filesystem.cc +++ b/payload_generator/squashfs_filesystem.cc @@ -200,7 +200,8 @@ bool SquashfsFilesystem::Init(const string& map, // If there is any overlap between two consecutive extents, remove them. Here // we are assuming all files have exactly one extent. If this assumption // changes then this implementation needs to change too. - for (auto first = files_.begin(), second = first + 1; + for (auto first = files_.begin(), + second = first + (first == files_.end() ? 0 : 1); first != files_.end() && second != files_.end(); second = first + 1) { auto first_begin = first->extents[0].start_block(); From ad6719817e6dd198edc6922e74672e2262487b57 Mon Sep 17 00:00:00 2001 From: Miriam Polzer Date: Thu, 23 Apr 2020 16:25:58 +0200 Subject: [PATCH 296/624] update_engine: Powerwash based on version Powerwash after an update is currently enforced based on the channel: If the new channel is more stable, powerwash happens. This may cause unnecessary powerwash if the currently installed Chrome OS version is old enough so that a normal update (e.g. 12817.68.0 -> 12817.76.0) can take place. Additionally decide whether a powerwash should take place based on the new and old version number. Only powerwash if the new version number is older than old version number. 
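A minimal restatement of that rule as a standalone predicate (hypothetical helper, assuming libchrome's base::Version; the production logic lives in OmahaResponseHandlerAction::PerformAction() as shown in the hunk below):

#include <string>

#include <base/version.h>

bool ShouldPowerwashForUpdate(bool response_requires_powerwash,
                              bool channel_change_suggests_powerwash,
                              const std::string& new_version,
                              const std::string& current_version) {
  // The Omaha response can force a powerwash unconditionally.
  if (response_requires_powerwash)
    return true;
  // Otherwise only a stable-ward channel change can request one...
  if (!channel_change_suggests_powerwash)
    return false;
  base::Version next(new_version);
  base::Version current(current_version);
  // ...and only when both versions parse and the target really is older;
  // unreadable version strings err on the side of keeping user data.
  if (!next.IsValid() || !current.IsValid())
    return false;
  return next < current;
}
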
BUG=chromium:1070563 TEST=FEATURES=test emerge-amd64-generic update_engine channel change and update on test device Change-Id: Ib211cf87711bde9f9c912395548124dcbb1194bb Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2162986 Tested-by: Miriam Polzer Commit-Queue: Miriam Polzer Reviewed-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- omaha_request_params.h | 6 +- omaha_response_handler_action.cc | 21 +++++- omaha_response_handler_action_unittest.cc | 82 +++++++++++++++++++++-- 3 files changed, 100 insertions(+), 9 deletions(-) diff --git a/omaha_request_params.h b/omaha_request_params.h index d29ce70f..34529658 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -266,8 +266,10 @@ class OmahaRequestParams { // or Init is called again. virtual void UpdateDownloadChannel(); - // Returns whether we should powerwash for this update. - virtual bool ShouldPowerwash() const; + // Returns whether we should powerwash for this update. Note that this is + // just an indication, the final decision to powerwash or not is made in the + // response handler. + bool ShouldPowerwash() const; // Check if the provided update URL is official, meaning either the default // autoupdate server or the autoupdate autotest server. diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index c25b76f5..915e8392 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -21,6 +21,7 @@ #include #include +#include #include #include "update_engine/common/constants.h" @@ -183,8 +184,26 @@ void OmahaResponseHandlerAction::PerformAction() { params->rollback_data_save_requested(); } - if (response.powerwash_required || params->ShouldPowerwash()) + // Powerwash if either the response requires it or the parameters indicated + // powerwash and we are downgrading the version. + if (response.powerwash_required) { install_plan_.powerwash_required = true; + } else if (params->ShouldPowerwash()) { + base::Version new_version(response.version); + base::Version current_version(params->app_version()); + + if (!new_version.IsValid()) { + LOG(WARNING) << "Not powerwashing," + << " the update's version number is unreadable." + << " Update's version number: " << response.version; + } else if (!current_version.IsValid()) { + LOG(WARNING) << "Not powerwashing," + << " the current version number is unreadable." 
+ << " Current version number: " << params->app_version(); + } else if (new_version < current_version) { + install_plan_.powerwash_required = true; + } + } TEST_AND_RETURN(HasOutputPipe()); if (HasOutputPipe()) diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc index 0ebf8483..04cfa73e 100644 --- a/omaha_response_handler_action_unittest.cc +++ b/omaha_response_handler_action_unittest.cc @@ -430,10 +430,11 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForBothHttpAndHttpsTest) { EXPECT_EQ(in.version, install_plan.version); } -TEST_F(OmahaResponseHandlerActionTest, ChangeToMoreStableChannelTest) { +TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableVersionAndChannelTest) { OmahaResponse in; in.update_exists = true; - in.version = "a.b.c.d"; + in.version = "1.0.0.0"; in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"}, .size = 1, .hash = kPayloadHashHex}); @@ -454,7 +455,7 @@ TEST_F(OmahaResponseHandlerActionTest, ChangeToMoreStableChannelTest) { #endif // __ANDROID__ EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); params.UpdateDownloadChannel(); - EXPECT_TRUE(params.ShouldPowerwash()); + params.set_app_version("2.0.0.0"); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; @@ -462,10 +463,79 @@ TEST_F(OmahaResponseHandlerActionTest, ChangeToMoreStableChannelTest) { EXPECT_TRUE(install_plan.powerwash_required); } -TEST_F(OmahaResponseHandlerActionTest, ChangeToLessStableChannelTest) { +TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableVersionAndChannelPowerwashNotAllowedTest) { OmahaResponse in; in.update_exists = true; - in.version = "a.b.c.d"; + in.version = "1.0.0.0"; + in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"}, + .size = 1, + .hash = kPayloadHashHex}); + in.more_info_url = "http://more/info"; + + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + + OmahaRequestParams params(&fake_system_state_); + fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + params.set_root(tempdir.GetPath().value()); + params.set_current_channel("canary-channel"); + // The |ImageProperties| in Android uses prefs to store + // |MutableImageProperties|. +#ifdef __ANDROID__ + EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true)) + .WillOnce(Return(true)); +#endif // __ANDROID__ + EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr)); + params.UpdateDownloadChannel(); + params.set_app_version("2.0.0.0"); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + EXPECT_FALSE(install_plan.powerwash_required); +} + +TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableChannelButNewerVersionTest) { + OmahaResponse in; + in.update_exists = true; + in.version = "12345.96.0.0"; + in.packages.push_back({.payload_urls = {"https://ChannelDownVersionUp"}, + .size = 1, + .hash = kPayloadHashHex}); + in.more_info_url = "http://more/info"; + + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + + OmahaRequestParams params(&fake_system_state_); + fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + params.set_root(tempdir.GetPath().value()); + params.set_current_channel("beta-channel"); + // The |ImageProperties| in Android uses prefs to store + // |MutableImageProperties|. 
+#ifdef __ANDROID__ + EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true)) + .WillOnce(Return(true)); +#endif // __ANDROID__ + EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); + params.UpdateDownloadChannel(); + params.set_app_version("12345.48.0.0"); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + EXPECT_FALSE(install_plan.powerwash_required); +} + +TEST_F(OmahaResponseHandlerActionTest, + ChangeToLessStableVersionAndChannelTest) { + OmahaResponse in; + in.update_exists = true; + in.version = "2.0.0.0"; in.packages.push_back({.payload_urls = {"https://LessStableChannelTest"}, .size = 15, .hash = kPayloadHashHex}); @@ -486,7 +556,7 @@ TEST_F(OmahaResponseHandlerActionTest, ChangeToLessStableChannelTest) { #endif // __ANDROID__ EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr)); params.UpdateDownloadChannel(); - EXPECT_FALSE(params.ShouldPowerwash()); + params.set_app_version("1.0.0.0"); fake_system_state_.set_request_params(¶ms); InstallPlan install_plan; From 74e47d938eb60c3ebbbd65e45fd2432abec3e68a Mon Sep 17 00:00:00 2001 From: Tatsuhisa Yamaguchi Date: Wed, 27 May 2020 05:32:16 +0000 Subject: [PATCH 297/624] update_engine: Fix header file dependency. generate_delta_main.cc in :delta_generator target includes a generated header file by protoc. The rule providing such header should be directly depended by the parent target, or done by public-dependency chain. BUG=chromium:1086795 TEST=run "gn gen --check" like done by crrev.com/c/2215951 TEST=USE="asan fuzzer" cros_workon_make --board=$BOARD update_engine Change-Id: Ic1d4f8f5ac5d205bca2212651b20f6f946f73736 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2217162 Tested-by: Tatsuhisa Yamaguchi Reviewed-by: Keigo Oka Reviewed-by: Amin Hassani Commit-Queue: Tatsuhisa Yamaguchi --- BUILD.gn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD.gn b/BUILD.gn index 7961fe18..e438af46 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -182,7 +182,7 @@ static_library("libpayload_consumer") { "libpuffpatch", "xz-embedded", ] - deps = [ ":update_metadata-protos" ] + public_deps = [ ":update_metadata-protos" ] } # The main daemon static_library with all the code used to check for updates From e0176278ce4e1745e5ff669765f09320b0f02642 Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Thu, 21 May 2020 14:17:49 -0600 Subject: [PATCH 298/624] update_engine: Update for new location for brillo processes These files have moved into a subdirectory. Update the code so they can still be found. 
BUG=chromium:1085464 TEST=emerge-nami update_engine Will also use pre-CQ Change-Id: I85d89b6efa960e77bb56f67ab432c734e8fea8ea Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2212760 Tested-by: Simon Glass Commit-Queue: Simon Glass Reviewed-by: Amin Hassani --- common/http_fetcher_unittest.cc | 2 +- common/subprocess.cc | 2 +- common/subprocess.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc index 589579e3..3ecb996f 100644 --- a/common/http_fetcher_unittest.cc +++ b/common/http_fetcher_unittest.cc @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/common/subprocess.cc b/common/subprocess.cc index 45dff923..ff37472f 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include "update_engine/common/utils.h" diff --git a/common/subprocess.h b/common/subprocess.h index 3eda8d50..e1a7ce33 100644 --- a/common/subprocess.h +++ b/common/subprocess.h @@ -30,8 +30,8 @@ #include #include #include -#include -#include +#include +#include #include // for FRIEND_TEST // The Subprocess class is a singleton. It's used to spawn off a subprocess From 4ed0512acc3a2f854256abac1a856aed705642d3 Mon Sep 17 00:00:00 2001 From: Alessio Balsini Date: Tue, 26 May 2020 22:17:03 +0100 Subject: [PATCH 299/624] Report retrofit and COW image size Add is_vab_retrofit (that tells if the device upgraded to Virtual A/B or was natively supporting the feature) and cow_file_size_bytes (the total size of the space allocated in /data for the dm-snapshot COW images). Bug: 154016862 Test: Manual OTA Signed-off-by: Alessio Balsini Change-Id: I529028bc208b7dd12df0bc28974f65b821d14cd9 --- cleanup_previous_update_action.cc | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index e43730d3..88dbc57e 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -343,7 +343,9 @@ void CleanupPreviousUpdateAction::InitiateMergeAndWait() { return; } - if (snapshot_->InitiateMerge()) { + uint64_t cow_file_size; + if (snapshot_->InitiateMerge(&cow_file_size)) { + merge_stats_->set_cow_file_size(cow_file_size); WaitForMergeOrSchedule(); return; } @@ -399,14 +401,22 @@ void CleanupPreviousUpdateAction::ReportMergeStats() { auto passed_ms = std::chrono::duration_cast( result->merge_time()); + + bool vab_retrofit = boot_control_->GetDynamicPartitionControl() + ->GetVirtualAbFeatureFlag() + .IsRetrofit(); + LOG(INFO) << "Reporting merge stats: " << android::snapshot::UpdateState_Name(report.state()) << " in " << passed_ms.count() << "ms (resumed " << report.resume_count() - << " times)"; + << " times), using " << report.cow_file_size() + << " bytes of COW image."; android::util::stats_write(android::util::SNAPSHOT_MERGE_REPORTED, static_cast(report.state()), static_cast(passed_ms.count()), - static_cast(report.resume_count())); + static_cast(report.resume_count()), + vab_retrofit, + static_cast(report.cow_file_size())); #endif } From e2ad4d3bebd0d864400c5d6931dc8db2db3363f3 Mon Sep 17 00:00:00 2001 From: Alessio Balsini Date: Tue, 26 May 2020 22:18:09 +0100 Subject: [PATCH 300/624] Report super partition size, slot size and free space Add the following metrics to the update attempter: - super_partition_size_bytes: that tells the total size of the super 
partition, - slot_size_bytes: total size of all the partition groups in the current slot, and - super_free_space: available free space in the super partition. Bug: 154016862 Test: Manual OTA Signed-off-by: Alessio Balsini Change-Id: I159a31fb3b71a3ae4c6fb63c81c55635645cc82a --- metrics_reporter_android.cc | 53 ++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc index 24740c8a..d8fa6e5b 100644 --- a/metrics_reporter_android.cc +++ b/metrics_reporter_android.cc @@ -22,10 +22,22 @@ #include #include +#include +#include +#include +#include +#include #include #include "update_engine/common/constants.h" +using android::fs_mgr::GetPartitionGroupName; +using android::fs_mgr::LpMetadata; +using android::fs_mgr::MetadataBuilder; +using android::fs_mgr::ReadMetadata; +using android::fs_mgr::SlotNumberForSlotSuffix; +using base::EndsWith; + namespace { // A number offset adds on top of the enum value. e.g. ErrorCode::SUCCESS will // be reported as 10000, and AttemptResult::UPDATE_CANCELED will be reported as @@ -58,6 +70,42 @@ void MetricsReporterAndroid::ReportUpdateAttemptMetrics( metrics::AttemptResult attempt_result, ErrorCode error_code) { int64_t payload_size_mib = payload_size / kNumBytesInOneMiB; + + int64_t super_partition_size_bytes = 0; + int64_t super_free_space = 0; + int64_t slot_size_bytes = 0; + + if (android::base::GetBoolProperty("ro.boot.dynamic_partitions", false)) { + uint32_t slot = SlotNumberForSlotSuffix(fs_mgr_get_slot_suffix()); + auto super_device = fs_mgr_get_super_partition_name(); + std::unique_ptr metadata = ReadMetadata(super_device, slot); + if (metadata) { + super_partition_size_bytes = GetTotalSuperPartitionSize(*metadata); + + for (const auto& group : metadata->groups) { + if (EndsWith(GetPartitionGroupName(group), + fs_mgr_get_slot_suffix(), + base::CompareCase::SENSITIVE)) { + slot_size_bytes += group.maximum_size; + } + } + + auto metadata_builder = MetadataBuilder::New(*metadata); + if (metadata_builder) { + auto free_regions = metadata_builder->GetFreeRegions(); + for (const auto& interval : free_regions) { + super_free_space += interval.length(); + } + super_free_space *= android::dm::kSectorSize; + } else { + LOG(ERROR) << "Cannot create metadata builder."; + } + } else { + LOG(ERROR) << "Could not read dynamic partition metadata for device: " + << super_device; + } + } + android::util::stats_write( android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED, attempt_number, @@ -67,7 +115,10 @@ void MetricsReporterAndroid::ReportUpdateAttemptMetrics( payload_size_mib, GetStatsdEnumValue(static_cast(attempt_result)), GetStatsdEnumValue(static_cast(error_code)), - android::base::GetProperty("ro.build.fingerprint", "").c_str()); + android::base::GetProperty("ro.build.fingerprint", "").c_str(), + super_partition_size_bytes, + slot_size_bytes, + super_free_space); } void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics( From 2dfd35d8e912a8cb1a06cb3c9e629bf58deefa87 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 2 Jun 2020 10:53:13 -0700 Subject: [PATCH 301/624] update_engine: Log when EOL date is set + retrieved autoupdate_EOL autotests seems to fail and return a default EOL date value of -9999. This logging helps pinpoint whether prefs are the culprit. 
BUG=chromium:1090283 TEST=FEATURES=test emerge-$B update_engine update_engine-client Change-Id: If2feb4841d2642af89dac94e699a30a7ee2fb002 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2227031 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Auto-Submit: Jae Hoon Kim Commit-Queue: Jae Hoon Kim --- omaha_request_action.cc | 12 +++++++----- update_attempter.cc | 4 +++- update_attempter_unittest.cc | 14 ++++++++++++-- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 8728f72c..86d4b93f 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -1366,11 +1366,13 @@ bool OmahaRequestAction::PersistEolInfo(const map& attrs) { // If EOL date attribute is not sent, don't delete the old persisted EOL // date information. auto eol_date_attr = attrs.find(kAttrEolDate); - if (eol_date_attr != attrs.end() && - !system_state_->prefs()->SetString(kPrefsOmahaEolDate, - eol_date_attr->second)) { - LOG(ERROR) << "Setting EOL date failed."; - return false; + if (eol_date_attr != attrs.end()) { + const auto& eol_date = eol_date_attr->second; + if (!system_state_->prefs()->SetString(kPrefsOmahaEolDate, eol_date)) { + LOG(ERROR) << "Setting EOL date failed."; + return false; + } + LOG(INFO) << "Set EOL date to " << eol_date; } return true; } diff --git a/update_attempter.cc b/update_attempter.cc index 52561929..60c2c36c 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -1505,7 +1505,9 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { out_status->is_install = is_install_; string str_eol_date; - system_state_->prefs()->GetString(kPrefsOmahaEolDate, &str_eol_date); + if (system_state_->prefs()->Exists(kPrefsOmahaEolDate) && + !system_state_->prefs()->GetString(kPrefsOmahaEolDate, &str_eol_date)) + LOG(ERROR) << "Failed to retrieve kPrefsOmahaEolDate pref."; out_status->eol_date = StringToEolDate(str_eol_date); // A powerwash will take place either if the install plan says it is required diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 745bcc2f..0086dd5d 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -2343,6 +2343,7 @@ TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback) { TEST_F(UpdateAttempterTest, FutureEolTest) { EolDate eol_date = std::numeric_limits::max(); + EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(true)); EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) .WillOnce( DoAll(SetArgPointee<1>(EolDateToString(eol_date)), Return(true))); @@ -2354,6 +2355,7 @@ TEST_F(UpdateAttempterTest, FutureEolTest) { TEST_F(UpdateAttempterTest, PastEolTest) { EolDate eol_date = 1; + EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(true)); EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) .WillOnce( DoAll(SetArgPointee<1>(EolDateToString(eol_date)), Return(true))); @@ -2364,13 +2366,21 @@ TEST_F(UpdateAttempterTest, PastEolTest) { } TEST_F(UpdateAttempterTest, FailedEolTest) { - EolDate eol_date = kEolDateInvalid; + EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(true)); EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _)) .WillOnce(Return(false)); UpdateEngineStatus status; attempter_.GetStatus(&status); - EXPECT_EQ(eol_date, status.eol_date); + EXPECT_EQ(kEolDateInvalid, status.eol_date); +} + +TEST_F(UpdateAttempterTest, MissingEolTest) { + EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(false)); + + 
UpdateEngineStatus status; + attempter_.GetStatus(&status); + EXPECT_EQ(kEolDateInvalid, status.eol_date); } TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { From 550ade4e44a72708354597307122a3f46e43a7d6 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 2 Jun 2020 15:08:37 -0700 Subject: [PATCH 302/624] Fix update_engine libchrome log tag. Test: boot and inspect logcat Fixes: 158030597 Change-Id: I931af4faf6219ce42ff94080ea834721726dc0ba --- logging_android.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/logging_android.cc b/logging_android.cc index 88b068bc..0219075c 100644 --- a/logging_android.cc +++ b/logging_android.cc @@ -241,8 +241,8 @@ bool RedirectToLiblog(int severity, ignore_result(android::base::ConsumeSuffix(&sv, "\n")); std::string str(sv.data(), sv.size()); // This will eventually be redirected to CombinedLogger. - // |tag| is ignored by CombinedLogger, so just leave it empty. - __android_log_write(priority, "" /* tag */, str.c_str()); + // Use nullptr as tag so that liblog infers log tag from getprogname(). + __android_log_write(priority, nullptr /* tag */, str.c_str()); return true; } From ddf27738af372b335c85ee5cf8a7d9fcab4cd7cf Mon Sep 17 00:00:00 2001 From: "P.Adarsh Reddy" Date: Mon, 8 Jun 2020 23:17:36 +0530 Subject: [PATCH 303/624] Check for system_other existence in recovery mode as well. This removes the recovery mode conditional, to ensure we check for the existence of system_other in recovery mode as well, before going ahead with performing avb related operations on system_other. Bug: 155053192 Test: adb sideload of OTA zip in recovery mode now passes on a device which doesn't have system_other enabled. Change-Id: I1884755aad2a8d37f540dbee73c7c7baab2759e7 Merged-In: I1884755aad2a8d37f540dbee73c7c7baab2759e7 --- dynamic_partition_control_android.cc | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 5ed604ab..ecd6252d 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -570,18 +570,15 @@ bool DynamicPartitionControlAndroid::GetSystemOtherPath( path->clear(); *should_unmap = false; - // In recovery, just erase no matter what. - // - On devices with retrofit dynamic partitions, no logical partitions - // should be mounted at this point. Hence it should be safe to erase. - // Otherwise, do check that AVB is enabled on system_other before erasing. - if (!IsRecovery()) { - auto has_avb = IsAvbEnabledOnSystemOther(); - TEST_AND_RETURN_FALSE(has_avb.has_value()); - if (!has_avb.value()) { - LOG(INFO) << "AVB is not enabled on system_other. Skip erasing."; - return true; - } + // Check that AVB is enabled on system_other before erasing. + auto has_avb = IsAvbEnabledOnSystemOther(); + TEST_AND_RETURN_FALSE(has_avb.has_value()); + if (!has_avb.value()) { + LOG(INFO) << "AVB is not enabled on system_other. Skip erasing."; + return true; + } + if (!IsRecovery()) { // Found unexpected avb_keys for system_other on devices retrofitting // dynamic partitions. Previous crash in update_engine may leave logical // partitions mapped on physical system_other partition. It is difficult to From 51a5a39c892f11ce30f1c537133432f5e67525e0 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 3 Jun 2020 14:39:32 -0700 Subject: [PATCH 304/624] Add an overload function in boot control It's used to support partial update. 
The overload function takes additional info whether the partition is included in payload. And it also outputs if the partition is a dynamic partition. Bug: 157778739 Test: unit tests pass Change-Id: I0741d44c223fb7c187fe208564371acd6d868c65 --- boot_control_android.cc | 18 +++++++++++++--- boot_control_android.h | 5 +++++ boot_control_chromeos.cc | 17 ++++++++++++--- boot_control_chromeos.h | 5 +++++ common/boot_control_interface.h | 14 +++++++++++-- common/boot_control_stub.cc | 9 ++++++++ common/boot_control_stub.h | 5 +++++ common/fake_boot_control.h | 10 ++++++++- dynamic_partition_control_android.cc | 31 ++++++++++++++++++++++++---- dynamic_partition_control_android.h | 8 +++++++ 10 files changed, 109 insertions(+), 13 deletions(-) diff --git a/boot_control_android.cc b/boot_control_android.cc index ec2ca0f8..dee5fa84 100644 --- a/boot_control_android.cc +++ b/boot_control_android.cc @@ -82,12 +82,24 @@ BootControlInterface::Slot BootControlAndroid::GetCurrentSlot() const { return module_->getCurrentSlot(); } +bool BootControlAndroid::GetPartitionDevice(const std::string& partition_name, + BootControlInterface::Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const { + return dynamic_control_->GetPartitionDevice(partition_name, + slot, + GetCurrentSlot(), + not_in_payload, + device, + is_dynamic); +} bool BootControlAndroid::GetPartitionDevice(const string& partition_name, - Slot slot, + BootControlInterface::Slot slot, string* device) const { - return dynamic_control_->GetPartitionDevice( - partition_name, slot, GetCurrentSlot(), device); + return GetPartitionDevice( + partition_name, slot, false /* not_in_payload */, device, nullptr); } bool BootControlAndroid::IsSlotBootable(Slot slot) const { diff --git a/boot_control_android.h b/boot_control_android.h index 0b042e3d..5009dbd3 100644 --- a/boot_control_android.h +++ b/boot_control_android.h @@ -44,6 +44,11 @@ class BootControlAndroid : public BootControlInterface { // BootControlInterface overrides. unsigned int GetNumSlots() const override; BootControlInterface::Slot GetCurrentSlot() const override; + bool GetPartitionDevice(const std::string& partition_name, + BootControlInterface::Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const override; bool GetPartitionDevice(const std::string& partition_name, BootControlInterface::Slot slot, std::string* device) const override; diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index 0f471696..da84e992 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -148,9 +148,11 @@ BootControlInterface::Slot BootControlChromeOS::GetCurrentSlot() const { return current_slot_; } -bool BootControlChromeOS::GetPartitionDevice(const string& partition_name, - unsigned int slot, - string* device) const { +bool BootControlChromeOS::GetPartitionDevice(const std::string& partition_name, + BootControlInterface::Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const { // Partition name prefixed with |kPartitionNamePrefixDlc| is a DLC module. 
if (base::StartsWith(partition_name, kPartitionNamePrefixDlc, @@ -180,9 +182,18 @@ bool BootControlChromeOS::GetPartitionDevice(const string& partition_name, return false; *device = part_device; + if (is_dynamic) { + *is_dynamic = false; + } return true; } +bool BootControlChromeOS::GetPartitionDevice(const string& partition_name, + BootControlInterface::Slot slot, + string* device) const { + return GetPartitionDevice(partition_name, slot, false, device, nullptr); +} + bool BootControlChromeOS::IsSlotBootable(Slot slot) const { int partition_num = GetPartitionNumber(kChromeOSPartitionNameKernel, slot); if (partition_num < 0) diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index 02090522..6edc1480 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -45,6 +45,11 @@ class BootControlChromeOS : public BootControlInterface { // BootControlInterface overrides. unsigned int GetNumSlots() const override; BootControlInterface::Slot GetCurrentSlot() const override; + bool GetPartitionDevice(const std::string& partition_name, + BootControlInterface::Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const override; bool GetPartitionDevice(const std::string& partition_name, BootControlInterface::Slot slot, std::string* device) const override; diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h index 3906e2f5..c93de5c5 100644 --- a/common/boot_control_interface.h +++ b/common/boot_control_interface.h @@ -59,8 +59,18 @@ class BootControlInterface { // every slot. In order to access the dynamic partitions in the target slot, // GetDynamicPartitionControl()->PreparePartitionsForUpdate() must be called // (with |update| == true for the first time for a payload, and |false| for - // for the rest of the times) prior to calling this function. On success, - // returns true and stores the block device in |device|. + // for the rest of the times) prior to calling this function. + // The handling may be different based on whether the partition is included + // in the update payload. On success, returns true; and stores the block + // device in |device|, if the partition is dynamic in |is_dynamic|. + virtual bool GetPartitionDevice(const std::string& partition_name, + Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const = 0; + + // Overload of the above function. We assume the partition is always included + // in the payload. virtual bool GetPartitionDevice(const std::string& partition_name, Slot slot, std::string* device) const = 0; diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc index 2eb92116..907f6707 100644 --- a/common/boot_control_stub.cc +++ b/common/boot_control_stub.cc @@ -35,6 +35,15 @@ BootControlInterface::Slot BootControlStub::GetCurrentSlot() const { return 0; } +bool BootControlStub::GetPartitionDevice(const std::string& partition_name, + BootControlInterface::Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const { + LOG(ERROR) << __FUNCTION__ << " should never be called."; + return false; +} + bool BootControlStub::GetPartitionDevice(const string& partition_name, Slot slot, string* device) const { diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h index cc161902..a1bdb965 100644 --- a/common/boot_control_stub.h +++ b/common/boot_control_stub.h @@ -40,6 +40,11 @@ class BootControlStub : public BootControlInterface { // BootControlInterface overrides. 
unsigned int GetNumSlots() const override; BootControlInterface::Slot GetCurrentSlot() const override; + bool GetPartitionDevice(const std::string& partition_name, + Slot slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic) const override; bool GetPartitionDevice(const std::string& partition_name, BootControlInterface::Slot slot, std::string* device) const override; diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h index bd9d9ca8..adbacd67 100644 --- a/common/fake_boot_control.h +++ b/common/fake_boot_control.h @@ -48,7 +48,9 @@ class FakeBootControl : public BootControlInterface { bool GetPartitionDevice(const std::string& partition_name, BootControlInterface::Slot slot, - std::string* device) const override { + bool not_in_payload, + std::string* device, + bool* is_dynamic) const override { if (slot >= num_slots_) return false; auto part_it = devices_[slot].find(partition_name); @@ -58,6 +60,12 @@ class FakeBootControl : public BootControlInterface { return true; } + bool GetPartitionDevice(const std::string& partition_name, + BootControlInterface::Slot slot, + std::string* device) const override { + return GetPartitionDevice(partition_name, slot, false, device, nullptr); + } + bool IsSlotBootable(BootControlInterface::Slot slot) const override { return slot < num_slots_ && is_bootable_[slot]; } diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index a9c2bb3b..829e3eb6 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -886,22 +886,35 @@ bool DynamicPartitionControlAndroid::GetPartitionDevice( const std::string& partition_name, uint32_t slot, uint32_t current_slot, - std::string* device) { + bool not_in_payload, + std::string* device, + bool* is_dynamic) { const auto& partition_name_suffix = partition_name + SlotSuffixForSlotNumber(slot); std::string device_dir_str; TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str)); base::FilePath device_dir(device_dir_str); + if (is_dynamic) { + *is_dynamic = false; + } + // When looking up target partition devices, treat them as static if the // current payload doesn't encode them as dynamic partitions. This may happen // when applying a retrofit update on top of a dynamic-partitions-enabled // build. 
if (GetDynamicPartitionsFeatureFlag().IsEnabled() && (slot == current_slot || is_target_dynamic_)) { - switch (GetDynamicPartitionDevice( - device_dir, partition_name_suffix, slot, current_slot, device)) { + switch (GetDynamicPartitionDevice(device_dir, + partition_name_suffix, + slot, + current_slot, + not_in_payload, + device)) { case DynamicPartitionDeviceStatus::SUCCESS: + if (is_dynamic) { + *is_dynamic = true; + } return true; case DynamicPartitionDeviceStatus::TRY_STATIC: break; @@ -920,6 +933,15 @@ bool DynamicPartitionControlAndroid::GetPartitionDevice( return true; } +bool DynamicPartitionControlAndroid::GetPartitionDevice( + const std::string& partition_name, + uint32_t slot, + uint32_t current_slot, + std::string* device) { + return GetPartitionDevice( + partition_name, slot, current_slot, false, device, nullptr); +} + bool DynamicPartitionControlAndroid::IsSuperBlockDevice( const base::FilePath& device_dir, uint32_t current_slot, @@ -936,6 +958,7 @@ DynamicPartitionControlAndroid::GetDynamicPartitionDevice( const std::string& partition_name_suffix, uint32_t slot, uint32_t current_slot, + bool not_in_payload, std::string* device) { std::string super_device = device_dir.Append(GetSuperPartitionName(slot)).value(); @@ -975,7 +998,7 @@ DynamicPartitionControlAndroid::GetDynamicPartitionDevice( } } - bool force_writable = slot != current_slot; + bool force_writable = (slot != current_slot) && !not_in_payload; if (MapPartitionOnDeviceMapper( super_device, partition_name_suffix, slot, force_writable, device)) { return DynamicPartitionDeviceStatus::SUCCESS; diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 18a05fb2..e3bedbcb 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -58,6 +58,13 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // Note: this function is only used by BootControl*::GetPartitionDevice. // Other callers should prefer BootControl*::GetPartitionDevice over // BootControl*::GetDynamicPartitionControl()->GetPartitionDevice(). + bool GetPartitionDevice(const std::string& partition_name, + uint32_t slot, + uint32_t current_slot, + bool not_in_payload, + std::string* device, + bool* is_dynamic); + bool GetPartitionDevice(const std::string& partition_name, uint32_t slot, uint32_t current_slot, @@ -222,6 +229,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const std::string& partition_name_suffix, uint32_t slot, uint32_t current_slot, + bool not_in_payload, std::string* device); // Return true if |partition_name_suffix| is a block device of From 13e4195d65fb8a5583b7acb9cc0763733e8fca86 Mon Sep 17 00:00:00 2001 From: "P.Adarsh Reddy" Date: Mon, 8 Jun 2020 23:17:36 +0530 Subject: [PATCH 305/624] Check for system_other existence in recovery mode as well. This removes the recovery mode conditional, to ensure we check for the existence of system_other in recovery mode as well, before going ahead with performing avb related operations on system_other. Bug: 155053192 Test: adb sideload of OTA zip in recovery mode now passes on a device which doesn't have system_other enabled. 
Change-Id: I1884755aad2a8d37f540dbee73c7c7baab2759e7 --- dynamic_partition_control_android.cc | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 829e3eb6..79c269c4 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -579,18 +579,15 @@ bool DynamicPartitionControlAndroid::GetSystemOtherPath( path->clear(); *should_unmap = false; - // In recovery, just erase no matter what. - // - On devices with retrofit dynamic partitions, no logical partitions - // should be mounted at this point. Hence it should be safe to erase. - // Otherwise, do check that AVB is enabled on system_other before erasing. - if (!IsRecovery()) { - auto has_avb = IsAvbEnabledOnSystemOther(); - TEST_AND_RETURN_FALSE(has_avb.has_value()); - if (!has_avb.value()) { - LOG(INFO) << "AVB is not enabled on system_other. Skip erasing."; - return true; - } + // Check that AVB is enabled on system_other before erasing. + auto has_avb = IsAvbEnabledOnSystemOther(); + TEST_AND_RETURN_FALSE(has_avb.has_value()); + if (!has_avb.value()) { + LOG(INFO) << "AVB is not enabled on system_other. Skip erasing."; + return true; + } + if (!IsRecovery()) { // Found unexpected avb_keys for system_other on devices retrofitting // dynamic partitions. Previous crash in update_engine may leave logical // partitions mapped on physical system_other partition. It is difficult to From d3865d1bc4298739652ee101d1b625fb12361f9b Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 3 Jun 2020 15:25:17 -0700 Subject: [PATCH 306/624] Add PartitionUpdateGenerator For a/b partial update, the payload doesn't contain all partitions for the device to boot up. Therefore, we add a new class to generate additional operations on top of the partial update. The class is used in payload consumer when the install plan is created. In specific, the new class parses for the a/b partitions not included in the payload. Then, for static partitions, it generates SOURCE_COPY operations to copy the bytes from the source slot to target slot. For dynamic partitions, it only calculates the partition hash for the filesystem verification later. 
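A rough usage sketch of the new interface (illustrative only, not part of this change): the caller hands the generator the set of partition names the partial payload already updates and gets back PartitionUpdate entries for everything else. The helper name, the example partition names, the include paths, and the std::set<std::string> / std::vector<PartitionUpdate> template arguments are my best-guess assumptions inferred from the diff below.

    #include <set>
    #include <string>
    #include <vector>

    #include "update_engine/common/boot_control_interface.h"
    #include "update_engine/payload_consumer/partition_update_generator_interface.h"
    #include "update_engine/update_metadata.pb.h"

    namespace chromeos_update_engine {

    // Hypothetical helper, not part of this change.
    bool AddOperationsForMissingPartitions(
        BootControlInterface* boot_control,
        BootControlInterface::Slot source_slot,
        BootControlInterface::Slot target_slot,
        std::vector<PartitionUpdate>* partitions) {
      // Partition names that the partial payload itself updates (example values).
      std::set<std::string> in_payload = {"system", "product"};

      // The build picks the Android implementation or the stub for this factory.
      auto generator = partition_update_generator::Create(boot_control);

      // Static partitions missing from the payload get SOURCE_COPY operations;
      // dynamic ones only get their hash computed for later filesystem
      // verification, per the description above.
      std::vector<PartitionUpdate> other_partitions;
      if (!generator->GenerateOperationsForPartitionsNotInPayload(
              source_slot, target_slot, in_payload, &other_partitions)) {
        return false;
      }

      // Append the generated entries to the caller's partition list.
      partitions->insert(
          partitions->end(), other_partitions.begin(), other_partitions.end());
      return true;
    }

    }  // namespace chromeos_update_engine
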
Bug: 157778739 Test: do a partial OTA Change-Id: Ia5c64cff4655aad05311b0e7c8c8327bc2f4fd91 --- Android.bp | 1 + payload_consumer/delta_performer.cc | 70 +++++++++++++------ .../partition_update_generator_android.cc | 41 +++++++++++ .../partition_update_generator_android.h | 46 ++++++++++++ .../partition_update_generator_interface.h | 55 +++++++++++++++ .../partition_update_generator_stub.cc | 38 ++++++++++ .../partition_update_generator_stub.h | 40 +++++++++++ update_metadata.proto | 3 + 8 files changed, 274 insertions(+), 20 deletions(-) create mode 100644 payload_consumer/partition_update_generator_android.cc create mode 100644 payload_consumer/partition_update_generator_android.h create mode 100644 payload_consumer/partition_update_generator_interface.h create mode 100644 payload_consumer/partition_update_generator_stub.cc create mode 100644 payload_consumer/partition_update_generator_stub.h diff --git a/Android.bp b/Android.bp index 3287b7b4..59d698d8 100644 --- a/Android.bp +++ b/Android.bp @@ -184,6 +184,7 @@ cc_library_static { "payload_consumer/verity_writer_android.cc", "payload_consumer/xz_extent_writer.cc", "payload_consumer/fec_file_descriptor.cc", + "payload_consumer/partition_update_generator_android.cc", ], } diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 4c4ff041..d1de9f4d 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +51,7 @@ #include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/extent_reader.h" #include "update_engine/payload_consumer/extent_writer.h" +#include "update_engine/payload_consumer/partition_update_generator_interface.h" #if USE_FEC #include "update_engine/payload_consumer/fec_file_descriptor.h" #endif // USE_FEC @@ -357,12 +359,15 @@ bool DeltaPerformer::OpenCurrentPartition() { install_plan_->partitions.size() - partitions_.size(); const InstallPlan::Partition& install_part = install_plan_->partitions[num_previous_partitions + current_partition_]; - // Open source fds if we have a delta payload with minor version >= 2. - if (payload_->type == InstallPayloadType::kDelta && - GetMinorVersion() != kInPlaceMinorPayloadVersion && - // With dynamic partitions we could create a new partition in a - // delta payload, and we shouldn't open source partition in that case. - install_part.source_size > 0) { + // Open source fds if we have a delta payload with minor version >= 2, or for + // partitions in the partial update. + bool source_may_exist = manifest_.partial_update() || + (payload_->type == InstallPayloadType::kDelta && + GetMinorVersion() != kInPlaceMinorPayloadVersion); + // We shouldn't open the source partition in certain cases, e.g. some dynamic + // partitions in delta payload, partitions included in the full payload for + // partial updates. Use the source size as the indicator. + if (source_may_exist && install_part.source_size > 0) { source_path_ = install_part.source_path; int err; source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err); @@ -851,6 +856,41 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { partitions_.push_back(std::move(kern_part)); } + // For VAB and partial updates, the partition preparation will copy the + // dynamic partitions metadata to the target metadata slot, and rename the + // slot suffix of the partitions in the metadata. 
+ if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) { + uint64_t required_size = 0; + if (!PreparePartitionsForUpdate(&required_size)) { + if (required_size > 0) { + *error = ErrorCode::kNotEnoughSpace; + } else { + *error = ErrorCode::kInstallDeviceOpenError; + } + return false; + } + } + + // TODO(xunchang) TBD: allow partial update only on devices with dynamic + // partition. + if (manifest_.partial_update()) { + std::set touched_partitions; + for (const auto& partition_update : partitions_) { + touched_partitions.insert(partition_update.partition_name()); + } + + auto generator = partition_update_generator::Create(boot_control_); + std::vector other_partitions; + TEST_AND_RETURN_FALSE( + generator->GenerateOperationsForPartitionsNotInPayload( + install_plan_->source_slot, + install_plan_->target_slot, + touched_partitions, + &other_partitions)); + partitions_.insert( + partitions_.end(), other_partitions.begin(), other_partitions.end()); + } + // Fill in the InstallPlan::partitions based on the partitions from the // payload. for (const auto& partition : partitions_) { @@ -924,22 +964,13 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { install_plan_->partitions.push_back(install_part); } - if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) { - uint64_t required_size = 0; - if (!PreparePartitionsForUpdate(&required_size)) { - if (required_size > 0) { - *error = ErrorCode::kNotEnoughSpace; - } else { - *error = ErrorCode::kInstallDeviceOpenError; - } - return false; - } - } - if (major_payload_version_ == kBrilloMajorPayloadVersion) { manifest_.clear_partitions(); } + // TODO(xunchang) only need to load the partitions for those in payload. + // Because we have already loaded the other once when generating SOURCE_COPY + // operations. if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) { LOG(ERROR) << "Unable to determine all the partition devices."; *error = ErrorCode::kInstallDeviceOpenError; @@ -1712,7 +1743,6 @@ DeltaPerformer::CreatePayloadVerifier() { ErrorCode DeltaPerformer::ValidateManifest() { // Perform assorted checks to sanity check the manifest, make sure it // matches data from other sources, and that it is a supported version. - bool has_old_fields = (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info()); for (const PartitionUpdate& partition : manifest_.partitions()) { @@ -1737,8 +1767,8 @@ ErrorCode DeltaPerformer::ValidateManifest() { << "' payload."; return ErrorCode::kPayloadMismatchedType; } - // Check that the minor version is compatible. + // TODO(xunchang) increment minor version & add check for partial update if (actual_payload_type == InstallPayloadType::kFull) { if (manifest_.minor_version() != kFullPayloadMinorVersion) { LOG(ERROR) << "Manifest contains minor version " diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc new file mode 100644 index 00000000..fcacc86d --- /dev/null +++ b/payload_consumer/partition_update_generator_android.cc @@ -0,0 +1,41 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_consumer/partition_update_generator_android.h" + +#include + +namespace chromeos_update_engine { + +bool PartitionUpdateGeneratorAndroid:: + GenerateOperationsForPartitionsNotInPayload( + BootControlInterface::Slot source_slot, + BootControlInterface::Slot target_slot, + const std::set& partitions_in_payload, + std::vector* update_list) { + // TODO(xunchang) implement the function + CHECK(boot_control_); + return true; +} + +namespace partition_update_generator { +std::unique_ptr Create( + BootControlInterface* boot_control) { + return std::make_unique(boot_control); +} +} // namespace partition_update_generator + +} // namespace chromeos_update_engine diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h new file mode 100644 index 00000000..bb50133a --- /dev/null +++ b/payload_consumer/partition_update_generator_android.h @@ -0,0 +1,46 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_ +#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_ + +#include +#include +#include + +#include "update_engine/common/boot_control_interface.h" +#include "update_engine/payload_consumer/partition_update_generator_interface.h" + +namespace chromeos_update_engine { +class PartitionUpdateGeneratorAndroid + : public PartitionUpdateGeneratorInterface { + public: + explicit PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control) + : boot_control_(boot_control) {} + + bool GenerateOperationsForPartitionsNotInPayload( + BootControlInterface::Slot source_slot, + BootControlInterface::Slot target_slot, + const std::set& partitions_in_payload, + std::vector* update_list) override; + + private: + BootControlInterface* boot_control_; +}; + +} // namespace chromeos_update_engine + +#endif diff --git a/payload_consumer/partition_update_generator_interface.h b/payload_consumer/partition_update_generator_interface.h new file mode 100644 index 00000000..0341d40b --- /dev/null +++ b/payload_consumer/partition_update_generator_interface.h @@ -0,0 +1,55 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_ +#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_ + +#include +#include +#include +#include + +#include "update_engine/common/boot_control_interface.h" + +namespace chromeos_update_engine { +class PartitionUpdate; + +// This class parses the partitions that are not included in the payload of a +// partial A/B update. And it generates additional operations for these +// partitions to make the update complete. +class PartitionUpdateGeneratorInterface { + public: + virtual ~PartitionUpdateGeneratorInterface() = default; + + // Adds PartitionUpdate for partitions not included in the payload. For static + // partitions, it generates SOURCE_COPY operations to copy the bytes from the + // source slot to target slot. For dynamic partitions, it only calculates the + // partition hash for the filesystem verification later. + virtual bool GenerateOperationsForPartitionsNotInPayload( + BootControlInterface::Slot source_slot, + BootControlInterface::Slot target_slot, + const std::set& partitions_in_payload, + std::vector* update_list) = 0; +}; + +namespace partition_update_generator { +std::unique_ptr Create( + BootControlInterface* boot_control); +} + +} // namespace chromeos_update_engine + +#endif diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc new file mode 100644 index 00000000..e2b64ec5 --- /dev/null +++ b/payload_consumer/partition_update_generator_stub.cc @@ -0,0 +1,38 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/payload_consumer/partition_update_generator_stub.h" + +#include + +namespace chromeos_update_engine { + +bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload( + chromeos_update_engine::BootControlInterface::Slot source_slot, + chromeos_update_engine::BootControlInterface::Slot target_slot, + const std::set& partitions_in_payload, + std::vector* update_list) { + return true; +} + +namespace partition_update_generator { +std::unique_ptr Create( + BootControlInterface* boot_control) { + return std::make_unique(); +} +} // namespace partition_update_generator + +} // namespace chromeos_update_engine diff --git a/payload_consumer/partition_update_generator_stub.h b/payload_consumer/partition_update_generator_stub.h new file mode 100644 index 00000000..282875ef --- /dev/null +++ b/payload_consumer/partition_update_generator_stub.h @@ -0,0 +1,40 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_ +#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_ + +#include +#include +#include + +#include "update_engine/common/boot_control_interface.h" +#include "update_engine/payload_consumer/partition_update_generator_interface.h" + +namespace chromeos_update_engine { +class PartitionUpdateGeneratorStub : public PartitionUpdateGeneratorInterface { + public: + PartitionUpdateGeneratorStub() = default; + bool GenerateOperationsForPartitionsNotInPayload( + BootControlInterface::Slot source_slot, + BootControlInterface::Slot target_slot, + const std::set& partitions_in_payload, + std::vector* update_list) override; +}; + +} // namespace chromeos_update_engine + +#endif diff --git a/update_metadata.proto b/update_metadata.proto index 9bc0d8a5..4b4c327a 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -367,4 +367,7 @@ message DeltaArchiveManifest { // Metadata related to all dynamic partitions. optional DynamicPartitionMetadata dynamic_partition_metadata = 15; + + // If the payload only updates a subset of partitions on the device. 
+ optional bool partial_update = 16; } From 3562a5da1ef2d7280320167e0bd56a77e02ed4ff Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Wed, 10 Jun 2020 13:55:41 -0700 Subject: [PATCH 307/624] update_engine: Remove dead CpuLimiter declaration BUG=none TEST=FEATURES=test emerge-$B update_engine update_engine-client # + CQ Change-Id: If62c6bd49e3188e7a35a9e1dc277276e833c0ae4 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2240157 Tested-by: Jae Hoon Kim Commit-Queue: Amin Hassani Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Andrew Lassalle --- common/cpu_limiter.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/common/cpu_limiter.h b/common/cpu_limiter.h index c7add89d..e6d7331e 100644 --- a/common/cpu_limiter.h +++ b/common/cpu_limiter.h @@ -30,10 +30,6 @@ enum class CpuShares : int { kLow = 2, }; -// Sets the current process shares to |shares|. Returns true on -// success, false otherwise. -bool SetCpuShares(CpuShares shares); - class CPULimiter { public: CPULimiter() = default; From 694eeb0dece40f88e11ece3a776d995d855be79b Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 1 Jun 2020 14:24:08 -0700 Subject: [PATCH 308/624] update_engine: Add PayloadState Exclusion Logic |PayloadState| will exclude payloads based on specific update failures. This is to prevent critical platform updates from being blocked by less critical updates (e.g. DLCs). A layer of robustness is added in protecting CrOS devices from falling off the update train. Some important changes to mention: - Only during updates will update_engine exclude non-critical payloads - |OmahaRequestAction|, the current precursor |Action| to |OmahaResponseHandlerAction|, during a update will exclude faulty/excluded payloads prior to setting the |OmahaResponse| as an output object for suqsequent bonded |Action| to consume - When all payloads are excluded for an update, the |ErrorCode| will be indicated as |OmahaResponseInvalid| as this case is not a valid Omaha response update_engine should ever run into because non-critical updates must tag alongside a critical update BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine update_engine-client Change-Id: I0551a228d0b84defb4d59966e8ed46a5d9278d60 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2190237 Tested-by: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- common/utils.cc | 4 ++ common/utils.h | 4 ++ omaha_request_action.cc | 59 +++++++++++++++- omaha_request_action_unittest.cc | 80 +++++++++++++++++++++- omaha_response.h | 3 + omaha_response_handler_action.cc | 3 + payload_state.cc | 20 ++++++ payload_state.h | 9 +++ payload_state_unittest.cc | 112 ++++++++++++++++++++++++++++--- 9 files changed, 283 insertions(+), 11 deletions(-) diff --git a/common/utils.cc b/common/utils.cc index 3a234cb8..644493d5 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -959,6 +959,10 @@ void ParseRollbackKeyVersion(const string& raw_version, } } +string GetExclusionName(const string& str_to_convert) { + return base::NumberToString(base::StringPieceHash()(str_to_convert)); +} + } // namespace utils } // namespace chromeos_update_engine diff --git a/common/utils.h b/common/utils.h index d949a3e9..ee2dce08 100644 --- a/common/utils.h +++ b/common/utils.h @@ -313,6 +313,10 @@ void ParseRollbackKeyVersion(const std::string& raw_version, uint16_t* high_version, uint16_t* low_version); +// Returns the string format of the hashed 
|str_to_convert| that can be used +// with |Excluder| as the exclusion name. +std::string GetExclusionName(const std::string& str_to_convert); + } // namespace utils // Utility class to close a file descriptor diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 86d4b93f..3a0b91c7 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -55,6 +55,7 @@ #include "update_engine/omaha_request_params.h" #include "update_engine/p2p_manager.h" #include "update_engine/payload_state_interface.h" +#include "update_engine/update_attempter.h" using base::Optional; using base::Time; @@ -534,6 +535,7 @@ bool UpdateLastPingDays(OmahaParserData* parser_data, PrefsInterface* prefs) { // False otherwise, in which case it sets any error code using |completer|. bool ParsePackage(OmahaParserData::App* app, OmahaResponse* output_object, + bool can_exclude, ScopedActionCompleter* completer) { if (app->updatecheck_status.empty() || app->updatecheck_status == kValNoUpdate) { @@ -580,6 +582,7 @@ bool ParsePackage(OmahaParserData::App* app, LOG(INFO) << "Found package " << package.name; OmahaResponse::Package out_package; + out_package.can_exclude = can_exclude; for (const string& codebase : app->url_codebase) { if (codebase.empty()) { LOG(ERROR) << "Omaha Response URL has empty codebase"; @@ -625,6 +628,42 @@ bool ParsePackage(OmahaParserData::App* app, return true; } +// Removes the candidate URLs which are excluded within packages, if all the +// candidate URLs are excluded within a package, the package will be excluded. +void ProcessExclusions(OmahaResponse* output_object, + ExcluderInterface* excluder) { + for (auto package_it = output_object->packages.begin(); + package_it != output_object->packages.end(); + /* Increment logic in loop */) { + // If package cannot be excluded, quickly continue. + if (!package_it->can_exclude) { + ++package_it; + continue; + } + // Remove the excluded payload URLs. + for (auto payload_url_it = package_it->payload_urls.begin(); + payload_url_it != package_it->payload_urls.end(); + /* Increment logic in loop */) { + auto exclusion_name = utils::GetExclusionName(*payload_url_it); + // If payload URL is not excluded, quickly continue. + if (!excluder->IsExcluded(exclusion_name)) { + ++payload_url_it; + continue; + } + LOG(INFO) << "Excluding payload URL=" << *payload_url_it + << " for payload hash=" << package_it->hash; + payload_url_it = package_it->payload_urls.erase(payload_url_it); + } + // If there are no candidate payload URLs, remove the package. + if (package_it->payload_urls.empty()) { + LOG(INFO) << "Excluding payload hash=" << package_it->hash; + package_it = output_object->packages.erase(package_it); + continue; + } + ++package_it; + } +} + // Parses the 2 key version strings kernel_version and firmware_version. If the // field is not present, or cannot be parsed the values default to 0xffff. void ParseRollbackVersions(int allowed_milestones, @@ -751,9 +790,15 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, // Package has to be parsed after Params now because ParseParams need to make // sure that postinstall action exists. - for (auto& app : parser_data->apps) - if (!ParsePackage(&app, output_object, completer)) + for (auto& app : parser_data->apps) { + // Only allow exclusions for a non-critical package during an update. For + // non-critical package installations, let the errors propagate instead + // of being handled inside update_engine as installations are a dlcservice + // specific feature. 
+ bool can_exclude = !params_->is_install() && params_->IsDlcAppId(app.id); + if (!ParsePackage(&app, output_object, can_exclude, completer)) return false; + } return true; } @@ -977,6 +1022,8 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, OmahaResponse output_object; if (!ParseResponse(&parser_data, &output_object, &completer)) return; + ProcessExclusions(&output_object, + system_state_->update_attempter()->GetExcluder()); output_object.update_exists = true; SetOutputObject(output_object); @@ -1469,6 +1516,14 @@ bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response, return true; } + // Currently non-critical updates always update alongside the platform update + // (a critical update) so this case should never actually be hit if the + // request to Omaha for updates are correct. In other words, stop the update + // from happening as there are no packages in the response to process. + if (response.packages.empty()) { + LOG(ERROR) << "All packages were excluded."; + } + // Note: We could technically delete the UpdateFirstSeenAt state when we // return true. If we do, it'll mean a device has to restart the // UpdateFirstSeenAt and thus help scattering take effect when the AU is diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index e530de46..6a0c2139 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -44,6 +44,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/fake_prefs.h" #include "update_engine/common/hash_calculator.h" +#include "update_engine/common/mock_excluder.h" #include "update_engine/common/mock_http_fetcher.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" @@ -75,6 +76,7 @@ using testing::ReturnPointee; using testing::ReturnRef; using testing::SaveArg; using testing::SetArgPointee; +using testing::StrictMock; namespace { @@ -204,7 +206,8 @@ struct FakeUpdateResponse { ? "" "" "" @@ -389,6 +392,9 @@ class OmahaRequestActionTest : public ::testing::Test { .expected_check_reaction = metrics::CheckReaction::kUpdating, .expected_download_error_code = metrics::DownloadErrorCode::kUnset, }; + + ON_CALL(*fake_system_state_.mock_update_attempter(), GetExcluder()) + .WillByDefault(Return(&mock_excluder_)); } // This function uses the parameters in |tuc_params_| to do an update check. @@ -429,6 +435,7 @@ class OmahaRequestActionTest : public ::testing::Test { bool expected_allow_p2p_for_sharing, const string& expected_p2p_url); + StrictMock mock_excluder_; FakeSystemState fake_system_state_; FakeUpdateResponse fake_update_response_; // Used by all tests. @@ -2759,8 +2766,44 @@ TEST_F(OmahaRequestActionTest, UpdateWithDlcTest) { {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); fake_update_response_.dlc_app_update = true; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_EQ(response.packages.size(), 2u); + // Two candidate URLs. 
+ EXPECT_EQ(response.packages[1].payload_urls.size(), 2u); + EXPECT_TRUE(response.update_exists); +} + +TEST_F(OmahaRequestActionTest, UpdateWithPartiallyExcludedDlcTest) { + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + fake_update_response_.dlc_app_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + // The first DLC candidate URL is excluded. + EXPECT_CALL(mock_excluder_, IsExcluded(_)) + .WillOnce(Return(true)) + .WillOnce(Return(false)); + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_EQ(response.packages.size(), 2u); + // One candidate URL. + EXPECT_EQ(response.packages[1].payload_urls.size(), 1u); + EXPECT_TRUE(response.update_exists); +} + +TEST_F(OmahaRequestActionTest, UpdateWithExcludedDlcTest) { + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + fake_update_response_.dlc_app_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + // Both DLC candidate URLs are excluded. + EXPECT_CALL(mock_excluder_, IsExcluded(_)) + .WillOnce(Return(true)) + .WillOnce(Return(true)); ASSERT_TRUE(TestUpdateCheck()); + EXPECT_EQ(response.packages.size(), 1u); EXPECT_TRUE(response.update_exists); } @@ -2769,6 +2812,7 @@ TEST_F(OmahaRequestActionTest, UpdateWithDeprecatedDlcTest) { {{request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}}); fake_update_response_.dlc_app_no_update = true; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); ASSERT_TRUE(TestUpdateCheck()); EXPECT_TRUE(response.update_exists); @@ -2781,6 +2825,7 @@ TEST_F(OmahaRequestActionTest, UpdateWithDlcAndDeprecatedDlcTest) { fake_update_response_.dlc_app_update = true; fake_update_response_.dlc_app_no_update = true; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); ASSERT_TRUE(TestUpdateCheck()); EXPECT_TRUE(response.update_exists); @@ -2991,4 +3036,37 @@ TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyInactiveTest) { EXPECT_EQ(temp_str, "4763"); } +TEST_F(OmahaRequestActionTest, OmahaResponseUpdateCanExcludeCheck) { + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + fake_update_response_.dlc_app_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); + ASSERT_TRUE(TestUpdateCheck()); + ASSERT_TRUE(delegate_.omaha_response_); + const auto& packages = delegate_.omaha_response_->packages; + ASSERT_EQ(packages.size(), 2); + + EXPECT_FALSE(packages[0].can_exclude); + EXPECT_TRUE(packages[1].can_exclude); +} + +TEST_F(OmahaRequestActionTest, OmahaResponseInstallCannotExcludeCheck) { + request_params_.set_is_install(true); + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + fake_update_response_.dlc_app_update = true; + tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); + ASSERT_TRUE(TestUpdateCheck()); + ASSERT_TRUE(delegate_.omaha_response_); + const auto& packages = delegate_.omaha_response_->packages; + ASSERT_EQ(packages.size(), 2); + + EXPECT_FALSE(packages[0].can_exclude); + EXPECT_FALSE(packages[1].can_exclude); +} + } // namespace 
chromeos_update_engine diff --git a/omaha_response.h b/omaha_response.h index ab253a19..2b86fe70 100644 --- a/omaha_response.h +++ b/omaha_response.h @@ -51,6 +51,9 @@ struct OmahaResponse { // True if the payload described in this response is a delta payload. // False if it's a full payload. bool is_delta = false; + // True if the payload can be excluded from updating if consistently faulty. + // False if the payload is critical to update. + bool can_exclude = false; }; std::vector packages; diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index 915e8392..040f8e79 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -70,6 +70,9 @@ void OmahaResponseHandlerAction::PerformAction() { } // This is the url to the first package, not all packages. + // (For updates): All |Action|s prior to this must pass in non-excluded URLs + // within the |OmahaResponse|, reference exlusion logic in + // |OmahaRequestAction| and keep the enforcement of exclusions for updates. install_plan_.download_url = current_url; install_plan_.version = response.version; install_plan_.system_version = response.system_version; diff --git a/payload_state.cc b/payload_state.cc index 2e07ad97..cf3aab91 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -312,6 +312,7 @@ void PayloadState::UpdateFailed(ErrorCode error) { case ErrorCode::kUnsupportedMinorPayloadVersion: case ErrorCode::kPayloadTimestampError: case ErrorCode::kVerityCalculationError: + ExcludeCurrentPayload(); IncrementUrlIndex(); break; @@ -502,10 +503,29 @@ void PayloadState::IncrementFailureCount() { } else { LOG(INFO) << "Reached max number of failures for Url" << GetUrlIndex() << ". Trying next available URL"; + ExcludeCurrentPayload(); IncrementUrlIndex(); } } +void PayloadState::ExcludeCurrentPayload() { + const auto& package = response_.packages[payload_index_]; + if (!package.can_exclude) { + LOG(INFO) << "Not excluding as marked non-excludable for package hash=" + << package.hash; + return; + } + auto exclusion_name = utils::GetExclusionName(GetCurrentUrl()); + if (!excluder_->Exclude(exclusion_name)) + LOG(WARNING) << "Failed to exclude " + << " Package Hash=" << package.hash + << " CurrentUrl=" << GetCurrentUrl(); + else + LOG(INFO) << "Excluded " + << " Package Hash=" << package.hash + << " CurrentUrl=" << GetCurrentUrl(); +} + void PayloadState::UpdateBackoffExpiryTime() { if (response_.disable_payload_backoff) { LOG(INFO) << "Resetting backoff expiry time as payload backoff is disabled"; diff --git a/payload_state.h b/payload_state.h index bc4bf0dd..d13c6420 100644 --- a/payload_state.h +++ b/payload_state.h @@ -158,6 +158,9 @@ class PayloadState : public PayloadStateInterface { FRIEND_TEST(PayloadStateTest, RollbackVersion); FRIEND_TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs); FRIEND_TEST(PayloadStateTest, NextPayloadResetsUrlIndex); + FRIEND_TEST(PayloadStateTest, ExcludeNoopForNonExcludables); + FRIEND_TEST(PayloadStateTest, ExcludeOnlyCanExcludables); + FRIEND_TEST(PayloadStateTest, IncrementFailureExclusionTest); // Helper called when an attempt has begun, is called by // UpdateResumed(), UpdateRestarted() and Rollback(). @@ -182,6 +185,12 @@ class PayloadState : public PayloadStateInterface { // to the next URL and resets the failure count for that URL. void IncrementFailureCount(); + // Excludes the current payload + current candidate URL from being part of + // future updates/retries. 
Whenever |SetResponse()| or |NextPayload()| decide + // on the initial current URL index and the next payload respectively, it will + // advanced based on exclusions. + void ExcludeCurrentPayload(); + // Updates the backoff expiry time exponentially based on the current // payload attempt number. void UpdateBackoffExpiryTime(); diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index 4a0afcfe..bf9aed44 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -23,9 +23,11 @@ #include #include "update_engine/common/constants.h" +#include "update_engine/common/excluder_interface.h" #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/fake_prefs.h" +#include "update_engine/common/mock_excluder.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/prefs.h" #include "update_engine/common/test_utils.h" @@ -44,6 +46,7 @@ using testing::Mock; using testing::NiceMock; using testing::Return; using testing::SetArgPointee; +using testing::StrictMock; namespace chromeos_update_engine { @@ -1012,10 +1015,6 @@ TEST(PayloadStateTest, RollbackVersion) { NiceMock* mock_powerwash_safe_prefs = fake_system_state.mock_powerwash_safe_prefs(); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); - - // Verify pre-conditions are good. - EXPECT_TRUE(payload_state.GetRollbackVersion().empty()); // Mock out the os version and make sure it's blacklisted correctly. string rollback_version = "2345.0.0"; @@ -1023,6 +1022,11 @@ TEST(PayloadStateTest, RollbackVersion) { params.Init(rollback_version, "", false); fake_system_state.set_request_params(¶ms); + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + // Verify pre-conditions are good. + EXPECT_TRUE(payload_state.GetRollbackVersion().empty()); + EXPECT_CALL(*mock_powerwash_safe_prefs, SetString(kPrefsRollbackVersion, rollback_version)); payload_state.Rollback(); @@ -1353,15 +1357,15 @@ TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) { PayloadState payload_state; FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); - SetupPayloadStateWith2Urls( - "Hash6437", true, false, &payload_state, &response); - // Mock the request to a request where the delta was disabled. OmahaRequestParams params(&fake_system_state); params.set_delta_okay(false); fake_system_state.set_request_params(¶ms); + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + SetupPayloadStateWith2Urls( + "Hash6437", true, false, &payload_state, &response); + // Simulate a successful download and update. 
payload_state.DownloadComplete(); @@ -1658,6 +1662,9 @@ TEST(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { TEST(PayloadStateTest, NextPayloadResetsUrlIndex) { PayloadState payload_state; FakeSystemState fake_system_state; + StrictMock mock_excluder; + EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + .WillOnce(Return(&mock_excluder)); EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); OmahaResponse response; @@ -1682,4 +1689,93 @@ TEST(PayloadStateTest, NextPayloadResetsUrlIndex) { EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1b"); } +TEST(PayloadStateTest, ExcludeNoopForNonExcludables) { + PayloadState payload_state; + FakeSystemState fake_system_state; + StrictMock mock_excluder; + EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + .WillOnce(Return(&mock_excluder)); + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + OmahaResponse response; + response.packages.push_back( + {.payload_urls = {"http://test1a", "http://test2a"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash", + .can_exclude = false}); + payload_state.SetResponse(response); + + EXPECT_CALL(mock_excluder, Exclude(_)).Times(0); + payload_state.ExcludeCurrentPayload(); +} + +TEST(PayloadStateTest, ExcludeOnlyCanExcludables) { + PayloadState payload_state; + FakeSystemState fake_system_state; + StrictMock mock_excluder; + EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + .WillOnce(Return(&mock_excluder)); + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + OmahaResponse response; + response.packages.push_back( + {.payload_urls = {"http://test1a", "http://test2a"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash", + .can_exclude = true}); + payload_state.SetResponse(response); + + EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a"))) + .WillOnce(Return(true)); + payload_state.ExcludeCurrentPayload(); +} + +TEST(PayloadStateTest, IncrementFailureExclusionTest) { + PayloadState payload_state; + FakeSystemState fake_system_state; + StrictMock mock_excluder; + EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + .WillOnce(Return(&mock_excluder)); + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + OmahaResponse response; + // Critical package. + response.packages.push_back( + {.payload_urls = {"http://crit-test1a", "http://crit-test2a"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash", + .can_exclude = false}); + // Non-critical package. + response.packages.push_back( + {.payload_urls = {"http://test1a", "http://test2a"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash", + .can_exclude = true}); + response.max_failure_count_per_url = 2; + payload_state.SetResponse(response); + + // Critical package won't be excluded. + // Increment twice as failure count allowed per URL is set to 2. + payload_state.IncrementFailureCount(); + payload_state.IncrementFailureCount(); + + EXPECT_TRUE(payload_state.NextPayload()); + + // First increment failure should not exclude. + payload_state.IncrementFailureCount(); + + // Second increment failure should exclude. 
+ EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a"))) + .WillOnce(Return(true)); + payload_state.IncrementFailureCount(); +} + } // namespace chromeos_update_engine From 99d570d67bd5dab11de321068c4002ab76ae774a Mon Sep 17 00:00:00 2001 From: Tianjie Date: Thu, 4 Jun 2020 14:57:19 -0700 Subject: [PATCH 309/624] Implement PartitionUpdateGenerator for partial updates Implement the logic in PartitionUpdateGenerator. Here's the summary, 1. finds the a/b partitions (both static & dynamic) on the device For partitions not included in the payload: 2. calculates the partition hash for filesystem verification 3. generates one SOURCE_COPY operation for each static partition The order of the partitions are sorted. So the update will resume from the correct operation in case it's interruptted. Bug: 157778739 Test: run a partial OTA with boot & system_ext Change-Id: I5683b85e3c6dab813a33d5144aceb996fd8163d4 --- Android.bp | 4 +- common/dynamic_partition_control_interface.h | 12 + common/dynamic_partition_control_stub.cc | 9 + common/dynamic_partition_control_stub.h | 5 + dynamic_partition_control_android.cc | 32 +++ dynamic_partition_control_android.h | 9 +- payload_consumer/delta_performer.cc | 3 +- .../partition_update_generator_android.cc | 224 +++++++++++++++++- .../partition_update_generator_android.h | 36 ++- ...ition_update_generator_android_unittest.cc | 162 +++++++++++++ .../partition_update_generator_interface.h | 2 +- 11 files changed, 486 insertions(+), 12 deletions(-) create mode 100644 payload_consumer/partition_update_generator_android_unittest.cc diff --git a/Android.bp b/Android.bp index 59d698d8..a5223c76 100644 --- a/Android.bp +++ b/Android.bp @@ -123,15 +123,16 @@ cc_defaults { "libbz", "libbspatch", "libbrotli", + "libc++fs", "libfec_rs", "libpuffpatch", "libverity_tree", ], shared_libs: [ - "libziparchive", "libbase", "libcrypto", "libfec", + "libziparchive", ], } @@ -731,6 +732,7 @@ cc_test { "payload_consumer/file_descriptor_utils_unittest.cc", "payload_consumer/file_writer_unittest.cc", "payload_consumer/filesystem_verifier_action_unittest.cc", + "payload_consumer/partition_update_generator_android_unittest.cc", "payload_consumer/postinstall_runner_action_unittest.cc", "payload_consumer/verity_writer_android_unittest.cc", "payload_consumer/xz_extent_writer_unittest.cc", diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 58ebfe46..7289deee 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -21,6 +21,7 @@ #include #include +#include #include "update_engine/common/action.h" #include "update_engine/common/cleanup_previous_update_action_delegate.h" @@ -118,6 +119,17 @@ class DynamicPartitionControlInterface { // progress, while ResetUpdate() forcefully free previously // allocated space for snapshot updates. virtual bool ResetUpdate(PrefsInterface* prefs) = 0; + + // Reads the dynamic partitions metadata from the current slot, and puts the + // name of the dynamic partitions with the current suffix to |partitions|. + // Returns true on success. + virtual bool ListDynamicPartitionsForSlot( + uint32_t current_slot, std::vector* partitions) = 0; + + // Finds a possible location that list all block devices by name; and puts + // the result in |path|. Returns true on success. 
+ // Sample result: /dev/block/by-name/ + virtual bool GetDeviceDir(std::string* path) = 0; }; } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 903b7ee0..cde36afc 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -67,4 +67,13 @@ bool DynamicPartitionControlStub::ResetUpdate(PrefsInterface* prefs) { return false; } +bool DynamicPartitionControlStub::ListDynamicPartitionsForSlot( + uint32_t current_slot, std::vector* partitions) { + return true; +} + +bool DynamicPartitionControlStub::GetDeviceDir(std::string* path) { + return true; +} + } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index d8e254ea..28e3e6a5 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -21,6 +21,7 @@ #include #include +#include #include "update_engine/common/dynamic_partition_control_interface.h" @@ -46,6 +47,10 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { PrefsInterface* prefs, CleanupPreviousUpdateActionDelegateInterface* delegate) override; bool ResetUpdate(PrefsInterface* prefs) override; + + bool ListDynamicPartitionsForSlot( + uint32_t current_slot, std::vector* partitions) override; + bool GetDeviceDir(std::string* path) override; }; } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 79c269c4..6817c21e 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -1081,6 +1083,36 @@ bool DynamicPartitionControlAndroid::ResetUpdate(PrefsInterface* prefs) { return true; } +bool DynamicPartitionControlAndroid::ListDynamicPartitionsForSlot( + uint32_t current_slot, std::vector* partitions) { + if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) { + LOG(ERROR) << "Dynamic partition is not enabled"; + return false; + } + + std::string device_dir_str; + TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str)); + base::FilePath device_dir(device_dir_str); + auto super_device = + device_dir.Append(GetSuperPartitionName(current_slot)).value(); + auto builder = LoadMetadataBuilder(super_device, current_slot); + TEST_AND_RETURN_FALSE(builder != nullptr); + + std::vector result; + auto suffix = SlotSuffixForSlotNumber(current_slot); + for (const auto& group : builder->ListGroups()) { + for (const auto& partition : builder->ListPartitionsInGroup(group)) { + std::string_view partition_name = partition->name(); + if (!android::base::ConsumeSuffix(&partition_name, suffix)) { + continue; + } + result.emplace_back(partition_name); + } + } + *partitions = std::move(result); + return true; +} + bool DynamicPartitionControlAndroid::ExpectMetadataMounted() { // No need to mount metadata for non-Virtual A/B devices. 
if (!GetVirtualAbFeatureFlag().IsEnabled()) { diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index e3bedbcb..69026a40 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -53,6 +54,11 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { bool ResetUpdate(PrefsInterface* prefs) override; + bool ListDynamicPartitionsForSlot( + uint32_t current_slot, std::vector* partitions) override; + + bool GetDeviceDir(std::string* path) override; + // Return the device for partition |partition_name| at slot |slot|. // |current_slot| should be set to the current active slot. // Note: this function is only used by BootControl*::GetPartitionDevice. @@ -131,9 +137,6 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { virtual std::unique_ptr LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot); - // Return a possible location for devices listed by name. - virtual bool GetDeviceDir(std::string* path); - // Return the name of the super partition (which stores super partition // metadata) for a given slot. virtual std::string GetSuperPartitionName(uint32_t slot); diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index d1de9f4d..d9b739de 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -879,7 +879,8 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { touched_partitions.insert(partition_update.partition_name()); } - auto generator = partition_update_generator::Create(boot_control_); + auto generator = partition_update_generator::Create(boot_control_, + manifest_.block_size()); std::vector other_partitions; TEST_AND_RETURN_FALSE( generator->GenerateOperationsForPartitionsNotInPayload( diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc index fcacc86d..aa3f2e57 100644 --- a/payload_consumer/partition_update_generator_android.cc +++ b/payload_consumer/partition_update_generator_android.cc @@ -16,25 +16,241 @@ #include "update_engine/payload_consumer/partition_update_generator_android.h" +#include #include +#include +#include +#include + +#include +#include + +#include "update_engine/common/hash_calculator.h" +#include "update_engine/common/utils.h" + +namespace { +// TODO(xunchang) use definition in fs_mgr, e.g. fs_mgr_get_slot_suffix +const char* SUFFIX_A = "_a"; +const char* SUFFIX_B = "_b"; +} // namespace namespace chromeos_update_engine { +PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid( + BootControlInterface* boot_control, + std::string device_dir, + size_t block_size) + : boot_control_(boot_control), + block_device_dir_(std::move(device_dir)), + block_size_(block_size) {} + bool PartitionUpdateGeneratorAndroid:: GenerateOperationsForPartitionsNotInPayload( BootControlInterface::Slot source_slot, BootControlInterface::Slot target_slot, const std::set& partitions_in_payload, std::vector* update_list) { - // TODO(xunchang) implement the function - CHECK(boot_control_); + auto ret = GetStaticAbPartitionsOnDevice(); + if (!ret.has_value()) { + LOG(ERROR) << "Failed to load static a/b partitions"; + return false; + } + auto ab_partitions = ret.value(); + + // Add the dynamic partitions. 
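+  // These come from the source slot's super partition metadata; the slot
+  // suffix is stripped so the names line up with the static partition names
+  // collected above.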
+ auto dynamic_control = boot_control_->GetDynamicPartitionControl(); + std::vector dynamic_partitions; + if (!dynamic_control->ListDynamicPartitionsForSlot(source_slot, + &dynamic_partitions)) { + LOG(ERROR) << "Failed to load dynamic partitions from slot " << source_slot; + return false; + } + ab_partitions.insert(dynamic_partitions.begin(), dynamic_partitions.end()); + + std::vector partition_updates; + for (const auto& partition_name : ab_partitions) { + if (partitions_in_payload.find(partition_name) != + partitions_in_payload.end()) { + LOG(INFO) << partition_name << " has included in payload"; + continue; + } + + auto partition_update = + CreatePartitionUpdate(partition_name, source_slot, target_slot); + if (!partition_update.has_value()) { + LOG(ERROR) << "Failed to create partition update for " << partition_name; + return false; + } + partition_updates.push_back(partition_update.value()); + } + *update_list = std::move(partition_updates); return true; } +std::optional> +PartitionUpdateGeneratorAndroid::GetStaticAbPartitionsOnDevice() { + if (std::error_code error_code; + !std::filesystem::exists(block_device_dir_, error_code) || error_code) { + LOG(ERROR) << "Failed to find " << block_device_dir_ << " " + << error_code.message(); + return std::nullopt; + } + + std::error_code error_code; + auto it = std::filesystem::directory_iterator(block_device_dir_, error_code); + if (error_code) { + LOG(ERROR) << "Failed to iterate " << block_device_dir_ << " " + << error_code.message(); + return std::nullopt; + } + + std::set partitions_with_suffix; + for (const auto& entry : it) { + auto partition_name = entry.path().filename().string(); + if (android::base::EndsWith(partition_name, SUFFIX_A) || + android::base::EndsWith(partition_name, SUFFIX_B)) { + partitions_with_suffix.insert(partition_name); + } + } + + // Second iteration to add the partition name without suffixes. + std::set ab_partitions; + for (std::string_view name : partitions_with_suffix) { + if (!android::base::ConsumeSuffix(&name, SUFFIX_A)) { + continue; + } + + // Add to the output list if the partition exist for both slot a and b. + auto base_name = std::string(name); + if (partitions_with_suffix.find(base_name + SUFFIX_B) != + partitions_with_suffix.end()) { + ab_partitions.insert(base_name); + } else { + LOG(WARNING) << "Failed to find the b partition for " << base_name; + } + } + + return ab_partitions; +} + +std::optional +PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( + const std::string& partition_name, + BootControlInterface::Slot source_slot, + BootControlInterface::Slot target_slot) { + bool is_source_dynamic = false; + std::string source_device; + if (!boot_control_->GetPartitionDevice(partition_name, + source_slot, + true, /* not_in_payload */ + &source_device, + &is_source_dynamic)) { + LOG(ERROR) << "Failed to load source " << partition_name; + return std::nullopt; + } + bool is_target_dynamic = false; + std::string target_device; + if (!boot_control_->GetPartitionDevice(partition_name, + target_slot, + true, + &target_device, + &is_target_dynamic)) { + LOG(ERROR) << "Failed to load target " << partition_name; + return std::nullopt; + } + + if (is_source_dynamic != is_target_dynamic) { + LOG(ERROR) << "Source slot " << source_slot << " for partition " + << partition_name << " is " << (is_source_dynamic ? "" : "not") + << " dynamic, but target slot " << target_slot << " is " + << (is_target_dynamic ? 
"" : "not") << " dynamic."; + return std::nullopt; + } + auto source_size = utils::FileSize(source_device); + auto target_size = utils::FileSize(target_device); + if (source_size == -1 || target_size == -1 || source_size != target_size || + source_size % block_size_ != 0) { + LOG(ERROR) << "Invalid partition size. source size " << source_size + << ", target size " << target_size; + return std::nullopt; + } + + return CreatePartitionUpdate(partition_name, + source_device, + target_device, + source_size, + is_source_dynamic); +} + +std::optional +PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( + const std::string& partition_name, + const std::string& source_device, + const std::string& target_device, + int64_t partition_size, + bool is_dynamic) { + PartitionUpdate partition_update; + partition_update.set_partition_name(partition_name); + auto old_partition_info = partition_update.mutable_old_partition_info(); + old_partition_info->set_size(partition_size); + + auto raw_hash = CalculateHashForPartition(source_device, partition_size); + if (!raw_hash.has_value()) { + return {}; + } + old_partition_info->set_hash(raw_hash->data(), raw_hash->size()); + auto new_partition_info = partition_update.mutable_new_partition_info(); + new_partition_info->set_size(partition_size); + new_partition_info->set_hash(raw_hash->data(), raw_hash->size()); + // TODO(xunchang) TBD, should we skip hashing and verification of the + // dynamic partitions not in payload? + if (!is_dynamic) { + auto copy_operation = partition_update.add_operations(); + copy_operation->set_type(InstallOperation::SOURCE_COPY); + Extent copy_extent; + copy_extent.set_start_block(0); + copy_extent.set_num_blocks(partition_size / block_size_); + + *copy_operation->add_src_extents() = copy_extent; + *copy_operation->add_dst_extents() = copy_extent; + } + + return partition_update; +} + +std::optional +PartitionUpdateGeneratorAndroid::CalculateHashForPartition( + const std::string& block_device, int64_t partition_size) { + // TODO(xunchang) compute the hash with ecc partitions first, the hashing + // behavior should match the one in SOURCE_COPY. Also, we don't have the + // correct hash for source partition. + // An alternative way is to verify the written bytes match the read bytes + // during filesystem verification. This could probably save us a read of + // partitions here. 
+ brillo::Blob raw_hash; + if (HashCalculator::RawHashOfFile(block_device, partition_size, &raw_hash) != + partition_size) { + LOG(ERROR) << "Failed to calculate hash for " << block_device; + return std::nullopt; + } + + return raw_hash; +} + namespace partition_update_generator { std::unique_ptr Create( - BootControlInterface* boot_control) { - return std::make_unique(boot_control); + BootControlInterface* boot_control, size_t block_size) { + CHECK(boot_control); + auto dynamic_control = boot_control->GetDynamicPartitionControl(); + CHECK(dynamic_control); + std::string dir_path; + if (!dynamic_control->GetDeviceDir(&dir_path)) { + return nullptr; + } + + return std::unique_ptr( + new PartitionUpdateGeneratorAndroid( + boot_control, std::move(dir_path), block_size)); } } // namespace partition_update_generator diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h index bb50133a..8f33077f 100644 --- a/payload_consumer/partition_update_generator_android.h +++ b/payload_consumer/partition_update_generator_android.h @@ -17,10 +17,14 @@ #ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_ #define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_ +#include #include #include #include +#include +#include // for FRIEND_TEST + #include "update_engine/common/boot_control_interface.h" #include "update_engine/payload_consumer/partition_update_generator_interface.h" @@ -28,8 +32,9 @@ namespace chromeos_update_engine { class PartitionUpdateGeneratorAndroid : public PartitionUpdateGeneratorInterface { public: - explicit PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control) - : boot_control_(boot_control) {} + PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control, + std::string device_dir, + size_t block_size); bool GenerateOperationsForPartitionsNotInPayload( BootControlInterface::Slot source_slot, @@ -38,7 +43,34 @@ class PartitionUpdateGeneratorAndroid std::vector* update_list) override; private: + friend class PartitionUpdateGeneratorAndroidTest; + FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions); + FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate); + + // Gets the name of the static a/b partitions on the device. + std::optional> GetStaticAbPartitionsOnDevice(); + + // Creates a PartitionUpdate object for a given partition to update from + // source to target. Returns std::nullopt on failure. 
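+  // The returned update reuses the source partition's hash for both
+  // old_partition_info and new_partition_info, and for static (non-dynamic)
+  // partitions it carries a single SOURCE_COPY operation spanning the whole
+  // partition.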
+ std::optional CreatePartitionUpdate( + const std::string& partition_name, + const std::string& source_device, + const std::string& target_device, + int64_t partition_size, + bool is_dynamic); + + std::optional CreatePartitionUpdate( + const std::string& partition_name, + BootControlInterface::Slot source_slot, + BootControlInterface::Slot target_slot); + + std::optional CalculateHashForPartition( + const std::string& block_device, int64_t partition_size); + BootControlInterface* boot_control_; + // Path to look for a/b partitions + std::string block_device_dir_; + size_t block_size_; }; } // namespace chromeos_update_engine diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc new file mode 100644 index 00000000..c3be9dbc --- /dev/null +++ b/payload_consumer/partition_update_generator_android_unittest.cc @@ -0,0 +1,162 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_consumer/partition_update_generator_android.h" + +#include +#include +#include +#include + +#include +#include +#include + +#include "update_engine/common/fake_boot_control.h" +#include "update_engine/common/hash_calculator.h" +#include "update_engine/common/test_utils.h" +#include "update_engine/common/utils.h" + +namespace chromeos_update_engine { + +class PartitionUpdateGeneratorAndroidTest : public ::testing::Test { + protected: + void SetUp() override { + ASSERT_TRUE(device_dir_.CreateUniqueTempDir()); + boot_control_ = std::make_unique(); + boot_control_->SetNumSlots(2); + auto generator = + partition_update_generator::Create(boot_control_.get(), 4096); + generator_.reset( + static_cast(generator.release())); + ASSERT_TRUE(boot_control_); + ASSERT_TRUE(generator_); + generator_->block_device_dir_ = device_dir_.GetPath().value(); + } + + std::unique_ptr generator_; + std::unique_ptr boot_control_; + + base::ScopedTempDir device_dir_; + + void SetUpBlockDevice(const std::map& contents) { + for (const auto& [name, content] : contents) { + auto path = generator_->block_device_dir_ + "/" + name; + ASSERT_TRUE( + utils::WriteFile(path.c_str(), content.data(), content.size())); + + if (android::base::EndsWith(name, "_a")) { + boot_control_->SetPartitionDevice( + name.substr(0, name.size() - 2), 0, path); + } else if (android::base::EndsWith(name, "_b")) { + boot_control_->SetPartitionDevice( + name.substr(0, name.size() - 2), 1, path); + } + } + } + + void CheckPartitionUpdate(const std::string& name, + const std::string& content, + const PartitionUpdate& partition_update) { + ASSERT_EQ(name, partition_update.partition_name()); + + brillo::Blob out_hash; + ASSERT_TRUE(HashCalculator::RawHashOfBytes( + content.data(), content.size(), &out_hash)); + ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()), + partition_update.old_partition_info().hash()); + ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()), + 
partition_update.new_partition_info().hash()); + + ASSERT_EQ(1, partition_update.operations_size()); + const auto& operation = partition_update.operations(0); + ASSERT_EQ(InstallOperation::SOURCE_COPY, operation.type()); + + ASSERT_EQ(1, operation.src_extents_size()); + ASSERT_EQ(0u, operation.src_extents(0).start_block()); + ASSERT_EQ(content.size() / 4096, operation.src_extents(0).num_blocks()); + + ASSERT_EQ(1, operation.dst_extents_size()); + ASSERT_EQ(0u, operation.dst_extents(0).start_block()); + ASSERT_EQ(content.size() / 4096, operation.dst_extents(0).num_blocks()); + } +}; + +TEST_F(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions) { + std::map contents = { + {"system_a", ""}, + {"system_b", ""}, + {"vendor_a", ""}, + {"vendor_b", ""}, + {"persist", ""}, + {"vbmeta_a", ""}, + {"vbmeta_b", ""}, + {"boot_a", ""}, + {"boot_b", ""}, + }; + + SetUpBlockDevice(contents); + auto partitions = generator_->GetStaticAbPartitionsOnDevice(); + ASSERT_EQ(std::set({"system", "vendor", "vbmeta", "boot"}), + partitions); +} + +TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) { + auto system_contents = std::string(4096 * 2, '1'); + auto boot_contents = std::string(4096 * 5, 'b'); + std::map contents = { + {"system_a", system_contents}, + {"system_b", std::string(4096 * 2, 0)}, + {"boot_a", boot_contents}, + {"boot_b", std::string(4096 * 5, 0)}, + }; + SetUpBlockDevice(contents); + + auto system_partition_update = + generator_->CreatePartitionUpdate("system", 0, 1); + ASSERT_TRUE(system_partition_update.has_value()); + CheckPartitionUpdate( + "system", system_contents, system_partition_update.value()); + + auto boot_partition_update = generator_->CreatePartitionUpdate("boot", 0, 1); + ASSERT_TRUE(boot_partition_update.has_value()); + CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value()); +} + +TEST_F(PartitionUpdateGeneratorAndroidTest, GenerateOperations) { + auto system_contents = std::string(4096 * 10, '2'); + auto boot_contents = std::string(4096 * 5, 'b'); + std::map contents = { + {"system_a", system_contents}, + {"system_b", std::string(4096 * 10, 0)}, + {"boot_a", boot_contents}, + {"boot_b", std::string(4096 * 5, 0)}, + {"vendor_a", ""}, + {"vendor_b", ""}, + {"persist", ""}, + }; + SetUpBlockDevice(contents); + + std::vector update_list; + ASSERT_TRUE(generator_->GenerateOperationsForPartitionsNotInPayload( + 0, 1, std::set{"vendor"}, &update_list)); + + ASSERT_EQ(2u, update_list.size()); + CheckPartitionUpdate("boot", boot_contents, update_list[0]); + CheckPartitionUpdate("system", system_contents, update_list[1]); +} + +} // namespace chromeos_update_engine diff --git a/payload_consumer/partition_update_generator_interface.h b/payload_consumer/partition_update_generator_interface.h index 0341d40b..3fa3dfbc 100644 --- a/payload_consumer/partition_update_generator_interface.h +++ b/payload_consumer/partition_update_generator_interface.h @@ -47,7 +47,7 @@ class PartitionUpdateGeneratorInterface { namespace partition_update_generator { std::unique_ptr Create( - BootControlInterface* boot_control); + BootControlInterface* boot_control, size_t block_size); } } // namespace chromeos_update_engine From c3fd86a8be7487278442edac61d2dd509010490c Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 2 Jun 2020 15:08:37 -0700 Subject: [PATCH 310/624] Fix update_engine libchrome log tag. 
Test: boot and inspect logcat Fixes: 158030597 Bug: 158817816 Change-Id: I931af4faf6219ce42ff94080ea834721726dc0ba (cherry picked from commit 550ade4e44a72708354597307122a3f46e43a7d6) --- logging_android.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/logging_android.cc b/logging_android.cc index 88b068bc..0219075c 100644 --- a/logging_android.cc +++ b/logging_android.cc @@ -241,8 +241,8 @@ bool RedirectToLiblog(int severity, ignore_result(android::base::ConsumeSuffix(&sv, "\n")); std::string str(sv.data(), sv.size()); // This will eventually be redirected to CombinedLogger. - // |tag| is ignored by CombinedLogger, so just leave it empty. - __android_log_write(priority, "" /* tag */, str.c_str()); + // Use nullptr as tag so that liblog infers log tag from getprogname(). + __android_log_write(priority, nullptr /* tag */, str.c_str()); return true; } From 3e69b4ceb8775c4d5c0f2707fce7b59f92a0aaa5 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 16 Jun 2020 09:23:39 -0700 Subject: [PATCH 311/624] update_engine: Update complete event with exclusions When updates are complete, currently all the AppIDs within the request parameter are considered to be updated. This however is not true with exclusions as non-critical AppIDs (e.g. DLCs) can be excluded. This change sends the correct event for |kTypeUpdateComplete| event type. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine update_engine-client Change-Id: I8c21721688fb8a6501316cb87bd0a6f8e005b7ae Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2247489 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Reviewed-by: Manoj Gupta Auto-Submit: Jae Hoon Kim Commit-Queue: Andrew Lassalle --- common/error_code.h | 1 + common/error_code_utils.cc | 2 + metrics_utils.cc | 2 + omaha_request_action.cc | 6 ++ omaha_request_action_unittest.cc | 10 +- omaha_request_builder_xml.cc | 13 ++- omaha_request_builder_xml_unittest.cc | 135 +++++++++++++++++++++----- omaha_response.h | 2 + payload_state.cc | 1 + update_manager/chromeos_policy.cc | 1 + 10 files changed, 141 insertions(+), 32 deletions(-) diff --git a/common/error_code.h b/common/error_code.h index 3dd74028..7acb3b62 100644 --- a/common/error_code.h +++ b/common/error_code.h @@ -83,6 +83,7 @@ enum class ErrorCode : int { kInternalLibCurlError = 57, kUnresolvedHostError = 58, kUnresolvedHostRecovered = 59, + kPackageExcludedFromUpdate = 60, // VERY IMPORTANT! When adding new error codes: // diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index 397cdf24..55d876fc 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -167,6 +167,8 @@ string ErrorCodeToString(ErrorCode code) { return "ErrorCode::kUnresolvedHostError"; case ErrorCode::kUnresolvedHostRecovered: return "ErrorCode::kUnresolvedHostRecovered"; + case ErrorCode::kPackageExcludedFromUpdate: + return "ErrorCode::kPackageExcludedFromUpdate"; // Don't add a default case to let the compiler warn about newly added // error codes which should be added here. } diff --git a/metrics_utils.cc b/metrics_utils.cc index efbd067d..0d333ca1 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -122,6 +122,7 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { case ErrorCode::kOmahaUpdateIgnoredOverCellular: case ErrorCode::kNoUpdate: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: + case ErrorCode::kPackageExcludedFromUpdate: return metrics::AttemptResult::kInternalError; // Special flags. 
These can't happen (we mask them out above) but @@ -236,6 +237,7 @@ metrics::DownloadErrorCode GetDownloadErrorCode(ErrorCode code) { case ErrorCode::kRollbackNotPossible: case ErrorCode::kFirstActiveOmahaPingSentPersistenceError: case ErrorCode::kVerityCalculationError: + case ErrorCode::kPackageExcludedFromUpdate: break; // Special flags. These can't happen (we mask them out above) but diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 3a0b91c7..83ee5b22 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -582,6 +582,7 @@ bool ParsePackage(OmahaParserData::App* app, LOG(INFO) << "Found package " << package.name; OmahaResponse::Package out_package; + out_package.app_id = app->id; out_package.can_exclude = can_exclude; for (const string& codebase : app->url_codebase) { if (codebase.empty()) { @@ -631,6 +632,7 @@ bool ParsePackage(OmahaParserData::App* app, // Removes the candidate URLs which are excluded within packages, if all the // candidate URLs are excluded within a package, the package will be excluded. void ProcessExclusions(OmahaResponse* output_object, + OmahaRequestParams* params, ExcluderInterface* excluder) { for (auto package_it = output_object->packages.begin(); package_it != output_object->packages.end(); @@ -657,6 +659,9 @@ void ProcessExclusions(OmahaResponse* output_object, // If there are no candidate payload URLs, remove the package. if (package_it->payload_urls.empty()) { LOG(INFO) << "Excluding payload hash=" << package_it->hash; + // Need to set DLC as not updated so correct metrics can be sent when an + // update is completed. + params->SetDlcNoUpdate(package_it->app_id); package_it = output_object->packages.erase(package_it); continue; } @@ -1023,6 +1028,7 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, if (!ParseResponse(&parser_data, &output_object, &completer)) return; ProcessExclusions(&output_object, + system_state_->request_params(), system_state_->update_attempter()->GetExcluder()); output_object.update_exists = true; SetOutputObject(output_object); diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 6a0c2139..e608c077 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -2776,8 +2776,8 @@ TEST_F(OmahaRequestActionTest, UpdateWithDlcTest) { } TEST_F(OmahaRequestActionTest, UpdateWithPartiallyExcludedDlcTest) { - request_params_.set_dlc_apps_params( - {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + const string kDlcAppId = request_params_.GetDlcAppId(kDlcId1); + request_params_.set_dlc_apps_params({{kDlcAppId, {.name = kDlcId1}}}); fake_update_response_.dlc_app_update = true; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); // The first DLC candidate URL is excluded. @@ -2790,11 +2790,12 @@ TEST_F(OmahaRequestActionTest, UpdateWithPartiallyExcludedDlcTest) { // One candidate URL. EXPECT_EQ(response.packages[1].payload_urls.size(), 1u); EXPECT_TRUE(response.update_exists); + EXPECT_TRUE(request_params_.dlc_apps_params().at(kDlcAppId).updated); } TEST_F(OmahaRequestActionTest, UpdateWithExcludedDlcTest) { - request_params_.set_dlc_apps_params( - {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + const string kDlcAppId = request_params_.GetDlcAppId(kDlcId1); + request_params_.set_dlc_apps_params({{kDlcAppId, {.name = kDlcId1}}}); fake_update_response_.dlc_app_update = true; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); // Both DLC candidate URLs are excluded. 
@@ -2805,6 +2806,7 @@ TEST_F(OmahaRequestActionTest, UpdateWithExcludedDlcTest) { EXPECT_EQ(response.packages.size(), 1u); EXPECT_TRUE(response.update_exists); + EXPECT_FALSE(request_params_.dlc_apps_params().at(kDlcAppId).updated); } TEST_F(OmahaRequestActionTest, UpdateWithDeprecatedDlcTest) { diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 097b9f1e..2eb71bb6 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -184,17 +184,26 @@ string OmahaRequestBuilderXml::GetAppBody(const OmahaAppData& app_data) const { } } } else { + int event_result = event_->result; // The error code is an optional attribute so append it only if the result // is not success. string error_code; - if (event_->result != OmahaEvent::kResultSuccess) { + if (event_result != OmahaEvent::kResultSuccess) { error_code = base::StringPrintf(" errorcode=\"%d\"", static_cast(event_->error_code)); + } else if (app_data.is_dlc && !app_data.app_params.updated) { + // On a |OmahaEvent::kResultSuccess|, if the event is for an update + // completion and the App is a DLC, send error for excluded DLCs as they + // did not update. + event_result = OmahaEvent::Result::kResultError; + error_code = base::StringPrintf( + " errorcode=\"%d\"", + static_cast(ErrorCode::kPackageExcludedFromUpdate)); } app_body = base::StringPrintf( " \n", event_->type, - event_->result, + event_result, error_code.c_str()); } diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index 017acecf..291189d6 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -148,10 +148,10 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { 0, fake_system_state_.prefs(), ""}; - const string request_xml = omaha_request.GetRequest(); + const string kRequestXml = omaha_request.GetRequest(); const string key = "requestid"; const string request_id = - FindAttributeKeyValueInXml(request_xml, key, kGuidSize); + FindAttributeKeyValueInXml(kRequestXml, key, kGuidSize); // A valid |request_id| is either a GUID version 4 or empty string. if (!request_id.empty()) EXPECT_TRUE(base::IsValidGUID(request_id)); @@ -169,10 +169,10 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { 0, fake_system_state_.prefs(), gen_session_id}; - const string request_xml = omaha_request.GetRequest(); + const string kRequestXml = omaha_request.GetRequest(); const string key = "sessionid"; const string session_id = - FindAttributeKeyValueInXml(request_xml, key, kGuidSize); + FindAttributeKeyValueInXml(kRequestXml, key, kGuidSize); // A valid |session_id| is either a GUID version 4 or empty string. 
if (!session_id.empty()) { EXPECT_TRUE(base::IsValidGUID(session_id)); @@ -191,9 +191,9 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) { 0, fake_system_state_.prefs(), ""}; - const string request_xml = omaha_request.GetRequest(); - EXPECT_EQ(1, CountSubstringInString(request_xml, " size_t { - return request_xml.find(" size_t { + return kRequestXml.find("")) + << kRequestXml; +} + +TEST_F(OmahaRequestBuilderXmlTest, + GetRequestXmlUpdateCompleteEventSomeDlcsExcluded) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + omaha_request_params.set_dlc_apps_params({ + {omaha_request_params.GetDlcAppId("dlc_1"), {.updated = true}}, + {omaha_request_params.GetDlcAppId("dlc_2"), {.updated = false}}, + }); + OmahaEvent event(OmahaEvent::kTypeUpdateComplete); + OmahaRequestBuilderXml omaha_request{&event, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + const string kRequestXml = omaha_request.GetRequest(); + EXPECT_EQ( + 2, + CountSubstringInString( + kRequestXml, "")) + << kRequestXml; + EXPECT_EQ( + 1, + CountSubstringInString( + kRequestXml, + "")) + << kRequestXml; +} + +TEST_F(OmahaRequestBuilderXmlTest, + GetRequestXmlUpdateCompleteEventAllDlcsExcluded) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + omaha_request_params.set_dlc_apps_params({ + {omaha_request_params.GetDlcAppId("dlc_1"), {.updated = false}}, + {omaha_request_params.GetDlcAppId("dlc_2"), {.updated = false}}, + }); + OmahaEvent event(OmahaEvent::kTypeUpdateComplete); + OmahaRequestBuilderXml omaha_request{&event, + &omaha_request_params, + false, + false, + 0, + 0, + 0, + fake_system_state_.prefs(), + ""}; + const string kRequestXml = omaha_request.GetRequest(); + EXPECT_EQ( + 1, + CountSubstringInString( + kRequestXml, "")) + << kRequestXml; + EXPECT_EQ( + 2, + CountSubstringInString( + kRequestXml, + "")) + << kRequestXml; } } // namespace chromeos_update_engine diff --git a/omaha_response.h b/omaha_response.h index 2b86fe70..77f90831 100644 --- a/omaha_response.h +++ b/omaha_response.h @@ -54,6 +54,8 @@ struct OmahaResponse { // True if the payload can be excluded from updating if consistently faulty. // False if the payload is critical to update. bool can_exclude = false; + // The App ID associated with the package. 
+ std::string app_id; }; std::vector packages; diff --git a/payload_state.cc b/payload_state.cc index cf3aab91..4b4120c4 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -373,6 +373,7 @@ void PayloadState::UpdateFailed(ErrorCode error) { case ErrorCode::kInternalLibCurlError: case ErrorCode::kUnresolvedHostError: case ErrorCode::kUnresolvedHostRecovered: + case ErrorCode::kPackageExcludedFromUpdate: LOG(INFO) << "Not incrementing URL index or failure count for this error"; break; diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index dd6cc8d6..cc10b570 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -154,6 +154,7 @@ bool HandleErrorCode(ErrorCode err_code, int* url_num_error_p) { case ErrorCode::kInternalLibCurlError: case ErrorCode::kUnresolvedHostError: case ErrorCode::kUnresolvedHostRecovered: + case ErrorCode::kPackageExcludedFromUpdate: LOG(INFO) << "Not changing URL index or failure count due to error " << chromeos_update_engine::utils::ErrorCodeToString(err_code) << " (" << static_cast(err_code) << ")"; From 7cfadf747db34e5672295d45318b26201c5a24ed Mon Sep 17 00:00:00 2001 From: Matt Ziegelbaum Date: Tue, 9 Jun 2020 18:51:16 -0400 Subject: [PATCH 312/624] Only check for requisition on boards that are CfM-enabled. BUG=b:158604816 TEST=compiled for fizz and eve, checked that IUSE flag was respected Change-Id: Ifbc87e342c10c97bdb32522c5a4664b27e7a85f1 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2239007 Tested-by: Matthew Ziegelbaum Commit-Queue: Matthew Ziegelbaum Reviewed-by: Amin Hassani --- BUILD.gn | 1 + hardware_chromeos.cc | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/BUILD.gn b/BUILD.gn index e438af46..c37d5b9c 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -75,6 +75,7 @@ pkg_config("target_defaults") { "__CHROMEOS__", "_FILE_OFFSET_BITS=64", "_POSIX_C_SOURCE=199309L", + "USE_CFM=${use.cfm}", "USE_DBUS=${use.dbus}", "USE_FEC=0", "USE_HWID_OVERRIDE=${use.hwid_override}", diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index de1d7c01..916b2e58 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -81,7 +81,9 @@ const char* kConfigOptsIsOOBEEnabled = "is_oobe_enabled"; const char* kActivePingKey = "first_active_omaha_ping_sent"; +#if USE_CFM const char* kOemRequisitionKey = "oem_device_requisition"; +#endif // Gets a string value from the vpd for a given key using the `vpd_get_value` // shell command. Returns true on success. @@ -216,7 +218,10 @@ string HardwareChromeOS::GetECVersion() const { string HardwareChromeOS::GetDeviceRequisition() const { string requisition; - return GetVpdValue(kOemRequisitionKey, &requisition) ? requisition : ""; +#if USE_CFM + GetVpdValue(kOemRequisitionKey, &requisition); +#endif + return requisition; } int HardwareChromeOS::GetMinKernelKeyVersion() const { From 91ba9be20507fae08397a4e8349c0d997a846849 Mon Sep 17 00:00:00 2001 From: Matt Ziegelbaum Date: Wed, 10 Jun 2020 16:56:40 -0400 Subject: [PATCH 313/624] Check the enrollment in /home/chronos/Local State in addition to the VPD. This will allow us to migrate the remaining CfMs to the -cfm flavors of their board images. BUG=b:157901191 TEST=unit tests, compared behavior with fizz on a Teemo with and without requisition in VPD, with and without enrollment in Local State JSON. 
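
For reference, the new helper is meant to be consumed roughly as follows
(illustrative sketch only; the real call site in this CL is
HardwareChromeOS::GetDeviceRequisition()):

    // Prefers the VPD value, then falls back to the
    // enrollment.device_requisition key in the Local State JSON when the VPD
    // key is missing or empty.
    std::string requisition = chromeos_update_engine::ReadDeviceRequisition(
        base::FilePath("/home/chronos/Local State"));
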
Cq-Depend: chromium:2239007 Change-Id: I99b05b8530265d4ef4c81472d0be6ba251f7049c Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2242361 Tested-by: Matthew Ziegelbaum Reviewed-by: Amin Hassani Commit-Queue: Matthew Ziegelbaum --- BUILD.gn | 2 + common/utils.cc | 19 ++++++++ common/utils.h | 4 ++ hardware_chromeos.cc | 37 +++----------- requisition_util.cc | 69 ++++++++++++++++++++++++++ requisition_util.h | 32 ++++++++++++ requisition_util_unittest.cc | 94 ++++++++++++++++++++++++++++++++++++ 7 files changed, 228 insertions(+), 29 deletions(-) create mode 100644 requisition_util.cc create mode 100644 requisition_util.h create mode 100644 requisition_util_unittest.cc diff --git a/BUILD.gn b/BUILD.gn index c37d5b9c..5d2e498b 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -212,6 +212,7 @@ static_library("libupdate_engine") { "payload_state.cc", "power_manager_chromeos.cc", "real_system_state.cc", + "requisition_util.cc", "shill_proxy.cc", "update_attempter.cc", "update_boot_flags_action.cc", @@ -509,6 +510,7 @@ if (use.test) { "payload_generator/squashfs_filesystem_unittest.cc", "payload_generator/zip_unittest.cc", "payload_state_unittest.cc", + "requisition_util_unittest.cc", "testrunner.cc", "update_attempter_unittest.cc", "update_boot_flags_action_unittest.cc", diff --git a/common/utils.cc b/common/utils.cc index 644493d5..50b45fa0 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -910,6 +910,25 @@ bool ReadExtents(const string& path, return true; } +bool GetVpdValue(string key, string* result) { + int exit_code = 0; + string value, error; + vector cmd = {"vpd_get_value", key}; + if (!chromeos_update_engine::Subprocess::SynchronousExec( + cmd, &exit_code, &value, &error) || + exit_code) { + LOG(ERROR) << "Failed to get vpd key for " << value + << " with exit code: " << exit_code << " and error: " << error; + return false; + } else if (!error.empty()) { + LOG(INFO) << "vpd_get_value succeeded but with following errors: " << error; + } + + base::TrimWhitespaceASCII(value, base::TRIM_ALL, &value); + *result = value; + return true; +} + bool GetBootId(string* boot_id) { TEST_AND_RETURN_FALSE( base::ReadFileToString(base::FilePath(kBootIdPath), boot_id)); diff --git a/common/utils.h b/common/utils.h index ee2dce08..b6880ed0 100644 --- a/common/utils.h +++ b/common/utils.h @@ -291,6 +291,10 @@ bool ReadExtents(const std::string& path, // reboot. Returns whether it succeeded getting the boot_id. bool GetBootId(std::string* boot_id); +// Gets a string value from the vpd for a given key using the `vpd_get_value` +// shell command. Returns true on success. +bool GetVpdValue(std::string key, std::string* result); + // Divide |x| by |y| and round up to the nearest integer. 
constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) { return (x + y - 1) / y; diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 916b2e58..5c326489 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -38,6 +38,9 @@ extern "C" { #include "update_engine/common/subprocess.h" #include "update_engine/common/utils.h" #include "update_engine/dbus_connection.h" +#if USE_CFM +#include "update_engine/requisition_util.h" +#endif using std::string; using std::vector; @@ -81,31 +84,6 @@ const char* kConfigOptsIsOOBEEnabled = "is_oobe_enabled"; const char* kActivePingKey = "first_active_omaha_ping_sent"; -#if USE_CFM -const char* kOemRequisitionKey = "oem_device_requisition"; -#endif - -// Gets a string value from the vpd for a given key using the `vpd_get_value` -// shell command. Returns true on success. -int GetVpdValue(string key, string* result) { - int exit_code = 0; - string value, error; - vector cmd = {"vpd_get_value", key}; - if (!chromeos_update_engine::Subprocess::SynchronousExec( - cmd, &exit_code, &value, &error) || - exit_code) { - LOG(ERROR) << "Failed to get vpd key for " << value - << " with exit code: " << exit_code << " and error: " << error; - return false; - } else if (!error.empty()) { - LOG(INFO) << "vpd_get_value succeeded but with following errors: " << error; - } - - base::TrimWhitespaceASCII(value, base::TRIM_ALL, &value); - *result = value; - return true; -} - } // namespace namespace chromeos_update_engine { @@ -217,11 +195,12 @@ string HardwareChromeOS::GetECVersion() const { } string HardwareChromeOS::GetDeviceRequisition() const { - string requisition; #if USE_CFM - GetVpdValue(kOemRequisitionKey, &requisition); + const char* kLocalStatePath = "/home/chronos/Local State"; + return ReadDeviceRequisition(base::FilePath(kLocalStatePath)); +#else + return ""; #endif - return requisition; } int HardwareChromeOS::GetMinKernelKeyVersion() const { @@ -346,7 +325,7 @@ void HardwareChromeOS::LoadConfig(const string& root_prefix, bool normal_mode) { bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const { string active_ping_str; - if (!GetVpdValue(kActivePingKey, &active_ping_str)) { + if (!utils::GetVpdValue(kActivePingKey, &active_ping_str)) { return false; } diff --git a/requisition_util.cc b/requisition_util.cc new file mode 100644 index 00000000..5445bceb --- /dev/null +++ b/requisition_util.cc @@ -0,0 +1,69 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/requisition_util.h" + +#include +#include + +#include +#include +#include +#include + +#include "update_engine/common/subprocess.h" +#include "update_engine/common/utils.h" + +using std::string; +using std::vector; + +namespace { + +constexpr char kOemRequisitionKey[] = "oem_device_requisition"; + +} // namespace + +namespace chromeos_update_engine { + +string ReadDeviceRequisition(const base::FilePath& local_state) { + string requisition; + bool vpd_retval = utils::GetVpdValue(kOemRequisitionKey, &requisition); + + // Some users manually convert non-CfM hardware at enrollment time, so VPD + // value may be missing. So check the Local State JSON as well. + if ((requisition.empty() || !vpd_retval) && base::PathExists(local_state)) { + int error_code; + std::string error_msg; + JSONFileValueDeserializer deserializer(local_state); + std::unique_ptr root = + deserializer.Deserialize(&error_code, &error_msg); + if (!root) { + if (error_code != 0) { + LOG(ERROR) << "Unable to deserialize Local State with exit code: " + << error_code << " and error: " << error_msg; + } + return ""; + } + auto* path = root->FindPath({"enrollment", "device_requisition"}); + if (!path || !path->is_string()) { + return ""; + } + path->GetAsString(&requisition); + } + return requisition; +} + +} // namespace chromeos_update_engine diff --git a/requisition_util.h b/requisition_util.h new file mode 100644 index 00000000..8577ee7d --- /dev/null +++ b/requisition_util.h @@ -0,0 +1,32 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_REQUISITION_UTIL_H_ +#define UPDATE_ENGINE_REQUISITION_UTIL_H_ + +#include + +#include + +namespace chromeos_update_engine { + +// Checks the VPD and Local State for the device's requisition and returns it, +// or an empty string if the device has no requisition. +std::string ReadDeviceRequisition(const base::FilePath& local_state); + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_REQUISITION_UTIL_H_ diff --git a/requisition_util_unittest.cc b/requisition_util_unittest.cc new file mode 100644 index 00000000..c21c9c74 --- /dev/null +++ b/requisition_util_unittest.cc @@ -0,0 +1,94 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/requisition_util.h" + +#include + +#include +#include +#include +#include + +#include "update_engine/common/test_utils.h" + +using chromeos_update_engine::test_utils::WriteFileString; +using std::string; + +namespace { + +const char kRemoraJSON[] = + "{\n" + " \"the_list\": [ \"val1\", \"val2\" ],\n" + " \"enrollment\": {\n" + " \"autostart\": true,\n" + " \"can_exit\": false,\n" + " \"device_requisition\": \"remora\"\n" + " },\n" + " \"some_String\": \"1337\",\n" + " \"some_int\": 42\n" + "}\n"; + +const char kNoEnrollmentJSON[] = + "{\n" + " \"the_list\": [ \"val1\", \"val2\" ],\n" + " \"enrollment\": {\n" + " \"autostart\": true,\n" + " \"can_exit\": false,\n" + " \"device_requisition\": \"\"\n" + " },\n" + " \"some_String\": \"1337\",\n" + " \"some_int\": 42\n" + "}\n"; +} // namespace + +namespace chromeos_update_engine { + +class RequisitionUtilTest : public ::testing::Test { + protected: + void SetUp() override { ASSERT_TRUE(root_dir_.CreateUniqueTempDir()); } + + void WriteJsonToFile(const string& json) { + path_ = + base::FilePath(root_dir_.GetPath().value() + "/chronos/Local State"); + ASSERT_TRUE(base::CreateDirectory(path_.DirName())); + ASSERT_TRUE(WriteFileString(path_.value(), json)); + } + + base::ScopedTempDir root_dir_; + base::FilePath path_; +}; + +TEST_F(RequisitionUtilTest, BadJsonReturnsEmpty) { + WriteJsonToFile("this isn't JSON"); + EXPECT_EQ("", ReadDeviceRequisition(path_)); +} + +TEST_F(RequisitionUtilTest, NoFileReturnsEmpty) { + EXPECT_EQ("", ReadDeviceRequisition(path_)); +} + +TEST_F(RequisitionUtilTest, EnrollmentRequisition) { + WriteJsonToFile(kRemoraJSON); + EXPECT_EQ("remora", ReadDeviceRequisition(path_)); +} + +TEST_F(RequisitionUtilTest, BlankEnrollment) { + WriteJsonToFile(kNoEnrollmentJSON); + EXPECT_EQ("", ReadDeviceRequisition(path_)); +} + +} // namespace chromeos_update_engine From 70eef23d3a4fa570220d3f18592a2711b5fc1f68 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 12 Jun 2020 20:32:40 +0000 Subject: [PATCH 314/624] Add progress updates to FilesystemVerificationAction An attempt to fix b/142525610. This CL adds progress reports to FilesystemVerificationAction. So that clients(e.x. GMSCore) could properly display a progress bar instead of hanging there for a few minutes. 
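As a rough illustration of the delegate hook this change introduces (not part of the
patch itself; the LoggingVerifyDelegate name below is hypothetical), a client inside
update_engine could observe the new progress callback like this:

  // Minimal sketch of an observer for the new FilesystemVerifyDelegate
  // interface; progress arrives in the range [0.0, 1.0] while the
  // partitions are being hashed. Assumes base/logging.h for LOG().
  class LoggingVerifyDelegate : public FilesystemVerifyDelegate {
   public:
    void OnVerifyProgressUpdate(double progress) override {
      LOG(INFO) << "Filesystem verification "
                << static_cast<int>(progress * 100) << "% complete";
    }
  };

  // Wiring, assuming a FilesystemVerifierAction instance named |action|:
  //   LoggingVerifyDelegate delegate;
  //   action->set_delegate(&delegate);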
GMSCore support is already done, see https://critique-ng.corp.google.com/cl/317362455 Test: Did an OTA update and observed that verification progress is ported from android OTA client Bug: 142525610 Change-Id: I1cdb1970cb65fc767b49cd38650894eed4d90960 --- .../filesystem_verifier_action.cc | 15 +++++++++++++- payload_consumer/filesystem_verifier_action.h | 20 +++++++++++++++++++ update_attempter_android.cc | 15 ++++++++++---- update_attempter_android.h | 5 +++++ 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc index 36e5a35b..b6affc3c 100644 --- a/payload_consumer/filesystem_verifier_action.cc +++ b/payload_consumer/filesystem_verifier_action.cc @@ -76,9 +76,16 @@ void FilesystemVerifierAction::Cleanup(ErrorCode code) { return; if (code == ErrorCode::kSuccess && HasOutputPipe()) SetOutputObject(install_plan_); + UpdateProgress(1.0); processor_->ActionComplete(this, code); } +void FilesystemVerifierAction::UpdateProgress(double progress) { + if (delegate_ != nullptr) { + delegate_->OnVerifyProgressUpdate(progress); + } +} + void FilesystemVerifierAction::StartPartitionHashing() { if (partition_index_ == install_plan_.partitions.size()) { Cleanup(ErrorCode::kSuccess); @@ -187,7 +194,6 @@ void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) { Cleanup(ErrorCode::kError); return; } - if (bytes_read == 0) { LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_ << " bytes from partition " @@ -202,6 +208,13 @@ void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) { return; } + // WE don't consider sizes of each partition. Every partition + // has the same length on progress bar. + // TODO(zhangkelvin) Take sizes of each partition into account + + UpdateProgress( + (static_cast(offset_) / partition_size_ + partition_index_) / + install_plan_.partitions.size()); if (verifier_step_ == VerifierStep::kVerifyTargetHash && install_plan_.write_verity) { if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) { diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h index 83d66687..6ef3d164 100644 --- a/payload_consumer/filesystem_verifier_action.h +++ b/payload_consumer/filesystem_verifier_action.h @@ -49,6 +49,12 @@ enum class VerifierStep { kVerifySourceHash, }; +class FilesystemVerifyDelegate { + public: + virtual ~FilesystemVerifyDelegate() = default; + virtual void OnVerifyProgressUpdate(double progress) = 0; +}; + class FilesystemVerifierAction : public InstallPlanAction { public: FilesystemVerifierAction() @@ -58,6 +64,14 @@ class FilesystemVerifierAction : public InstallPlanAction { void PerformAction() override; void TerminateProcessing() override; + // Used for listening to progress updates + void set_delegate(FilesystemVerifyDelegate* delegate) { + this->delegate_ = delegate; + } + [[nodiscard]] FilesystemVerifyDelegate* get_delegate() const { + return this->delegate_; + } + // Debugging/logging static std::string StaticType() { return "FilesystemVerifierAction"; } std::string Type() const override { return StaticType(); } @@ -85,6 +99,9 @@ class FilesystemVerifierAction : public InstallPlanAction { // true if TerminateProcessing() was called. void Cleanup(ErrorCode code); + // Invoke delegate callback to report progress, if delegate is not null + void UpdateProgress(double progress); + // The type of the partition that we are verifying. 
VerifierStep verifier_step_ = VerifierStep::kVerifyTargetHash; @@ -119,6 +136,9 @@ class FilesystemVerifierAction : public InstallPlanAction { // The byte offset that we are reading in the current partition. uint64_t offset_{0}; + // An observer that observes progress updates of this action. + FilesystemVerifyDelegate* delegate_{}; + DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction); }; diff --git a/update_attempter_android.cc b/update_attempter_android.cc index b7d119f2..4d743791 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -576,9 +576,9 @@ void UpdateAttempterAndroid::ActionCompleted(ActionProcessor* processor, cleanup_previous_update_code_ = code; NotifyCleanupPreviousUpdateCallbacksAndClear(); } - if (type == DownloadAction::StaticType()) { - download_progress_ = 0; - } + // download_progress_ is actually used by other actions, such as + // filesystem_verify_action. Therefore we always clear it. + download_progress_ = 0; if (type == PostinstallRunnerAction::StaticType()) { bool succeeded = code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive; @@ -592,8 +592,9 @@ void UpdateAttempterAndroid::ActionCompleted(ActionProcessor* processor, SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE); } if (type == DownloadAction::StaticType()) { - SetStatusAndNotify(UpdateStatus::FINALIZING); + SetStatusAndNotify(UpdateStatus::VERIFYING); } else if (type == FilesystemVerifierAction::StaticType()) { + SetStatusAndNotify(UpdateStatus::FINALIZING); prefs_->SetBoolean(kPrefsVerityWritten, true); } } @@ -644,6 +645,11 @@ void UpdateAttempterAndroid::ProgressUpdate(double progress) { } } +void UpdateAttempterAndroid::OnVerifyProgressUpdate(double progress) { + assert(status_ == UpdateStatus::VERIFYING); + ProgressUpdate(progress); +} + void UpdateAttempterAndroid::ScheduleProcessingStart() { LOG(INFO) << "Scheduling an action processor start."; brillo::MessageLoop::current()->PostTask( @@ -734,6 +740,7 @@ void UpdateAttempterAndroid::BuildUpdateActions(HttpFetcher* fetcher) { std::make_unique(); auto postinstall_runner_action = std::make_unique(boot_control_, hardware_); + filesystem_verifier_action->set_delegate(this); postinstall_runner_action->set_delegate(this); // Bond them together. 
We have to use the leaf-types when calling diff --git a/update_attempter_android.h b/update_attempter_android.h index f8c78de1..55003a09 100644 --- a/update_attempter_android.h +++ b/update_attempter_android.h @@ -37,6 +37,7 @@ #include "update_engine/metrics_utils.h" #include "update_engine/network_selector_interface.h" #include "update_engine/payload_consumer/download_action.h" +#include "update_engine/payload_consumer/filesystem_verifier_action.h" #include "update_engine/payload_consumer/postinstall_runner_action.h" #include "update_engine/service_delegate_android_interface.h" #include "update_engine/service_observer_interface.h" @@ -47,6 +48,7 @@ class UpdateAttempterAndroid : public ServiceDelegateAndroidInterface, public ActionProcessorDelegate, public DownloadActionDelegate, + public FilesystemVerifyDelegate, public PostinstallRunnerAction::DelegateInterface, public CleanupPreviousUpdateActionDelegateInterface { public: @@ -101,6 +103,9 @@ class UpdateAttempterAndroid bool ShouldCancel(ErrorCode* cancel_reason) override; void DownloadComplete() override; + // FilesystemVerifyDelegate overrides + void OnVerifyProgressUpdate(double progress) override; + // PostinstallRunnerAction::DelegateInterface void ProgressUpdate(double progress) override; From 45061b6ddc19b24cf6a934e82391ea4915f143de Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Wed, 24 Jun 2020 15:47:47 +0900 Subject: [PATCH 315/624] update_engine: update for libchrome uprev 1. log_file will be renamed to log_file_path to take string paramemters. log_file will take FILE* instead. 2. message_loop_current.h is no longer implicitly included, due to deprecation of base::MessageLoop. BUG=chromium:1094927 TEST=unittest Change-Id: Ie18b09343200815f8e64631dd30f7a49c4a7b569 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2262083 Reviewed-by: Jae Hoon Kim Tested-by: Qijiang Fan Commit-Queue: Qijiang Fan --- client_library/client_dbus.cc | 1 + main.cc | 4 ++++ payload_generator/generate_delta_main.cc | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index 8e9a7fd1..caf7befe 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -17,6 +17,7 @@ #include "update_engine/client_library/client_dbus.h" #include +#include #include diff --git a/main.cc b/main.cc index b4354673..1d161e3b 100644 --- a/main.cc +++ b/main.cc @@ -144,7 +144,11 @@ void SetupLogging(bool log_to_system, bool log_to_file) { if (log_to_file) { log_file = SetupLogFile(kSystemLogsRoot); log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE; +#if BASE_VER < 780000 log_settings.log_file = log_file.c_str(); +#else + log_settings.log_file_path = log_file.c_str(); +#endif } logging::InitLogging(log_settings); diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 69ac8bbc..fe0a10be 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -450,7 +450,11 @@ int Main(int argc, char** argv) { Terminator::Init(); logging::LoggingSettings log_settings; +#if BASE_VER < 780000 log_settings.log_file = "delta_generator.log"; +#else + log_settings.log_file_path = "delta_generator.log"; +#endif log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG; log_settings.lock_log = logging::LOCK_LOG_FILE; log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE; From f4d1196edca97a666e20d16a334aa67f47bbcf0a Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: 
Wed, 24 Jun 2020 12:14:10 -0700 Subject: [PATCH 316/624] update_engine: Remove case conditions for deprecated operations MOVE and BSDIFF were related to minor version 1 and major version 1 which both are deprecated so we should remove these so the builders don't complain (with warning) about these values. BUG=None TEST=unittests pass Change-Id: I7ccc2c18d2dfc8e80b7c5d560988762a4c4cbdc3 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2265160 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- payload_consumer/payload_constants.cc | 4 ---- payload_generator/payload_generation_config.cc | 4 ---- 2 files changed, 8 deletions(-) diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 908a8933..299bcfc1 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -64,10 +64,6 @@ const char* InstallOperationTypeName(InstallOperation_Type op_type) { return "PUFFDIFF"; case InstallOperation::BROTLI_BSDIFF: return "BROTLI_BSDIFF"; - - case InstallOperation::BSDIFF: - case InstallOperation::MOVE: - NOTREACHED(); } return ""; } diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index e1f700a2..71587963 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -256,10 +256,6 @@ bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const { case InstallOperation::PUFFDIFF: return minor >= kPuffdiffMinorPayloadVersion; - - case InstallOperation::MOVE: - case InstallOperation::BSDIFF: - NOTREACHED(); } return false; } From 76583910d6685e6e75c6edaa3f17795e5d85405f Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 23 Jun 2020 10:24:03 -0700 Subject: [PATCH 317/624] update_engine: Skip post DownloadAction exclusions Post |DownloadAction| don't have direct reference to |Payload|s held within the |PayloadState|. Hence it's required to halt exclusions for |Action|s post |DownloadAction|. This is done by setting the |payload_index_| within |PayloadState| >= to the |candidate_urls_|/|response_.packages| size. DCHECKs added where |payload_index_| is used as usage may cause out of bounds indexing. This change removes the dangling reference to the last |Payload|, as previously |NextPayload()| kept |payload_index_| pointing to the last |Payload| within |PayloadState|. BUG=chromium:928805 TEST=FEATURES=test emerge-$B update_engine Change-Id: I3f6a9a3cc26bb84f94506e45e1d6e906624e5dd7 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2261292 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani Auto-Submit: Jae Hoon Kim --- payload_state.cc | 12 +++++++++-- payload_state.h | 2 ++ payload_state_unittest.cc | 45 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/payload_state.cc b/payload_state.cc index 4b4120c4..d626056c 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -462,6 +462,7 @@ void PayloadState::IncrementPayloadAttemptNumber() { } void PayloadState::IncrementFullPayloadAttemptNumber() { + DCHECK(payload_index_ < response_.packages.size()); // Update the payload attempt number for full payloads and the backoff time. 
if (response_.packages[payload_index_].is_delta) { LOG(INFO) << "Not incrementing payload attempt number for delta payloads"; @@ -474,6 +475,7 @@ void PayloadState::IncrementFullPayloadAttemptNumber() { } void PayloadState::IncrementUrlIndex() { + DCHECK(payload_index_ < candidate_urls_.size()); size_t next_url_index = url_index_ + 1; size_t max_url_size = candidate_urls_[payload_index_].size(); if (next_url_index < max_url_size) { @@ -510,6 +512,10 @@ void PayloadState::IncrementFailureCount() { } void PayloadState::ExcludeCurrentPayload() { + if (payload_index_ >= response_.packages.size()) { + LOG(INFO) << "Skipping exclusion of the current payload."; + return; + } const auto& package = response_.packages[payload_index_]; if (!package.can_exclude) { LOG(INFO) << "Not excluding as marked non-excludable for package hash=" @@ -923,10 +929,12 @@ void PayloadState::SetPayloadIndex(size_t payload_index) { } bool PayloadState::NextPayload() { - if (payload_index_ + 1 >= candidate_urls_.size()) + if (payload_index_ >= candidate_urls_.size()) return false; - SetUrlIndex(0); SetPayloadIndex(payload_index_ + 1); + if (payload_index_ >= candidate_urls_.size()) + return false; + SetUrlIndex(0); return true; } diff --git a/payload_state.h b/payload_state.h index d13c6420..5713a54c 100644 --- a/payload_state.h +++ b/payload_state.h @@ -161,6 +161,8 @@ class PayloadState : public PayloadStateInterface { FRIEND_TEST(PayloadStateTest, ExcludeNoopForNonExcludables); FRIEND_TEST(PayloadStateTest, ExcludeOnlyCanExcludables); FRIEND_TEST(PayloadStateTest, IncrementFailureExclusionTest); + FRIEND_TEST(PayloadStateTest, HaltExclusionPostPayloadExhaustion); + FRIEND_TEST(PayloadStateTest, NonInfinitePayloadIndexIncrement); // Helper called when an attempt has begun, is called by // UpdateResumed(), UpdateRestarted() and Rollback(). diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index bf9aed44..0bf52d90 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -1778,4 +1778,49 @@ TEST(PayloadStateTest, IncrementFailureExclusionTest) { payload_state.IncrementFailureCount(); } +TEST(PayloadStateTest, HaltExclusionPostPayloadExhaustion) { + PayloadState payload_state; + FakeSystemState fake_system_state; + StrictMock mock_excluder; + EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + .WillOnce(Return(&mock_excluder)); + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + OmahaResponse response; + // Non-critical package. + response.packages.push_back( + {.payload_urls = {"http://test1a", "http://test2a"}, + .size = 123456789, + .metadata_size = 58123, + .metadata_signature = "msign", + .hash = "hash", + .can_exclude = true}); + payload_state.SetResponse(response); + + // Exclusion should be called when excluded. + EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a"))) + .WillOnce(Return(true)); + payload_state.ExcludeCurrentPayload(); + + // No more paylods to go through. + EXPECT_FALSE(payload_state.NextPayload()); + + // Exclusion should not be called as all |Payload|s are exhausted. 
+ payload_state.ExcludeCurrentPayload(); +} + +TEST(PayloadStateTest, NonInfinitePayloadIndexIncrement) { + PayloadState payload_state; + FakeSystemState fake_system_state; + EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + + payload_state.SetResponse({}); + + EXPECT_FALSE(payload_state.NextPayload()); + int payload_index = payload_state.payload_index_; + + EXPECT_FALSE(payload_state.NextPayload()); + EXPECT_EQ(payload_index, payload_state.payload_index_); +} + } // namespace chromeos_update_engine From cc6ab9f076694a816fa35f133e98e7737542ddd8 Mon Sep 17 00:00:00 2001 From: Andrew Date: Thu, 25 Jun 2020 07:41:40 -0700 Subject: [PATCH 318/624] update_engine: Fix non inclusive vocabulary BUG=None TEST=CQ Passes Change-Id: Ic321806ab6029c88723c220f243e2c2c7a9e94f0 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2267058 Tested-by: Andrew Lassalle Auto-Submit: Andrew Lassalle Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani --- common/test_utils.h | 2 +- hardware_chromeos.cc | 2 +- omaha_request_params.h | 2 +- payload_generator/delta_diff_utils.cc | 6 +++--- payload_state.cc | 2 +- payload_state.h | 8 ++++---- payload_state_unittest.cc | 2 +- pylintrc | 2 +- update_manager/chromeos_policy.cc | 2 +- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/common/test_utils.h b/common/test_utils.h index 44b7aa14..63ea7492 100644 --- a/common/test_utils.h +++ b/common/test_utils.h @@ -78,7 +78,7 @@ std::string Readlink(const std::string& path); void FillWithData(brillo::Blob* buffer); -// Compare the value of native array for download source parameter. +// Compare the value of builtin array for download source parameter. MATCHER_P(DownloadSourceMatcher, source_array, "") { return std::equal(source_array, source_array + kNumDownloadSources, arg); } diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 5c326489..2aae9f05 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -50,7 +50,7 @@ namespace { const char kOOBECompletedMarker[] = "/home/chronos/.oobe_completed"; // The stateful directory used by update_engine to store powerwash-safe files. -// The files stored here must be whitelisted in the powerwash scripts. +// The files stored here must be added to the powerwash script allowlist. const char kPowerwashSafeDirectory[] = "/mnt/stateful_partition/unencrypted/preserve"; diff --git a/omaha_request_params.h b/omaha_request_params.h index 34529658..76fc8060 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -328,7 +328,7 @@ class OmahaRequestParams { bool ToMoreStableChannel() const; // Returns True if we should store the fw/ec versions based on our hwid_. - // Compares hwid to a set of whitelisted prefixes. + // Compares hwid to a set of prefixes in the allowlist. bool CollectECFWVersions() const; // Gets the machine type (e.g. "i686"). 
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc index 53a3cf17..ded30fb6 100644 --- a/payload_generator/delta_diff_utils.cc +++ b/payload_generator/delta_diff_utils.cc @@ -708,15 +708,15 @@ bool ReadExtentsToDiff(const string& old_part, version.OperationAllowed(InstallOperation::SOURCE_BSDIFF); if (bsdiff_allowed && blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) { - LOG(INFO) << "bsdiff blacklisted, data too big: " - << blocks_to_read * kBlockSize << " bytes"; + LOG(INFO) << "bsdiff ignored, data too big: " << blocks_to_read * kBlockSize + << " bytes"; bsdiff_allowed = false; } bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF); if (puffdiff_allowed && blocks_to_read * kBlockSize > kMaxPuffdiffDestinationSize) { - LOG(INFO) << "puffdiff blacklisted, data too big: " + LOG(INFO) << "puffdiff ignored, data too big: " << blocks_to_read * kBlockSize << " bytes"; puffdiff_allowed = false; } diff --git a/payload_state.cc b/payload_state.cc index d626056c..ce3e1d54 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -1161,7 +1161,7 @@ void PayloadState::LoadRollbackVersion() { void PayloadState::SetRollbackVersion(const string& rollback_version) { CHECK(powerwash_safe_prefs_); - LOG(INFO) << "Blacklisting version " << rollback_version; + LOG(INFO) << "Excluding version " << rollback_version; rollback_version_ = rollback_version; powerwash_safe_prefs_->SetString(kPrefsRollbackVersion, rollback_version); } diff --git a/payload_state.h b/payload_state.h index 5713a54c..77197a7a 100644 --- a/payload_state.h +++ b/payload_state.h @@ -368,14 +368,14 @@ class PayloadState : public PayloadStateInterface { // check where policy was available. This info is preserved over powerwash. void LoadRollbackHappened(); - // Loads the blacklisted version from our prefs file. + // Loads the excluded version from our prefs file. void LoadRollbackVersion(); - // Blacklists this version from getting AU'd to until we receive a new update + // Excludes this version from getting AU'd to until we receive a new update // response. void SetRollbackVersion(const std::string& rollback_version); - // Clears any blacklisted version. + // Clears any excluded version. void ResetRollbackVersion(); inline uint32_t GetUrlIndex() { @@ -567,7 +567,7 @@ class PayloadState : public PayloadStateInterface { // forced updates to avoid update-rollback loops. bool rollback_happened_; - // This stores a blacklisted version set as part of rollback. When we rollback + // This stores an excluded version set as part of rollback. When we rollback // we store the version of the os from which we are rolling back from in order // to guarantee that we do not re-update to it on the next au attempt after // reboot. diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index 0bf52d90..8667548f 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -1016,7 +1016,7 @@ TEST(PayloadStateTest, RollbackVersion) { NiceMock* mock_powerwash_safe_prefs = fake_system_state.mock_powerwash_safe_prefs(); - // Mock out the os version and make sure it's blacklisted correctly. + // Mock out the os version and make sure it's excluded correctly. string rollback_version = "2345.0.0"; OmahaRequestParams params(&fake_system_state); params.Init(rollback_version, "", false); diff --git a/pylintrc b/pylintrc index 33adec24..a4338680 100644 --- a/pylintrc +++ b/pylintrc @@ -24,7 +24,7 @@ # Profiled execution. profile=no -# Add files or directories to the blacklist. 
They should be base names, not +# Add files or directories to the ignorelist. They should be base names, not # paths. ignore=CVS,.svn,.git,update_metadata_pb2.py diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index cc10b570..c310e421 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -465,7 +465,7 @@ EvalStatus ChromeOSPolicy::UpdateCanStart( // ConnectionManager::IsUpdateAllowedOver(); be sure to deprecate the latter. // // TODO(garnold) The current logic generally treats the list of allowed -// connections coming from the device policy as a whitelist, meaning that it +// connections coming from the device policy as an allowlist, meaning that it // can only be used for enabling connections, but not disable them. Further, // certain connection types cannot be enabled even by policy. // In effect, the only thing that device policy can change is to enable From 33d180914ef2cb9286595fb1fdcc3511c5fb1c34 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 29 Jun 2020 16:33:10 -0400 Subject: [PATCH 319/624] Fall back to memory pref if pref dir doesn't exist Test: Run existing unit tests Bug: 153004173 Change-Id: Iba5c2d3e7988e78050f05ad00dddf2ebdc03fa22 --- daemon_state_android.cc | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/daemon_state_android.cc b/daemon_state_android.cc index c9c09b88..3376e64a 100644 --- a/daemon_state_android.cc +++ b/daemon_state_android.cc @@ -45,17 +45,17 @@ bool DaemonStateAndroid::Initialize() { // Initialize prefs. base::FilePath non_volatile_path; - // TODO(deymo): Fall back to in-memory prefs if there's no physical directory - // available. if (!hardware_->GetNonVolatileDirectory(&non_volatile_path)) { - LOG(ERROR) << "Failed to get a non-volatile directory."; - return false; - } - Prefs* prefs = new Prefs(); - prefs_.reset(prefs); - if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory))) { - LOG(ERROR) << "Failed to initialize preferences."; - return false; + prefs_.reset(new MemoryPrefs()); + LOG(WARNING) + << "Could not get a non-volatile directory, fall back to memory prefs"; + } else { + Prefs* prefs = new Prefs(); + prefs_.reset(prefs); + if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory))) { + LOG(ERROR) << "Failed to initialize preferences."; + return false; + } } // The CertificateChecker singleton is used by the update attempter. 
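The fallback added above in DaemonStateAndroid::Initialize() can be summarized by the
sketch below. This is only an illustration of the selection logic; the CreatePrefs
helper name is hypothetical and does not exist in the tree, where the logic stays
inline in Initialize() as shown in the diff.

  // Hypothetical helper mirroring the patched behavior: prefer persistent
  // prefs under the non-volatile directory, and degrade to in-memory prefs
  // (state lost across reboots) when that directory is unavailable.
  std::unique_ptr<PrefsInterface> CreatePrefs(HardwareInterface* hardware) {
    base::FilePath non_volatile_path;
    if (!hardware->GetNonVolatileDirectory(&non_volatile_path))
      return std::make_unique<MemoryPrefs>();
    auto prefs = std::make_unique<Prefs>();
    if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory)))
      return nullptr;  // Persistent prefs requested but not usable.
    return prefs;
  }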
From d282252007f3ccdf862a743baabea0bb774149fa Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 7 Jul 2020 17:20:58 -0400 Subject: [PATCH 320/624] Correctly check if a directory exists Test: Run unit test cases Change-Id: I61a50450afd638da646e16e07cfdf800d4975d13 --- hardware_android.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hardware_android.cc b/hardware_android.cc index ac6cf16c..0bf05e46 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -181,7 +181,7 @@ bool HardwareAndroid::CancelPowerwash() { bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const { base::FilePath local_path(constants::kNonVolatileDirectory); - if (!base::PathExists(local_path)) { + if (!base::DirectoryExists(local_path)) { LOG(ERROR) << "Non-volatile directory not found: " << local_path.value(); return false; } From d2da7b1990e0fee1c99bf64aa562cef572aaa061 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 7 Jul 2020 17:19:21 -0400 Subject: [PATCH 321/624] Report actual payload type Test: Run existing unit tests Bug: 159849344 Change-Id: I870531d42d668072db4b56b86a9f3d40e46d0cfa --- payload_consumer/download_action.h | 3 --- payload_consumer/filesystem_verifier_action.h | 3 --- payload_consumer/install_plan.h | 3 ++- payload_consumer/postinstall_runner_action.h | 2 -- update_attempter_android.cc | 2 ++ 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h index 1777e22f..61c93d28 100644 --- a/payload_consumer/download_action.h +++ b/payload_consumer/download_action.h @@ -134,9 +134,6 @@ class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate { // Start downloading the current payload using delta_performer. void StartDownloading(); - // The InstallPlan passed in - InstallPlan install_plan_; - // Pointer to the current payload in install_plan_.payloads. InstallPlan::Payload* payload_{nullptr}; diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h index 6ef3d164..7d179dfc 100644 --- a/payload_consumer/filesystem_verifier_action.h +++ b/payload_consumer/filesystem_verifier_action.h @@ -117,9 +117,6 @@ class FilesystemVerifierAction : public InstallPlanAction { bool cancelled_{false}; // true if the action has been cancelled. - // The install plan we're passed in via the input pipe. - InstallPlan install_plan_; - // Calculates the hash of the data. std::unique_ptr hasher_; diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index 7a95ab43..63178bd8 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -195,9 +195,10 @@ class InstallPlanAction : public Action { typedef ActionTraits::InputObjectType InputObjectType; typedef ActionTraits::OutputObjectType OutputObjectType; - private: + protected: InstallPlan install_plan_; + private: DISALLOW_COPY_AND_ASSIGN(InstallPlanAction); }; diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h index e5dfc405..bbc9e8cc 100644 --- a/payload_consumer/postinstall_runner_action.h +++ b/payload_consumer/postinstall_runner_action.h @@ -97,8 +97,6 @@ class PostinstallRunnerAction : public InstallPlanAction { // ready. Called when the post-install script was run for all the partitions. void CompletePostinstall(ErrorCode error_code); - InstallPlan install_plan_; - // The path where the filesystem will be mounted during post-install. 
std::string fs_mount_dir_; diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 4d743791..a554d380 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -592,6 +592,8 @@ void UpdateAttempterAndroid::ActionCompleted(ActionProcessor* processor, SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE); } if (type == DownloadAction::StaticType()) { + auto download_action = static_cast(action); + install_plan_ = *download_action->install_plan(); SetStatusAndNotify(UpdateStatus::VERIFYING); } else if (type == FilesystemVerifierAction::StaticType()) { SetStatusAndNotify(UpdateStatus::FINALIZING); From 24f960986b1337f14eb8e86382cb62aed5d1153c Mon Sep 17 00:00:00 2001 From: Tianjie Date: Tue, 30 Jun 2020 12:26:25 -0700 Subject: [PATCH 322/624] Verify the extents for untouched dynamic partitions during partial update For partial updates, the metadata for untouched dynamic partitions are just copied over to the target slot. So, verifying the extents of these partitions in the target metadata should be sufficient for correctness. This saves the work to read & hash the bytes on these partitions for each resumed update. Bug: 151088567 Test: unit tests pass, apply a partial update Change-Id: I9d40ed2643e145a1546ea17b146fcdcfb91f213f --- common/dynamic_partition_control_interface.h | 7 +++ common/dynamic_partition_control_stub.cc | 7 +++ common/dynamic_partition_control_stub.h | 5 ++ dynamic_partition_control_android.cc | 57 +++++++++++++------ dynamic_partition_control_android.h | 27 +++++---- dynamic_partition_control_android_unittest.cc | 8 +++ mock_dynamic_partition_control.h | 4 ++ payload_consumer/delta_performer.cc | 25 ++++++-- .../filesystem_verifier_action.cc | 15 +++++ payload_consumer/filesystem_verifier_action.h | 12 +++- .../filesystem_verifier_action_unittest.cc | 8 ++- payload_consumer/install_plan.h | 4 ++ .../partition_update_generator_android.cc | 56 +++++++----------- .../partition_update_generator_android.h | 3 +- payload_generator/generate_delta_main.cc | 4 +- update_attempter.cc | 4 +- update_attempter_android.cc | 4 +- update_attempter_unittest.cc | 3 +- 18 files changed, 169 insertions(+), 84 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 7289deee..7c2d0b0c 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -130,6 +130,13 @@ class DynamicPartitionControlInterface { // the result in |path|. Returns true on success. // Sample result: /dev/block/by-name/ virtual bool GetDeviceDir(std::string* path) = 0; + + // Verifies that the untouched dynamic partitions in the target metadata have + // the same extents as the source metadata. 
+ virtual bool VerifyExtentsForUntouchedPartitions( + uint32_t source_slot, + uint32_t target_slot, + const std::vector& partitions) = 0; }; } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index cde36afc..5a8ca434 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -76,4 +76,11 @@ bool DynamicPartitionControlStub::GetDeviceDir(std::string* path) { return true; } +bool DynamicPartitionControlStub::VerifyExtentsForUntouchedPartitions( + uint32_t source_slot, + uint32_t target_slot, + const std::vector& partitions) { + return true; +} + } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 28e3e6a5..94dba1bc 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -51,6 +51,11 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { bool ListDynamicPartitionsForSlot( uint32_t current_slot, std::vector* partitions) override; bool GetDeviceDir(std::string* path) override; + + bool VerifyExtentsForUntouchedPartitions( + uint32_t source_slot, + uint32_t target_slot, + const std::vector& partitions) override; }; } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 6817c21e..ba749d97 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -293,9 +293,16 @@ bool DynamicPartitionControlAndroid::GetDmDevicePathByName( std::unique_ptr DynamicPartitionControlAndroid::LoadMetadataBuilder( - const std::string& super_device, uint32_t source_slot) { - return LoadMetadataBuilder( - super_device, source_slot, BootControlInterface::kInvalidSlot); + const std::string& super_device, uint32_t slot) { + auto builder = MetadataBuilder::New(PartitionOpener(), super_device, slot); + if (builder == nullptr) { + LOG(WARNING) << "No metadata slot " << BootControlInterface::SlotName(slot) + << " in " << super_device; + return nullptr; + } + LOG(INFO) << "Loaded metadata from slot " + << BootControlInterface::SlotName(slot) << " in " << super_device; + return builder; } std::unique_ptr @@ -303,26 +310,19 @@ DynamicPartitionControlAndroid::LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot, uint32_t target_slot) { - std::unique_ptr builder; - if (target_slot == BootControlInterface::kInvalidSlot) { - builder = - MetadataBuilder::New(PartitionOpener(), super_device, source_slot); - } else { - bool always_keep_source_slot = !target_supports_snapshot_; - builder = MetadataBuilder::NewForUpdate(PartitionOpener(), - super_device, - source_slot, - target_slot, - always_keep_source_slot); - } - + bool always_keep_source_slot = !target_supports_snapshot_; + auto builder = MetadataBuilder::NewForUpdate(PartitionOpener(), + super_device, + source_slot, + target_slot, + always_keep_source_slot); if (builder == nullptr) { LOG(WARNING) << "No metadata slot " << BootControlInterface::SlotName(source_slot) << " in " << super_device; return nullptr; } - LOG(INFO) << "Loaded metadata from slot " + LOG(INFO) << "Created metadata for new update from slot " << BootControlInterface::SlotName(source_slot) << " in " << super_device; return builder; @@ -495,6 +495,7 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( } } + // TODO(xunchang) support partial update on non VAB enabled devices. 
TEST_AND_RETURN_FALSE(PrepareDynamicPartitionsForUpdate( source_slot, target_slot, manifest, delete_source)); @@ -1113,6 +1114,28 @@ bool DynamicPartitionControlAndroid::ListDynamicPartitionsForSlot( return true; } +bool DynamicPartitionControlAndroid::VerifyExtentsForUntouchedPartitions( + uint32_t source_slot, + uint32_t target_slot, + const std::vector& partitions) { + std::string device_dir_str; + TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str)); + base::FilePath device_dir(device_dir_str); + + auto source_super_device = + device_dir.Append(GetSuperPartitionName(source_slot)).value(); + auto source_builder = LoadMetadataBuilder(source_super_device, source_slot); + TEST_AND_RETURN_FALSE(source_builder != nullptr); + + auto target_super_device = + device_dir.Append(GetSuperPartitionName(target_slot)).value(); + auto target_builder = LoadMetadataBuilder(target_super_device, target_slot); + TEST_AND_RETURN_FALSE(target_builder != nullptr); + + return MetadataBuilder::VerifyExtentsAgainstSourceMetadata( + *source_builder, source_slot, *target_builder, target_slot, partitions); +} + bool DynamicPartitionControlAndroid::ExpectMetadataMounted() { // No need to mount metadata for non-Virtual A/B devices. if (!GetVirtualAbFeatureFlag().IsEnabled()) { diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 69026a40..08656fdc 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -57,6 +57,11 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { bool ListDynamicPartitionsForSlot( uint32_t current_slot, std::vector* partitions) override; + bool VerifyExtentsForUntouchedPartitions( + uint32_t source_slot, + uint32_t target_slot, + const std::vector& partitions) override; + bool GetDeviceDir(std::string* path) override; // Return the device for partition |partition_name| at slot |slot|. @@ -85,16 +90,14 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { virtual bool UnmapPartitionOnDeviceMapper( const std::string& target_partition_name); - // Retrieve metadata from |super_device| at slot |source_slot|. - // - // If |target_slot| != kInvalidSlot, before returning the metadata, this - // function modifies the metadata so that during updates, the metadata can be - // written to |target_slot|. In particular, on retrofit devices, the returned - // metadata automatically includes block devices at |target_slot|. - // - // If |target_slot| == kInvalidSlot, this function returns metadata at - // |source_slot| without modifying it. This is the same as - // LoadMetadataBuilder(const std::string&, uint32_t). + // Retrieves metadata from |super_device| at slot |slot|. + virtual std::unique_ptr LoadMetadataBuilder( + const std::string& super_device, uint32_t slot); + + // Retrieves metadata from |super_device| at slot |source_slot|. And modifies + // the metadata so that during updates, the metadata can be written to + // |target_slot|. In particular, on retrofit devices, the returned metadata + // automatically includes block devices at |target_slot|. virtual std::unique_ptr LoadMetadataBuilder( const std::string& super_device, uint32_t source_slot, @@ -133,10 +136,6 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { virtual bool GetDmDevicePathByName(const std::string& name, std::string* path); - // Retrieve metadata from |super_device| at slot |source_slot|. 
- virtual std::unique_ptr LoadMetadataBuilder( - const std::string& super_device, uint32_t source_slot); - // Return the name of the super partition (which stores super partition // metadata) for a given slot. virtual std::string GetSuperPartitionName(uint32_t slot); diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 37381708..4154b36c 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -114,6 +114,14 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes, uint32_t partition_attr = 0) { + EXPECT_CALL(dynamicControl(), + LoadMetadataBuilder(GetSuperDevice(slot), slot)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto) { + return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes), + partition_attr); + })); + EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) .Times(AnyNumber()) diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 1aaebd8b..e85df327 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -50,6 +50,10 @@ class MockDynamicPartitionControlAndroid GetDmDevicePathByName, (const std::string&, std::string*), (override)); + MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>, + LoadMetadataBuilder, + (const std::string&, uint32_t), + (override)); MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>, LoadMetadataBuilder, (const std::string&, uint32_t, uint32_t), diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index af1baa4a..7d837db4 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -806,15 +806,32 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { auto generator = partition_update_generator::Create(boot_control_, manifest_.block_size()); - std::vector other_partitions; + std::vector untouched_static_partitions; TEST_AND_RETURN_FALSE( generator->GenerateOperationsForPartitionsNotInPayload( install_plan_->source_slot, install_plan_->target_slot, touched_partitions, - &other_partitions)); - partitions_.insert( - partitions_.end(), other_partitions.begin(), other_partitions.end()); + &untouched_static_partitions)); + partitions_.insert(partitions_.end(), + untouched_static_partitions.begin(), + untouched_static_partitions.end()); + + // Save the untouched dynamic partitions in install plan. 
+ std::vector dynamic_partitions; + if (!boot_control_->GetDynamicPartitionControl() + ->ListDynamicPartitionsForSlot(install_plan_->source_slot, + &dynamic_partitions)) { + LOG(ERROR) << "Failed to load dynamic partitions from slot " + << install_plan_->source_slot; + return false; + } + install_plan_->untouched_dynamic_partitions.clear(); + for (const auto& name : dynamic_partitions) { + if (touched_partitions.find(name) == touched_partitions.end()) { + install_plan_->untouched_dynamic_partitions.push_back(name); + } + } } // Fill in the InstallPlan::partitions based on the partitions from the diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc index 5b2b5f43..61917ea5 100644 --- a/payload_consumer/filesystem_verifier_action.cc +++ b/payload_consumer/filesystem_verifier_action.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include "update_engine/common/utils.h" @@ -89,6 +90,20 @@ void FilesystemVerifierAction::UpdateProgress(double progress) { void FilesystemVerifierAction::StartPartitionHashing() { if (partition_index_ == install_plan_.partitions.size()) { + if (!install_plan_.untouched_dynamic_partitions.empty()) { + LOG(INFO) << "Verifying extents of untouched dynamic partitions [" + << base::JoinString(install_plan_.untouched_dynamic_partitions, + ", ") + << "]"; + if (!dynamic_control_->VerifyExtentsForUntouchedPartitions( + install_plan_.source_slot, + install_plan_.target_slot, + install_plan_.untouched_dynamic_partitions)) { + Cleanup(ErrorCode::kFilesystemVerifierError); + return; + } + } + Cleanup(ErrorCode::kSuccess); return; } diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h index 7d179dfc..6a8823a5 100644 --- a/payload_consumer/filesystem_verifier_action.h +++ b/payload_consumer/filesystem_verifier_action.h @@ -57,8 +57,13 @@ class FilesystemVerifyDelegate { class FilesystemVerifierAction : public InstallPlanAction { public: - FilesystemVerifierAction() - : verity_writer_(verity_writer::CreateVerityWriter()) {} + explicit FilesystemVerifierAction( + DynamicPartitionControlInterface* dynamic_control) + : verity_writer_(verity_writer::CreateVerityWriter()), + dynamic_control_(dynamic_control) { + CHECK(dynamic_control_); + } + ~FilesystemVerifierAction() override = default; void PerformAction() override; @@ -123,6 +128,9 @@ class FilesystemVerifierAction : public InstallPlanAction { // Write verity data of the current partition. std::unique_ptr verity_writer_; + // Verifies the untouched dynamic partitions for partial updates. + DynamicPartitionControlInterface* dynamic_control_{nullptr}; + // Reads and hashes this many bytes from the head of the input stream. 
When // the partition starts to be hashed, this field is initialized from the // corresponding InstallPlan::Partition size which is the total size diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc index cb33404d..2971849c 100644 --- a/payload_consumer/filesystem_verifier_action_unittest.cc +++ b/payload_consumer/filesystem_verifier_action_unittest.cc @@ -27,6 +27,7 @@ #include #include +#include "update_engine/common/dynamic_partition_control_stub.h" #include "update_engine/common/hash_calculator.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" @@ -51,6 +52,7 @@ class FilesystemVerifierActionTest : public ::testing::Test { brillo::FakeMessageLoop loop_{nullptr}; ActionProcessor processor_; + DynamicPartitionControlStub dynamic_control_stub_; }; class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate { @@ -188,7 +190,8 @@ bool FilesystemVerifierActionTest::DoTest(bool terminate_early, void FilesystemVerifierActionTest::BuildActions( const InstallPlan& install_plan) { auto feeder_action = std::make_unique>(); - auto verifier_action = std::make_unique(); + auto verifier_action = + std::make_unique(&dynamic_control_stub_); auto collector_action = std::make_unique>(); @@ -217,7 +220,8 @@ class FilesystemVerifierActionTest2Delegate : public ActionProcessorDelegate { }; TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) { - auto copier_action = std::make_unique(); + auto copier_action = + std::make_unique(&dynamic_control_stub_); auto collector_action = std::make_unique>(); diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index 63178bd8..f04c6504 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -158,6 +158,10 @@ struct InstallPlan { // If not blank, a base-64 encoded representation of the PEM-encoded // public key in the response. std::string public_key_rsa; + + // The name of dynamic partitions not included in the payload. Only used + // for partial updates. + std::vector untouched_dynamic_partitions; }; class InstallPlanAction; diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc index aa3f2e57..5768dd6f 100644 --- a/payload_consumer/partition_update_generator_android.cc +++ b/payload_consumer/partition_update_generator_android.cc @@ -50,25 +50,14 @@ bool PartitionUpdateGeneratorAndroid:: BootControlInterface::Slot target_slot, const std::set& partitions_in_payload, std::vector* update_list) { - auto ret = GetStaticAbPartitionsOnDevice(); - if (!ret.has_value()) { + auto ab_partitions = GetStaticAbPartitionsOnDevice(); + if (!ab_partitions.has_value()) { LOG(ERROR) << "Failed to load static a/b partitions"; return false; } - auto ab_partitions = ret.value(); - - // Add the dynamic partitions. 
- auto dynamic_control = boot_control_->GetDynamicPartitionControl(); - std::vector dynamic_partitions; - if (!dynamic_control->ListDynamicPartitionsForSlot(source_slot, - &dynamic_partitions)) { - LOG(ERROR) << "Failed to load dynamic partitions from slot " << source_slot; - return false; - } - ab_partitions.insert(dynamic_partitions.begin(), dynamic_partitions.end()); std::vector partition_updates; - for (const auto& partition_name : ab_partitions) { + for (const auto& partition_name : ab_partitions.value()) { if (partitions_in_payload.find(partition_name) != partitions_in_payload.end()) { LOG(INFO) << partition_name << " has included in payload"; @@ -159,13 +148,15 @@ PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( return std::nullopt; } - if (is_source_dynamic != is_target_dynamic) { - LOG(ERROR) << "Source slot " << source_slot << " for partition " - << partition_name << " is " << (is_source_dynamic ? "" : "not") - << " dynamic, but target slot " << target_slot << " is " + if (is_source_dynamic || is_target_dynamic) { + LOG(ERROR) << "Partition " << partition_name << " is expected to be a" + << " static partition. source slot is " + << (is_source_dynamic ? "" : "not") + << " dynamic, and target slot " << target_slot << " is " << (is_target_dynamic ? "" : "not") << " dynamic."; return std::nullopt; } + auto source_size = utils::FileSize(source_device); auto target_size = utils::FileSize(target_device); if (source_size == -1 || target_size == -1 || source_size != target_size || @@ -175,11 +166,8 @@ PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( return std::nullopt; } - return CreatePartitionUpdate(partition_name, - source_device, - target_device, - source_size, - is_source_dynamic); + return CreatePartitionUpdate( + partition_name, source_device, target_device, source_size); } std::optional @@ -187,8 +175,7 @@ PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( const std::string& partition_name, const std::string& source_device, const std::string& target_device, - int64_t partition_size, - bool is_dynamic) { + int64_t partition_size) { PartitionUpdate partition_update; partition_update.set_partition_name(partition_name); auto old_partition_info = partition_update.mutable_old_partition_info(); @@ -202,18 +189,15 @@ PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( auto new_partition_info = partition_update.mutable_new_partition_info(); new_partition_info->set_size(partition_size); new_partition_info->set_hash(raw_hash->data(), raw_hash->size()); - // TODO(xunchang) TBD, should we skip hashing and verification of the - // dynamic partitions not in payload? 
- if (!is_dynamic) { - auto copy_operation = partition_update.add_operations(); - copy_operation->set_type(InstallOperation::SOURCE_COPY); - Extent copy_extent; - copy_extent.set_start_block(0); - copy_extent.set_num_blocks(partition_size / block_size_); - *copy_operation->add_src_extents() = copy_extent; - *copy_operation->add_dst_extents() = copy_extent; - } + auto copy_operation = partition_update.add_operations(); + copy_operation->set_type(InstallOperation::SOURCE_COPY); + Extent copy_extent; + copy_extent.set_start_block(0); + copy_extent.set_num_blocks(partition_size / block_size_); + + *copy_operation->add_src_extents() = copy_extent; + *copy_operation->add_dst_extents() = copy_extent; return partition_update; } diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h index 8f33077f..97b7d838 100644 --- a/payload_consumer/partition_update_generator_android.h +++ b/payload_consumer/partition_update_generator_android.h @@ -56,8 +56,7 @@ class PartitionUpdateGeneratorAndroid const std::string& partition_name, const std::string& source_device, const std::string& target_device, - int64_t partition_size, - bool is_dynamic); + int64_t partition_size); std::optional CreatePartitionUpdate( const std::string& partition_name, diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index f7df2118..eb00333b 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -252,8 +252,8 @@ bool ApplyPayload(const string& payload_file, nullptr, new FileFetcher(), true /* interactive */); - auto filesystem_verifier_action = - std::make_unique(); + auto filesystem_verifier_action = std::make_unique( + fake_boot_control.GetDynamicPartitionControl()); BondActions(install_plan_action.get(), download_action.get()); BondActions(download_action.get(), filesystem_verifier_action.get()); diff --git a/update_attempter.cc b/update_attempter.cc index 60c2c36c..f37973ef 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -818,8 +818,8 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { system_state_->hardware()), false, session_id_); - auto filesystem_verifier_action = - std::make_unique(); + auto filesystem_verifier_action = std::make_unique( + system_state_->boot_control()->GetDynamicPartitionControl()); auto update_complete_action = std::make_unique( system_state_, new OmahaEvent(OmahaEvent::kTypeUpdateComplete), diff --git a/update_attempter_android.cc b/update_attempter_android.cc index a554d380..7fc13e11 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -738,8 +738,8 @@ void UpdateAttempterAndroid::BuildUpdateActions(HttpFetcher* fetcher) { true /* interactive */); download_action->set_delegate(this); download_action->set_base_offset(base_offset_); - auto filesystem_verifier_action = - std::make_unique(); + auto filesystem_verifier_action = std::make_unique( + boot_control_->GetDynamicPartitionControl()); auto postinstall_runner_action = std::make_unique(boot_control_, hardware_); filesystem_verifier_action->set_delegate(this); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 0086dd5d..305dbdb9 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -663,7 +663,8 @@ TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) { EXPECT_EQ( ErrorCode::kOmahaResponseHandlerError, GetErrorCodeForAction(&omaha_response_handler_action, ErrorCode::kError)); - 
FilesystemVerifierAction filesystem_verifier_action; + DynamicPartitionControlStub dynamic_control_stub; + FilesystemVerifierAction filesystem_verifier_action(&dynamic_control_stub); EXPECT_EQ( ErrorCode::kFilesystemVerifierError, GetErrorCodeForAction(&filesystem_verifier_action, ErrorCode::kError)); From ef49160c9bd2621dd3084fa061f09d176304ca49 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Thu, 9 Jul 2020 17:04:28 -0700 Subject: [PATCH 323/624] Validate payload data for each operation For streaming update, we currently verify: 1. the hash of manifest before applying ops 2. the hash of the entire payload after we apply all ops 3. the final patched on filesystems after the update So there is some potential to exploit the patching libraries, if some attacker manage to provide us malicious patch data after the manifest verification. Therefore, this cl enables the validation of patch data for each install operation. The hash itself is embedded in the payload manifest; and thus has been verified upfront. Bug: 160800689 Test: unittests, apply an OTA Change-Id: Idd4cbe167ce63f197d821752f75e45add0ea829c --- payload_consumer/delta_performer.cc | 37 +++++++++++++---------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 7d837db4..3a09ec1f 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -667,27 +667,24 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { if (!CanPerformInstallOperation(op)) return true; - // Validate the operation only if the metadata signature is present. - // Otherwise, keep the old behavior. This serves as a knob to disable - // the validation logic in case we find some regression after rollout. - // NOTE: If hash checks are mandatory and if metadata_signature is empty, - // we would have already failed in ParsePayloadMetadata method and thus not - // even be here. So no need to handle that case again here. - if (!payload_->metadata_signature.empty()) { - // Note: Validate must be called only if CanPerformInstallOperation is - // called. Otherwise, we might be failing operations before even if there - // isn't sufficient data to compute the proper hash. - *error = ValidateOperationHash(op); - if (*error != ErrorCode::kSuccess) { - if (install_plan_->hash_checks_mandatory) { - LOG(ERROR) << "Mandatory operation hash check failed"; - return false; - } - - // For non-mandatory cases, just send a UMA stat. - LOG(WARNING) << "Ignoring operation validation errors"; - *error = ErrorCode::kSuccess; + // Validate the operation unconditionally. This helps prevent the + // exploitation of vulnerabilities in the patching libraries, e.g. bspatch. + // The hash of the patch data for a given operation is embedded in the + // payload metadata; and thus has been verified against the public key on + // device. + // Note: Validate must be called only if CanPerformInstallOperation is + // called. Otherwise, we might be failing operations before even if there + // isn't sufficient data to compute the proper hash. + *error = ValidateOperationHash(op); + if (*error != ErrorCode::kSuccess) { + if (install_plan_->hash_checks_mandatory) { + LOG(ERROR) << "Mandatory operation hash check failed"; + return false; } + + // For non-mandatory cases, just send a UMA stat. + LOG(WARNING) << "Ignoring operation validation errors"; + *error = ErrorCode::kSuccess; } // Makes sure we unblock exit when this operation completes. 
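To make the intent of the now-unconditional check concrete, the per-operation
validation boils down to the sketch below. This is an illustration only: the real
logic lives in DeltaPerformer::ValidateOperationHash(), and the helper and
error-code names used here are assumptions rather than exact quotations from it.

  // Hash the data blob attached to one InstallOperation and compare it with
  // the data_sha256_hash recorded in the manifest, which has already been
  // verified against the payload signature.
  ErrorCode CheckOperationDataHash(const InstallOperation& op,
                                   const brillo::Blob& op_data) {
    if (!op.has_data_sha256_hash() || op.data_sha256_hash().empty())
      return ErrorCode::kDownloadOperationHashMissingError;
    brillo::Blob actual_hash;
    if (!HashCalculator::RawHashOfBytes(
            op_data.data(), op_data.size(), &actual_hash))
      return ErrorCode::kError;
    brillo::Blob expected_hash(op.data_sha256_hash().begin(),
                               op.data_sha256_hash().end());
    return expected_hash == actual_hash
               ? ErrorCode::kSuccess
               : ErrorCode::kDownloadOperationHashMismatch;
  }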
From 1205ea684e81a6f14db7d5556f3eda2fd014b8d8 Mon Sep 17 00:00:00 2001
From: Tianjie
Date: Thu, 9 Jul 2020 17:04:28 -0700
Subject: [PATCH 324/624] Validate payload data for each operation

For a streaming update, we currently verify:
1. the hash of the manifest before applying ops
2. the hash of the entire payload after we apply all ops
3. the final patched filesystems after the update

So there is some potential to exploit the patching libraries if an
attacker manages to provide malicious patch data after the manifest
verification. Therefore, this CL enables validation of the patch data
for each install operation. The hash itself is embedded in the payload
manifest and thus has been verified upfront.

Bug: 160800689
Test: unittests, apply an OTA
Change-Id: Idd4cbe167ce63f197d821752f75e45add0ea829c
(cherry picked from commit ef49160c9bd2621dd3084fa061f09d176304ca49)
---
 payload_consumer/delta_performer.cc | 37 +++++++++++++----------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 4c4ff041..15973e93 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -690,27 +690,24 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
     if (!CanPerformInstallOperation(op))
       return true;

-    // Validate the operation only if the metadata signature is present.
-    // Otherwise, keep the old behavior. This serves as a knob to disable
-    // the validation logic in case we find some regression after rollout.
-    // NOTE: If hash checks are mandatory and if metadata_signature is empty,
-    // we would have already failed in ParsePayloadMetadata method and thus not
-    // even be here. So no need to handle that case again here.
-    if (!payload_->metadata_signature.empty()) {
-      // Note: Validate must be called only if CanPerformInstallOperation is
-      // called. Otherwise, we might be failing operations before even if there
-      // isn't sufficient data to compute the proper hash.
-      *error = ValidateOperationHash(op);
-      if (*error != ErrorCode::kSuccess) {
-        if (install_plan_->hash_checks_mandatory) {
-          LOG(ERROR) << "Mandatory operation hash check failed";
-          return false;
-        }
-
-        // For non-mandatory cases, just send a UMA stat.
-        LOG(WARNING) << "Ignoring operation validation errors";
-        *error = ErrorCode::kSuccess;
+    // Validate the operation unconditionally. This helps prevent the
+    // exploitation of vulnerabilities in the patching libraries, e.g. bspatch.
+    // The hash of the patch data for a given operation is embedded in the
+    // payload metadata; and thus has been verified against the public key on
+    // device.
+    // Note: Validate must be called only if CanPerformInstallOperation is
+    // called. Otherwise, we might be failing operations before even if there
+    // isn't sufficient data to compute the proper hash.
+    *error = ValidateOperationHash(op);
+    if (*error != ErrorCode::kSuccess) {
+      if (install_plan_->hash_checks_mandatory) {
+        LOG(ERROR) << "Mandatory operation hash check failed";
+        return false;
       }
+
+      // For non-mandatory cases, just send a UMA stat.
+      LOG(WARNING) << "Ignoring operation validation errors";
+      *error = ErrorCode::kSuccess;
     }

     // Makes sure we unblock exit when this operation completes.

From ef1f614b3567fef8a5503204edde3c3e293f7840 Mon Sep 17 00:00:00 2001
From: Sen Jiang
Date: Mon, 13 Jul 2020 19:20:50 -0700
Subject: [PATCH 325/624] Add new Android owners.
Test: None Change-Id: Ida8735515089b363e6c714721671b81901e0a9bc --- OWNERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/OWNERS b/OWNERS index 75fd9f16..938752f6 100644 --- a/OWNERS +++ b/OWNERS @@ -2,7 +2,9 @@ set noparent # Android et. al. maintainers: deymo@google.com +elsk@google.com senj@google.com +xunchang@google.com # Chromium OS maintainers: ahassani@google.com From cc011d3b0ad7f359415a63399e94b62752572a28 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 10 Jul 2020 18:20:08 -0400 Subject: [PATCH 326/624] Cache OTA manifest in update_engine When resuming download of an OTA package, update_engine needs to redownload the manifest of OTA package, located at beginning of file. However, the manifest blob can be as large as ~150K for some updates. This CL caches manifest on disk, so that update engine no longer has to redownload the manifest for every resume. Test: Perform OTA, pause and resume, verify that update succeeds Bug: 70736331 Change-Id: Iaf157ef57e68e4842d5867dea5467a3ab34f8286 --- common/constants.cc | 1 + common/constants.h | 1 + payload_consumer/delta_performer.cc | 3 + payload_consumer/delta_performer.h | 4 +- .../delta_performer_integration_test.cc | 40 +++++----- payload_consumer/download_action.cc | 79 +++++++++++++++---- payload_consumer/download_action.h | 4 + 7 files changed, 95 insertions(+), 37 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index fa13a38a..c85ba543 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -108,6 +108,7 @@ const char kPrefsVerityWritten[] = "verity-written"; const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period"; const char kPrefsWallClockStagingWaitPeriod[] = "wall-clock-staging-wait-period"; +const char kPrefsManifestBytes[] = "manifest-bytes"; // These four fields are generated by scripts/brillo_update_payload. const char kPayloadPropertyFileSize[] = "FILE_SIZE"; diff --git a/common/constants.h b/common/constants.h index eb489fca..71702016 100644 --- a/common/constants.h +++ b/common/constants.h @@ -104,6 +104,7 @@ extern const char kPrefsUrlSwitchCount[]; extern const char kPrefsVerityWritten[]; extern const char kPrefsWallClockScatteringWaitPeriod[]; extern const char kPrefsWallClockStagingWaitPeriod[]; +extern const char kPrefsManifestBytes[]; // Keys used when storing and loading payload properties. extern const char kPayloadPropertyFileSize[]; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 7d837db4..68f38df8 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -585,6 +585,9 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { if ((*error = ValidateManifest()) != ErrorCode::kSuccess) return false; manifest_valid_ = true; + if (!install_plan_->is_resume) { + prefs_->SetString(kPrefsManifestBytes, {buffer_.begin(), buffer_.end()}); + } // Clear the download buffer. DiscardBuffer(false, metadata_size_); diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 7b30a83d..2d1768dd 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -78,7 +78,9 @@ class DeltaPerformer : public FileWriter { download_delegate_(download_delegate), install_plan_(install_plan), payload_(payload), - interactive_(interactive) {} + interactive_(interactive) { + CHECK(install_plan_); + } // FileWriter's Write implementation where caller doesn't care about // error codes. 
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 16641c6d..acbecad5 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -188,7 +189,7 @@ static void SignGeneratedPayload(const string& payload_path, string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath); size_t signature_size; ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path, - &signature_size)); + &signature_size)); brillo::Blob metadata_hash, payload_hash; ASSERT_TRUE(PayloadSigner::HashPayloadForSigning( payload_path, {signature_size}, &payload_hash, &metadata_hash)); @@ -226,12 +227,13 @@ static void SignGeneratedShellPayloadWithKeys( string delta_generator_path = GetBuildArtifactsPath("delta_generator"); ASSERT_EQ(0, System(base::StringPrintf( - "%s -in_file=%s -signature_size=%s -out_hash_file=%s " - "-out_metadata_hash_file=%s", + "%s -in_file=%s -signature_size=%s -out_hash_file=%s " + "-out_metadata_hash_file=%s", delta_generator_path.c_str(), payload_path.c_str(), signature_size_string.c_str(), - hash_file.path().c_str(), metadata_hash_file.path().c_str()))); + hash_file.path().c_str(), + metadata_hash_file.path().c_str()))); // Sign the hash with all private keys. vector sig_files, metadata_sig_files; @@ -248,16 +250,19 @@ static void SignGeneratedShellPayloadWithKeys( brillo::Blob metadata_hash, metadata_signature; ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash)); - ASSERT_TRUE(PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature)); + ASSERT_TRUE( + PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature)); test_utils::ScopedTempFile metadata_sig_file("signature.XXXXXX"); - ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(), metadata_signature)); + ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(), + metadata_signature)); metadata_sig_file_paths.push_back(metadata_sig_file.path()); metadata_sig_files.push_back(std::move(metadata_sig_file)); } string sig_files_string = base::JoinString(sig_file_paths, ":"); - string metadata_sig_files_string = base::JoinString(metadata_sig_file_paths, ":"); + string metadata_sig_files_string = + base::JoinString(metadata_sig_file_paths, ":"); // Add the signature to the payload. 
ASSERT_EQ(0, @@ -735,6 +740,11 @@ static void ApplyDeltaFile(bool full_kernel, .WillRepeatedly(Return(true)); EXPECT_CALL(prefs, SetString(kPrefsDynamicPartitionMetadataUpdated, _)) .WillRepeatedly(Return(true)); + EXPECT_CALL(prefs, + SetString(kPrefsManifestBytes, + testing::SizeIs(state->metadata_signature_size + + state->metadata_size))) + .WillRepeatedly(Return(true)); if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) { EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _)) .WillOnce(Return(true)); @@ -1026,12 +1036,8 @@ TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) { } TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) { - DoSmallImageTest(true, - true, - -1, - kSignatureGenerator, - true, - kFullPayloadMinorVersion); + DoSmallImageTest( + true, true, -1, kSignatureGenerator, true, kFullPayloadMinorVersion); } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) { @@ -1094,12 +1100,8 @@ TEST(DeltaPerformerIntegrationTest, } TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) { - DoSmallImageTest(false, - false, - -1, - kSignatureGenerator, - false, - kSourceMinorPayloadVersion); + DoSmallImageTest( + false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion); } TEST(DeltaPerformerIntegrationTest, diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc index 45df5a92..ea998926 100644 --- a/payload_consumer/download_action.cc +++ b/payload_consumer/download_action.cc @@ -55,8 +55,7 @@ DownloadAction::DownloadAction(PrefsInterface* prefs, code_(ErrorCode::kSuccess), delegate_(nullptr), p2p_sharing_fd_(-1), - p2p_visible_(true) { -} + p2p_visible_(true) {} DownloadAction::~DownloadAction() {} @@ -203,18 +202,76 @@ void DownloadAction::PerformAction() { StartDownloading(); } +bool DownloadAction::LoadCachedManifest(int64_t manifest_size) { + std::string cached_manifest_bytes; + if (!prefs_->GetString(kPrefsManifestBytes, &cached_manifest_bytes) || + cached_manifest_bytes.size() <= 0) { + LOG(INFO) << "Cached Manifest data not found"; + return false; + } + if (static_cast(cached_manifest_bytes.size()) != manifest_size) { + LOG(WARNING) << "Cached metadata has unexpected size: " + << cached_manifest_bytes.size() << " vs. " << manifest_size; + return false; + } + + ErrorCode error; + const bool success = + delta_performer_->Write( + cached_manifest_bytes.data(), cached_manifest_bytes.size(), &error) && + delta_performer_->IsManifestValid(); + if (success) { + LOG(INFO) << "Successfully parsed cached manifest"; + } else { + // If parsing of cached data failed, fall back to fetch them using HTTP + LOG(WARNING) << "Cached manifest data fails to load, error code:" + << static_cast(error) << "," << error; + } + return success; +} + void DownloadAction::StartDownloading() { download_active_ = true; http_fetcher_->ClearRanges(); + + if (writer_ && writer_ != delta_performer_.get()) { + LOG(INFO) << "Using writer for test."; + } else { + delta_performer_.reset(new DeltaPerformer(prefs_, + boot_control_, + hardware_, + delegate_, + &install_plan_, + payload_, + interactive_)); + writer_ = delta_performer_.get(); + } + if (install_plan_.is_resume && payload_ == &install_plan_.payloads[resume_payload_index_]) { - // Resuming an update so fetch the update manifest metadata first. 
+ // Resuming an update so parse the cached manifest first int64_t manifest_metadata_size = 0; int64_t manifest_signature_size = 0; prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size); prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size); - http_fetcher_->AddRange(base_offset_, - manifest_metadata_size + manifest_signature_size); + + // TODO(zhangkelvin) Add unittest for success and fallback route + if (!LoadCachedManifest(manifest_metadata_size + manifest_signature_size)) { + if (delta_performer_) { + // Create a new DeltaPerformer to reset all its state + delta_performer_ = std::make_unique(prefs_, + boot_control_, + hardware_, + delegate_, + &install_plan_, + payload_, + interactive_); + writer_ = delta_performer_.get(); + } + http_fetcher_->AddRange(base_offset_, + manifest_metadata_size + manifest_signature_size); + } + // If there're remaining unprocessed data blobs, fetch them. Be careful not // to request data beyond the end of the payload to avoid 416 HTTP response // error codes. @@ -238,18 +295,6 @@ void DownloadAction::StartDownloading() { } } - if (writer_ && writer_ != delta_performer_.get()) { - LOG(INFO) << "Using writer for test."; - } else { - delta_performer_.reset(new DeltaPerformer(prefs_, - boot_control_, - hardware_, - delegate_, - &install_plan_, - payload_, - interactive_)); - writer_ = delta_performer_.get(); - } if (system_state_ != nullptr) { const PayloadStateInterface* payload_state = system_state_->payload_state(); string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size); diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h index 61c93d28..740416da 100644 --- a/payload_consumer/download_action.h +++ b/payload_consumer/download_action.h @@ -131,6 +131,10 @@ class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate { // called or if CloseP2PSharingFd() has been called. void WriteToP2PFile(const void* data, size_t length, off_t file_offset); + // Attempt to load cached manifest data from prefs + // return true on success, false otherwise. + bool LoadCachedManifest(int64_t manifest_size); + // Start downloading the current payload using delta_performer. void StartDownloading(); From f5baff4655a9b0c13510d7f2dc222f5e66bed505 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Fri, 17 Jul 2020 21:43:22 -0700 Subject: [PATCH 327/624] Support generation of partial updates Add a new minor version kPartialUpdateMinorPayloadVersion for partial updates. Also, we always treat the partial update as a delta update in payload consumer, so new update_engine can perform minor version check correctly. Conceptually, partial update is indeed a delta update; because we need to copy | use the untouched partitions. Since the payload for the partial update doesn't carry old partition info, old update engines will treat them as full update. So old UE will also fail the minor version check correctly; because we always expect kFullPayloadMinorVersion for full updates. 
Bug: 157778739 Test: generate & apply partial full|incremental updates, generate regular updates, unittests pass Change-Id: I7f8365cf99098269150dd08e028120354944f3c6 --- payload_consumer/delta_performer.cc | 7 +++++-- payload_consumer/payload_constants.cc | 1 + payload_consumer/payload_constants.h | 3 +++ payload_generator/generate_delta_main.cc | 11 +++++++++- payload_generator/payload_file.cc | 3 +++ .../payload_generation_config.cc | 12 ++++++++--- payload_generator/payload_generation_config.h | 8 +++++-- scripts/brillo_update_payload | 21 ++++++++++++++++--- 8 files changed, 55 insertions(+), 11 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 68f38df8..d8f0ef56 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1579,9 +1579,12 @@ ErrorCode DeltaPerformer::ValidateManifest() { }); // The presence of an old partition hash is the sole indicator for a delta - // update. + // update. Also, always treat the partial update as delta so that we can + // perform the minor version check correctly. InstallPayloadType actual_payload_type = - has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull; + (has_old_fields || manifest_.partial_update()) + ? InstallPayloadType::kDelta + : InstallPayloadType::kFull; if (payload_->type == InstallPayloadType::kUnknown) { LOG(INFO) << "Detected a '" diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 1c987bdc..28404fee 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -33,6 +33,7 @@ const uint32_t kOpSrcHashMinorPayloadVersion = 3; const uint32_t kBrotliBsdiffMinorPayloadVersion = 4; const uint32_t kPuffdiffMinorPayloadVersion = 5; const uint32_t kVerityMinorPayloadVersion = 6; +const uint32_t kPartialUpdateMinorPayloadVersion = 7; const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion; const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion; diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h index 5c2d17cb..03647ee7 100644 --- a/payload_consumer/payload_constants.h +++ b/payload_consumer/payload_constants.h @@ -56,6 +56,9 @@ extern const uint32_t kPuffdiffMinorPayloadVersion; // The minor version that allows Verity hash tree and FEC generation. extern const uint32_t kVerityMinorPayloadVersion; +// The minor version that allows partial update, e.g. kernel only update. +extern const uint32_t kPartialUpdateMinorPayloadVersion; + // The minimum and maximum supported minor version. extern const uint32_t kMinSupportedMinorPayloadVersion; extern const uint32_t kMaxSupportedMinorPayloadVersion; diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index eb00333b..18cff4b1 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -445,6 +445,10 @@ int Main(int argc, char** argv) { out_maximum_signature_size_file, "", "Path to the output maximum signature size given a private key."); + DEFINE_bool(is_partial_update, + false, + "The payload only targets a subset of partitions on the device," + "e.g. 
generic kernel image update."); brillo::FlagHelper::Init( argc, @@ -629,6 +633,10 @@ int Main(int argc, char** argv) { CHECK(payload_config.target.ValidateDynamicPartitionMetadata()); } + if (FLAGS_is_partial_update) { + payload_config.is_partial_update = true; + } + CHECK(!FLAGS_out_file.empty()); // Ignore failures. These are optional arguments. @@ -702,7 +710,8 @@ int Main(int argc, char** argv) { payload_config.max_timestamp = FLAGS_max_timestamp; - if (payload_config.version.minor >= kVerityMinorPayloadVersion) + if (payload_config.is_delta && + payload_config.version.minor >= kVerityMinorPayloadVersion) CHECK(payload_config.target.LoadVerityConfig()); LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full") diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index 69325d77..940e9bdc 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -78,6 +78,9 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { *(manifest_.mutable_dynamic_partition_metadata()) = *(config.target.dynamic_partition_metadata); + if (config.is_partial_update) { + manifest_.set_partial_update(true); + } return true; } diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index b653a034..9c5832d4 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -234,7 +234,8 @@ bool PayloadVersion::Validate() const { minor == kOpSrcHashMinorPayloadVersion || minor == kBrotliBsdiffMinorPayloadVersion || minor == kPuffdiffMinorPayloadVersion || - minor == kVerityMinorPayloadVersion); + minor == kVerityMinorPayloadVersion || + minor == kPartialUpdateMinorPayloadVersion); return true; } @@ -273,13 +274,14 @@ bool PayloadVersion::OperationAllowed(InstallOperation::Type operation) const { return false; } -bool PayloadVersion::IsDelta() const { +bool PayloadVersion::IsDeltaOrPartial() const { return minor != kFullPayloadMinorVersion; } bool PayloadGenerationConfig::Validate() const { TEST_AND_RETURN_FALSE(version.Validate()); - TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta); + TEST_AND_RETURN_FALSE(version.IsDeltaOrPartial() == + (is_delta || is_partial_update)); if (is_delta) { for (const PartitionConfig& part : source.partitions) { if (!part.path.empty()) { @@ -307,6 +309,10 @@ bool PayloadGenerationConfig::Validate() const { TEST_AND_RETURN_FALSE(part.verity.IsEmpty()); } + if (version.minor < kPartialUpdateMinorPayloadVersion) { + TEST_AND_RETURN_FALSE(!is_partial_update); + } + TEST_AND_RETURN_FALSE(hard_chunk_size == -1 || hard_chunk_size % block_size == 0); TEST_AND_RETURN_FALSE(soft_chunk_size % block_size == 0); diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h index af6f181b..9abb97f3 100644 --- a/payload_generator/payload_generation_config.h +++ b/payload_generator/payload_generation_config.h @@ -170,8 +170,8 @@ struct PayloadVersion { // Return whether the passed |operation| is allowed by this payload. bool OperationAllowed(InstallOperation::Type operation) const; - // Whether this payload version is a delta payload. - bool IsDelta() const; + // Whether this payload version is a delta or partial payload. + bool IsDeltaOrPartial() const; // The major version of the payload. uint64_t major; @@ -198,6 +198,10 @@ struct PayloadGenerationConfig { // Whether the requested payload is a delta payload. 
bool is_delta = false; + // Whether the requested payload is a partial payload, i.e. only update a + // subset of partitions on device. + bool is_partial_update = false; + // The major/minor version of the payload. PayloadVersion version; diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload index d9c18ff6..63b6d24e 100755 --- a/scripts/brillo_update_payload +++ b/scripts/brillo_update_payload @@ -189,6 +189,9 @@ system running on the device, 0 if not specified." DEFINE_string disable_fec_computation "" \ "Optional: Disables the on device fec data computation for incremental \ update. This feature is enabled by default." + DEFINE_string is_partial_update "" \ + "Optional: True if the payload is for partial update. i.e. it only updates \ +a subset of partitions on device." fi if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then DEFINE_string unsigned_payload "" "Path to the input unsigned payload." @@ -654,21 +657,33 @@ cmd_generate() { --new_mapfiles="${new_mapfiles}" ) + if [[ "${FLAGS_is_partial_update}" == "true" ]]; then + GENERATOR_ARGS+=( --is_partial_update="true" ) + # Need at least minor version 7 for partial update, so generate with minor + # version 7 if we don't have a source image. Let the delta_generator to fail + # the other incompatiable minor versions. + if [[ -z "${FORCE_MINOR_VERSION}" ]]; then + FORCE_MINOR_VERSION="7" + fi + fi + if [[ "${payload_type}" == "delta" ]]; then # Source image args: GENERATOR_ARGS+=( --old_partitions="${old_partitions}" --old_mapfiles="${old_mapfiles}" ) - if [[ -n "${FORCE_MINOR_VERSION}" ]]; then - GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" ) - fi if [[ -n "${FLAGS_disable_fec_computation}" ]]; then GENERATOR_ARGS+=( --disable_fec_computation="${FLAGS_disable_fec_computation}" ) fi fi + # minor version is set only for delta or partial payload. + if [[ -n "${FORCE_MINOR_VERSION}" ]]; then + GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" ) + fi + if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" ) fi From ebd5e25c24c787f33a1377d7b5c6d8f6a3bbbd01 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Wed, 22 Jul 2020 18:27:06 -0400 Subject: [PATCH 328/624] Fix wording to comply with respectful-code https://source.android.com/setup/contribute/respectful-code Test: Run unit tests Change-Id: Ia6647c9cf3224b962286151932118093b9ad979a --- boot_control_chromeos.h | 2 +- hardware_chromeos.cc | 2 +- omaha_request_action.cc | 2 +- omaha_request_builder_xml.cc | 2 +- omaha_request_params.h | 2 +- payload_state.h | 6 +++--- payload_state_unittest.cc | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h index f90e65bd..42716720 100644 --- a/boot_control_chromeos.h +++ b/boot_control_chromeos.h @@ -82,7 +82,7 @@ class BootControlChromeOS : public BootControlInterface { // Extracts DLC module ID and package ID from partition name. The structure of // the partition name is dlc//. For example: - // dlc/dummy-dlc/dummy-package + // dlc/fake-dlc/fake-package bool ParseDlcPartitionName(const std::string partition_name, std::string* dlc_id, std::string* dlc_package) const; diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 5ff1b29a..3adef98f 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -47,7 +47,7 @@ namespace { const char kOOBECompletedMarker[] = "/home/chronos/.oobe_completed"; // The stateful directory used by update_engine to store powerwash-safe files. 
-// The files stored here must be whitelisted in the powerwash scripts. +// The files stored here must be safelisted in the powerwash scripts. const char kPowerwashSafeDirectory[] = "/mnt/stateful_partition/unencrypted/preserve"; diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 3a0b91c7..e37ebab4 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -351,7 +351,7 @@ int OmahaRequestAction::GetInstallDate(SystemState* system_state) { // If we have the value stored on disk, just return it. int64_t stored_value; if (prefs->GetInt64(kPrefsInstallDateDays, &stored_value)) { - // Convert and sanity-check. + // Convert and validity-check. int install_date_days = static_cast(stored_value); if (install_date_days >= 0) return install_date_days; diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 097b9f1e..e2857f16 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -216,7 +216,7 @@ string OmahaRequestBuilderXml::GetCohortArg(const string arg_name, if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty()) return ""; } - // This is a sanity check to avoid sending a huge XML file back to Ohama due + // This is a validity check to avoid sending a huge XML file back to Ohama due // to a compromised stateful partition making the update check fail in low // network environments envent after a reboot. if (cohort_value.size() > 1024) { diff --git a/omaha_request_params.h b/omaha_request_params.h index 34529658..727c2ad4 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -328,7 +328,7 @@ class OmahaRequestParams { bool ToMoreStableChannel() const; // Returns True if we should store the fw/ec versions based on our hwid_. - // Compares hwid to a set of whitelisted prefixes. + // Compares hwid to a set of safelisted prefixes. bool CollectECFWVersions() const; // Gets the machine type (e.g. "i686"). diff --git a/payload_state.h b/payload_state.h index d13c6420..90c3b139 100644 --- a/payload_state.h +++ b/payload_state.h @@ -366,14 +366,14 @@ class PayloadState : public PayloadStateInterface { // check where policy was available. This info is preserved over powerwash. void LoadRollbackHappened(); - // Loads the blacklisted version from our prefs file. + // Loads the blocklisted version from our prefs file. void LoadRollbackVersion(); // Blacklists this version from getting AU'd to until we receive a new update // response. void SetRollbackVersion(const std::string& rollback_version); - // Clears any blacklisted version. + // Clears any blocklisted version. void ResetRollbackVersion(); inline uint32_t GetUrlIndex() { @@ -565,7 +565,7 @@ class PayloadState : public PayloadStateInterface { // forced updates to avoid update-rollback loops. bool rollback_happened_; - // This stores a blacklisted version set as part of rollback. When we rollback + // This stores a blocklisted version set as part of rollback. When we rollback // we store the version of the os from which we are rolling back from in order // to guarantee that we do not re-update to it on the next au attempt after // reboot. diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index bf9aed44..d8d9afac 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -1016,7 +1016,7 @@ TEST(PayloadStateTest, RollbackVersion) { NiceMock* mock_powerwash_safe_prefs = fake_system_state.mock_powerwash_safe_prefs(); - // Mock out the os version and make sure it's blacklisted correctly. 
+ // Mock out the os version and make sure it's blocklisted correctly. string rollback_version = "2345.0.0"; OmahaRequestParams params(&fake_system_state); params.Init(rollback_version, "", false); From 4a8880c6e69e1a9547b102dfb8e9231fe581d3d2 Mon Sep 17 00:00:00 2001 From: Andrew Lassalle Date: Tue, 21 Jul 2020 23:03:59 +0000 Subject: [PATCH 329/624] update_engine: Remove solved TODO The bug that the TODO mentions has already been fixed. BUG=chromium:357676 TEST=None Change-Id: If68b3119e3eee8a0686bd6a9869427dd204e91b7 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2311357 Tested-by: Andrew Lassalle Auto-Submit: Andrew Lassalle Reviewed-by: Amin Hassani Commit-Queue: Andrew Lassalle --- payload_state.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/payload_state.cc b/payload_state.cc index ce3e1d54..f227026e 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -618,10 +618,6 @@ PayloadType PayloadState::CalculatePayloadType() { return kPayloadTypeForcedFull; } -// TODO(zeuthen): Currently we don't report the UpdateEngine.Attempt.* -// metrics if the attempt ends abnormally, e.g. if the update_engine -// process crashes or the device is rebooted. See -// http://crbug.com/357676 void PayloadState::CollectAndReportAttemptMetrics(ErrorCode code) { int attempt_number = GetPayloadAttemptNumber(); From b57c16e8e593af2984e28cc53ec8cf5750aa8051 Mon Sep 17 00:00:00 2001 From: Andrew Date: Wed, 22 Jul 2020 14:32:39 -0700 Subject: [PATCH 330/624] update_engine: Add new metric to AttemptResult Add more granularity to metric |AttemptResult| to avoid most error metrics falling into the category |kInternalError|. BUG=chromium:1086956 TEST=cros_workon_make update_engine --test Change-Id: I37a4b55543011cc2eb5ced38cebf14d5794e9482 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2314898 Commit-Queue: Andrew Lassalle Tested-by: Andrew Lassalle Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani --- metrics_constants.h | 2 +- metrics_utils.cc | 8 ++++---- payload_state.cc | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/metrics_constants.h b/metrics_constants.h index db21d905..679680c5 100644 --- a/metrics_constants.h +++ b/metrics_constants.h @@ -106,7 +106,7 @@ enum class AttemptResult { kUpdateCanceled, // Update canceled by the user. kUpdateSucceededNotActive, // Update succeeded but the new slot is not // active. - + kUpdateSkipped, // Current update skipped. kNumConstants, kUnset = -1 diff --git a/metrics_utils.cc b/metrics_utils.cc index 0d333ca1..9f0caa52 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -109,10 +109,6 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { case ErrorCode::kDownloadInvalidMetadataSignature: case ErrorCode::kOmahaResponseInvalid: case ErrorCode::kOmahaUpdateIgnoredPerPolicy: - // TODO(deymo): The next two items belong in their own category; they - // should not be counted as internal errors. 
b/27112092 - case ErrorCode::kOmahaUpdateDeferredPerPolicy: - case ErrorCode::kNonCriticalUpdateInOOBE: case ErrorCode::kOmahaErrorInHTTPResponse: case ErrorCode::kDownloadMetadataSignatureMissingError: case ErrorCode::kOmahaUpdateDeferredForBackoff: @@ -125,6 +121,10 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { case ErrorCode::kPackageExcludedFromUpdate: return metrics::AttemptResult::kInternalError; + case ErrorCode::kOmahaUpdateDeferredPerPolicy: + case ErrorCode::kNonCriticalUpdateInOOBE: + return metrics::AttemptResult::kUpdateSkipped; + // Special flags. These can't happen (we mask them out above) but // the compiler doesn't know that. Just break out so we can warn and // return |kInternalError|. diff --git a/payload_state.cc b/payload_state.cc index f227026e..b6c054bb 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -672,6 +672,7 @@ void PayloadState::CollectAndReportAttemptMetrics(ErrorCode code) { case metrics::AttemptResult::kAbnormalTermination: case metrics::AttemptResult::kUpdateCanceled: case metrics::AttemptResult::kUpdateSucceededNotActive: + case metrics::AttemptResult::kUpdateSkipped: case metrics::AttemptResult::kNumConstants: case metrics::AttemptResult::kUnset: break; From 934b847620e664159f3952eeb3d48c3510a43676 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 24 Jun 2020 23:10:49 -0700 Subject: [PATCH 331/624] Update UE to remove MessageLoop::current()->WatchFileDescriptor. MessageLoop::current()->WatchFileDescriptor is deprecated. And UE should remove usages of it. Test: mma && unittest Change-Id: Ib1ef2e6b6a38ad2a8d07b78bcd72fdb3b7f82226 --- common/subprocess.cc | 19 ---- common/subprocess.h | 6 +- common/subprocess_unittest.cc | 20 ---- libcurl_http_fetcher.cc | 100 +++--------------- libcurl_http_fetcher.h | 4 - libcurl_http_fetcher_unittest.cc | 52 +++++---- payload_consumer/postinstall_runner_action.cc | 22 ---- payload_consumer/postinstall_runner_action.h | 4 - 8 files changed, 45 insertions(+), 182 deletions(-) diff --git a/common/subprocess.cc b/common/subprocess.cc index 298a65c2..3e197fb2 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -129,12 +129,7 @@ void Subprocess::OnStdoutReady(SubprocessRecord* record) { if (!ok || eof) { // There was either an error or an EOF condition, so we are done watching // the file descriptor. -#ifdef __ANDROID__ - MessageLoop::current()->CancelTask(record->stdout_task_id); - record->stdout_task_id = MessageLoop::kTaskIdNull; -#else record->stdout_controller.reset(); -#endif // __ANDROID__ return; } } while (bytes_read); @@ -149,12 +144,7 @@ void Subprocess::ChildExitedCallback(const siginfo_t& info) { // Make sure we read any remaining process output and then close the pipe. OnStdoutReady(record); -#ifdef __ANDROID__ - MessageLoop::current()->CancelTask(record->stdout_task_id); - record->stdout_task_id = MessageLoop::kTaskIdNull; -#else record->stdout_controller.reset(); -#endif // __ANDROID__ // Don't print any log if the subprocess exited with exit code 0. 
if (info.si_code != CLD_EXITED) { @@ -209,18 +199,9 @@ pid_t Subprocess::ExecFlags(const vector& cmd, << record->stdout_fd << "."; } -#ifdef __ANDROID__ - record->stdout_task_id = MessageLoop::current()->WatchFileDescriptor( - FROM_HERE, - record->stdout_fd, - MessageLoop::WatchMode::kWatchRead, - true, - base::Bind(&Subprocess::OnStdoutReady, record.get())); -#else record->stdout_controller = base::FileDescriptorWatcher::WatchReadable( record->stdout_fd, base::BindRepeating(&Subprocess::OnStdoutReady, record.get())); -#endif // __ANDROID__ subprocess_records_[pid] = std::move(record); return pid; diff --git a/common/subprocess.h b/common/subprocess.h index f1b9f1f7..432d4cb8 100644 --- a/common/subprocess.h +++ b/common/subprocess.h @@ -123,12 +123,8 @@ class Subprocess { // These are used to monitor the stdout of the running process, including // the stderr if it was redirected. -#ifdef __ANDROID__ - brillo::MessageLoop::TaskId stdout_task_id{ - brillo::MessageLoop::kTaskIdNull}; -#else std::unique_ptr stdout_controller; -#endif // __ANDROID__ + int stdout_fd{-1}; std::string stdout; }; diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc index bc52b83e..e71d3d81 100644 --- a/common/subprocess_unittest.cc +++ b/common/subprocess_unittest.cc @@ -74,9 +74,7 @@ class SubprocessTest : public ::testing::Test { brillo::BaseMessageLoop loop_{&base_loop_}; brillo::AsynchronousSignalHandler async_signal_handler_; Subprocess subprocess_; -#ifndef __ANDROID__ unique_ptr watcher_; -#endif // __ANDROID__ }; @@ -261,23 +259,6 @@ TEST_F(SubprocessTest, CancelTest) { int fifo_fd = HANDLE_EINTR(open(fifo_path.c_str(), O_RDONLY)); EXPECT_GE(fifo_fd, 0); -#ifdef __ANDROID__ - loop_.WatchFileDescriptor(FROM_HERE, - fifo_fd, - MessageLoop::WatchMode::kWatchRead, - false, - base::Bind( - [](int fifo_fd, uint32_t tag) { - char c; - EXPECT_EQ(1, - HANDLE_EINTR(read(fifo_fd, &c, 1))); - EXPECT_EQ('X', c); - LOG(INFO) << "Killing tag " << tag; - Subprocess::Get().KillExec(tag); - }, - fifo_fd, - tag)); -#else watcher_ = base::FileDescriptorWatcher::WatchReadable( fifo_fd, base::Bind( @@ -295,7 +276,6 @@ TEST_F(SubprocessTest, CancelTest) { base::Unretained(&watcher_), fifo_fd, tag)); -#endif // __ANDROID__ // This test would leak a callback that runs when the child process exits // unless we wait for it to run. diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index 7c53a2d9..f8aed7c9 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -31,6 +31,8 @@ #include #include #include +#include + #ifdef __ANDROID__ #include @@ -81,23 +83,9 @@ int LibcurlHttpFetcher::LibcurlCloseSocketCallback(void* clientp, LibcurlHttpFetcher* fetcher = static_cast(clientp); // Stop watching the socket before closing it. -#ifdef __ANDROID__ - for (size_t t = 0; t < arraysize(fetcher->fd_task_maps_); ++t) { - const auto fd_task_pair = fetcher->fd_task_maps_[t].find(item); - if (fd_task_pair != fetcher->fd_task_maps_[t].end()) { - if (!MessageLoop::current()->CancelTask(fd_task_pair->second)) { - LOG(WARNING) << "Error canceling the watch task " - << fd_task_pair->second << " for " - << (t ? "writing" : "reading") << " the fd " << item; - } - fetcher->fd_task_maps_[t].erase(item); - } - } -#else for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) { fetcher->fd_controller_maps_[t].erase(item); } -#endif // __ANDROID__ // Documentation for this callback says to return 0 on success or 1 on error. 
if (!IGNORE_EINTR(close(item))) @@ -471,6 +459,19 @@ void LibcurlHttpFetcher::CurlPerformOnce() { // There's either more work to do or we are paused, so we just keep the // file descriptors to watch up to date and exit, until we are done with the // work and we are not paused. +#ifdef __ANDROID__ + // When there's no base::SingleThreadTaskRunner on current thread, it's not + // possible to watch file descriptors. Just poll it later. This usually + // happens if brillo::FakeMessageLoop is used. + if (!base::ThreadTaskRunnerHandle::IsSet()) { + MessageLoop::current()->PostDelayedTask( + FROM_HERE, + base::Bind(&LibcurlHttpFetcher::CurlPerformOnce, + base::Unretained(this)), + TimeDelta::FromSeconds(1)); + return; + } +#endif SetupMessageLoopSources(); return; } @@ -691,63 +692,6 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { // We should iterate through all file descriptors up to libcurl's fd_max or // the highest one we're tracking, whichever is larger. -#ifdef __ANDROID__ - for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) { - if (!fd_task_maps_[t].empty()) - fd_max = max(fd_max, fd_task_maps_[t].rbegin()->first); - } - - // For each fd, if we're not tracking it, track it. If we are tracking it, but - // libcurl doesn't care about it anymore, stop tracking it. After this loop, - // there should be exactly as many tasks scheduled in fd_task_maps_[0|1] as - // there are read/write fds that we're tracking. - for (int fd = 0; fd <= fd_max; ++fd) { - // Note that fd_exc is unused in the current version of libcurl so is_exc - // should always be false. - bool is_exc = FD_ISSET(fd, &fd_exc) != 0; - bool must_track[2] = { - is_exc || (FD_ISSET(fd, &fd_read) != 0), // track 0 -- read - is_exc || (FD_ISSET(fd, &fd_write) != 0) // track 1 -- write - }; - MessageLoop::WatchMode watch_modes[2] = { - MessageLoop::WatchMode::kWatchRead, - MessageLoop::WatchMode::kWatchWrite, - }; - - for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) { - auto fd_task_it = fd_task_maps_[t].find(fd); - bool tracked = fd_task_it != fd_task_maps_[t].end(); - - if (!must_track[t]) { - // If we have an outstanding io_channel, remove it. - if (tracked) { - MessageLoop::current()->CancelTask(fd_task_it->second); - fd_task_maps_[t].erase(fd_task_it); - } - continue; - } - - // If we are already tracking this fd, continue -- nothing to do. - if (tracked) - continue; - - // Track a new fd. - fd_task_maps_[t][fd] = MessageLoop::current()->WatchFileDescriptor( - FROM_HERE, - fd, - watch_modes[t], - true, // persistent - base::Bind(&LibcurlHttpFetcher::CurlPerformOnce, - base::Unretained(this))); - - static int io_counter = 0; - io_counter++; - if (io_counter % 50 == 0) { - LOG(INFO) << "io_counter = " << io_counter; - } - } - } -#else for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) { if (!fd_controller_maps_[t].empty()) fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first); @@ -803,7 +747,6 @@ void LibcurlHttpFetcher::SetupMessageLoopSources() { } } } -#endif // __ANDROID__ // Set up a timeout callback for libcurl. if (timeout_id_ == MessageLoop::kTaskIdNull) { @@ -848,22 +791,9 @@ void LibcurlHttpFetcher::CleanUp() { MessageLoop::current()->CancelTask(timeout_id_); timeout_id_ = MessageLoop::kTaskIdNull; -#ifdef __ANDROID__ - for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) { - for (const auto& fd_taks_pair : fd_task_maps_[t]) { - if (!MessageLoop::current()->CancelTask(fd_taks_pair.second)) { - LOG(WARNING) << "Error canceling the watch task " << fd_taks_pair.second - << " for " << (t ? 
"writing" : "reading") << " the fd " - << fd_taks_pair.first; - } - } - fd_task_maps_[t].clear(); - } -#else for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) { fd_controller_maps_[t].clear(); } -#endif // __ANDROID__ if (curl_http_headers_) { curl_slist_free_all(curl_http_headers_); diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h index 4854f40b..4e91b693 100644 --- a/libcurl_http_fetcher.h +++ b/libcurl_http_fetcher.h @@ -255,12 +255,8 @@ class LibcurlHttpFetcher : public HttpFetcher { // the message loop. libcurl may open/close descriptors and switch their // directions so maintain two separate lists so that watch conditions can be // set appropriately. -#ifdef __ANDROID__ - std::map fd_task_maps_[2]; -#else std::map> fd_controller_maps_[2]; -#endif // __ANDROID__ // The TaskId of the timer we're waiting on. kTaskIdNull if we are not waiting // on it. diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc index 8064b999..874ef2e9 100644 --- a/libcurl_http_fetcher_unittest.cc +++ b/libcurl_http_fetcher_unittest.cc @@ -94,37 +94,24 @@ TEST_F(LibcurlHttpFetcherTest, InvalidURLTest) { no_network_max_retries); } -#ifdef __ANDROID__ -TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) { +TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) { int no_network_max_retries = 1; libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); - // This test actually sends request to internet but according to - // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are - // reserved and sure to be invalid. Ideally we should mock libcurl or - // reorganize LibcurlHttpFetcher so the part that sends request can be mocked - // easily. - // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its relates) so it's - // easier to mock the part that depends on internet connectivity. libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); - while (loop_.PendingTasks()) { + +#ifdef __ANDROID__ + // It's slower on Android that libcurl handle may not finish within 1 cycle. + // Will need to wait for more cycles until it finishes. Original test didn't + // correctly handle when we need to re-watch libcurl fds. + while (loop_.PendingTasks() && + libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) { loop_.RunOnce(true); } - - // If libcurl fails to resolve the name, we call res_init() to reload - // resolv.conf and retry exactly once more. See crbug.com/982813 for details. - EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(), - no_network_max_retries + 1); -} #else -TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) { - int no_network_max_retries = 1; - libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries); - - libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); - // The first time it can't resolve. loop_.RunOnce(true); +#endif EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), ErrorCode::kUnresolvedHostError); @@ -154,8 +141,18 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { // easier to mock the part that depends on internet connectivity. libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); +#ifdef __ANDROID__ + // It's slower on Android that libcurl handle may not finish within 1 cycle. + // Will need to wait for more cycles until it finishes. Original test didn't + // correctly handle when we need to re-watch libcurl fds. 
+ while (loop_.PendingTasks() && + libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) { + loop_.RunOnce(true); + } +#else // The first time it can't resolve. loop_.RunOnce(true); +#endif EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), ErrorCode::kUnresolvedHostError); @@ -168,9 +165,19 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { [this]() { libcurl_fetcher_.http_response_code_ = 0; })); libcurl_fetcher_.transfer_size_ = 10; +#ifdef __ANDROID__ + // It's slower on Android that libcurl handle may not finish within 1 cycle. + // Will need to wait for more cycles until it finishes. Original test didn't + // correctly handle when we need to re-watch libcurl fds. + while (loop_.PendingTasks() && libcurl_fetcher_.GetAuxiliaryErrorCode() == + ErrorCode::kUnresolvedHostError) { + loop_.RunOnce(true); + } +#else // This time the host is resolved. But after that again we can't resolve // anymore (See above). loop_.RunOnce(true); +#endif EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), ErrorCode::kUnresolvedHostRecovered); @@ -186,7 +193,6 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(), no_network_max_retries + 1); } -#endif // __ANDROID__ TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) { state_machine_.UpdateState(true); diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index c520c7e8..94d03920 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -220,20 +220,10 @@ void PostinstallRunnerAction::PerformPartitionPostinstall() { PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_; } -#ifdef __ANDROID__ - progress_task_ = MessageLoop::current()->WatchFileDescriptor( - FROM_HERE, - progress_fd_, - MessageLoop::WatchMode::kWatchRead, - true, - base::Bind(&PostinstallRunnerAction::OnProgressFdReady, - base::Unretained(this))); -#else progress_controller_ = base::FileDescriptorWatcher::WatchReadable( progress_fd_, base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady, base::Unretained(this))); -#endif // __ANDROID__ } @@ -259,12 +249,7 @@ void PostinstallRunnerAction::OnProgressFdReady() { if (!ok || eof) { // There was either an error or an EOF condition, so we are done watching // the file descriptor. -#ifdef __ANDROID__ - MessageLoop::current()->CancelTask(progress_task_); - progress_task_ = MessageLoop::kTaskIdNull; -#else progress_controller_.reset(); -#endif // __ANDROID__ return; } } while (bytes_read); @@ -308,14 +293,7 @@ void PostinstallRunnerAction::Cleanup() { fs_mount_dir_.clear(); progress_fd_ = -1; -#ifdef __ANDROID__ - if (progress_task_ != MessageLoop::kTaskIdNull) { - MessageLoop::current()->CancelTask(progress_task_); - progress_task_ = MessageLoop::kTaskIdNull; - } -#else progress_controller_.reset(); -#endif // __ANDROID__ progress_buffer_.clear(); } diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h index bbc9e8cc..e4041079 100644 --- a/payload_consumer/postinstall_runner_action.h +++ b/payload_consumer/postinstall_runner_action.h @@ -140,11 +140,7 @@ class PostinstallRunnerAction : public InstallPlanAction { // the postinstall program and the task watching for them. 
int progress_fd_{-1}; -#ifdef __ANDROID__ - brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull}; -#else std::unique_ptr progress_controller_; -#endif // __ANDROID__ // A buffer of a partial read line from the progress file descriptor. std::string progress_buffer_; From c3806663263ba01c35c66f8185402bbbd456380f Mon Sep 17 00:00:00 2001 From: Tianjie Date: Mon, 13 Jul 2020 19:28:15 -0700 Subject: [PATCH 332/624] Bump the minor version Test: unit tests pass, generate & apply a partial update Change-Id: I05f8466c4a92cb3774030451d83005474d8fdc9d --- payload_consumer/payload_constants.cc | 3 ++- update_engine.conf | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 28404fee..d62a0ec6 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -36,7 +36,8 @@ const uint32_t kVerityMinorPayloadVersion = 6; const uint32_t kPartialUpdateMinorPayloadVersion = 7; const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion; -const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion; +const uint32_t kMaxSupportedMinorPayloadVersion = + kPartialUpdateMinorPayloadVersion; const uint64_t kMaxPayloadHeaderSize = 24; diff --git a/update_engine.conf b/update_engine.conf index af213ad9..b6ca3c47 100644 --- a/update_engine.conf +++ b/update_engine.conf @@ -1,2 +1,2 @@ PAYLOAD_MAJOR_VERSION=2 -PAYLOAD_MINOR_VERSION=6 +PAYLOAD_MINOR_VERSION=7 From 3756c3e54fd61d2a27737020928b2b079f356caf Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 24 Jul 2020 20:25:51 -0700 Subject: [PATCH 333/624] brillo_update_payload: allow specifying delta_generator from env Help solve the following error when using brillo_update_payload inside sandboxes: "delta_generator" is not allowed to be used. See https://android.googlesource.com/platform/build/+/master/Changes.md#PATH_Tools for more information." ... by specifying GENERATOR=path/to/delta_generator in the environment. Test: build GKI Bug: 162116212 Change-Id: I8e3444539e3692738b0309ab7b1f829d0e840ac2 --- scripts/brillo_update_payload | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload index 63b6d24e..4b581fb1 100755 --- a/scripts/brillo_update_payload +++ b/scripts/brillo_update_payload @@ -897,7 +897,7 @@ cmd_check() { } # Sanity check that the real generator exists: -GENERATOR="$(which delta_generator || true)" +[[ -x "${GENERATOR}" ]] || GENERATOR="$(which delta_generator || true)" [[ -x "${GENERATOR}" ]] || die "can't find delta_generator" case "$COMMAND" in From 15a6ead86b5610b4960aec71d104848a932947f0 Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Sat, 25 Jul 2020 17:19:49 +0900 Subject: [PATCH 334/624] Reduce wait time in SubprocessTest.CancelTest With AsynchronousSignalHandler switching to base::FileDescriptorWatcher, there's no more IOTask under the MessageLoop. Thus MessageLoopRunUntil will block until the timeout task (unless there's some other task being posted). It was designed to call the callback to check whether it should stop for every posted task run. But now IOTask is gone, and no other task is posted, it will wait until timeout. On Chrome OS, 120s timeout doesn't appear a problem. But on AOSP, it looks the test is failing timeout. Test: mma && unittest SubprocessTest.CancelTest finish quickly. 
Change-Id: I8bbc5f0371dfe344e6173ff1a81444813affe808 --- common/subprocess_unittest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc index e71d3d81..74fee612 100644 --- a/common/subprocess_unittest.cc +++ b/common/subprocess_unittest.cc @@ -280,7 +280,7 @@ TEST_F(SubprocessTest, CancelTest) { // This test would leak a callback that runs when the child process exits // unless we wait for it to run. brillo::MessageLoopRunUntil( - &loop_, TimeDelta::FromSeconds(120), base::Bind([] { + &loop_, TimeDelta::FromSeconds(20), base::Bind([] { return Subprocess::Get().subprocess_records_.empty(); })); EXPECT_TRUE(Subprocess::Get().subprocess_records_.empty()); From a9997f04744cbf8bf2dd83785f5f6271b46fcca7 Mon Sep 17 00:00:00 2001 From: Saint Chou Date: Wed, 29 Jul 2020 10:14:56 +0000 Subject: [PATCH 335/624] Update language to comply with Android's inclusive language guidance See https://source.android.com/setup/contribute/respectful-code for reference #inclusivefixit Bug: 161896447 Change-Id: I839b1833dc06e00190f3557ca0292685197b1323 Test: NA (Comment only) --- payload_consumer/download_action.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h index 740416da..69284438 100644 --- a/payload_consumer/download_action.h +++ b/payload_consumer/download_action.h @@ -124,7 +124,7 @@ class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate { bool SetupP2PSharingFd(); // Writes |length| bytes of payload from |data| into |file_offset| - // of the p2p file. Also does sanity checks; for example ensures we + // of the p2p file. Also does validation checks; for example ensures we // don't end up with a file with holes in it. // // This method does nothing if SetupP2PSharingFd() hasn't been From c036b106d1e282f8cd60af9c133ddcaa24094c78 Mon Sep 17 00:00:00 2001 From: Saint Chou Date: Wed, 29 Jul 2020 12:02:08 +0000 Subject: [PATCH 336/624] Update language to comply with Android's inclusive language guidance See https://source.android.com/setup/contribute/respectful-code for reference #inclusivefixit Bug: 161896447 Change-Id: I28df0e1a45fb625fd70b5cd471e01e7f60fcd652 Test: NA (Comment only) --- payload_generator/delta_diff_utils.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc index 22752e8f..9a088158 100644 --- a/payload_generator/delta_diff_utils.cc +++ b/payload_generator/delta_diff_utils.cc @@ -938,7 +938,7 @@ bool IsExtFilesystem(const string& device) { if (magic != EXT2_SUPER_MAGIC) return false; - // Sanity check the parameters. + // Validation check the parameters. 
TEST_AND_RETURN_FALSE(log_block_size >= EXT2_MIN_BLOCK_LOG_SIZE && log_block_size <= EXT2_MAX_BLOCK_LOG_SIZE); TEST_AND_RETURN_FALSE(block_count > 0); From fc24afe2556769a584f6e3a6d0c58b18f30da9ba Mon Sep 17 00:00:00 2001 From: Saint Chou Date: Wed, 29 Jul 2020 11:54:31 +0000 Subject: [PATCH 337/624] Update language to comply with Android's inclusive language guidance See https://source.android.com/setup/contribute/respectful-code for reference #inclusivefixit Bug: 161896447 Change-Id: Icb9e2fca7e27a2ee81380897134deae6c4a24053 Test: NA (Comment only) --- payload_consumer/postinstall_runner_action_unittest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc index 0041d314..cf5158b2 100644 --- a/payload_consumer/postinstall_runner_action_unittest.cc +++ b/payload_consumer/postinstall_runner_action_unittest.cc @@ -227,7 +227,7 @@ void PostinstallRunnerActionTest::RunPostinstallActionWithInstallPlan( EXPECT_TRUE(processor_delegate_.processing_stopped_called_ || processor_delegate_.processing_done_called_); if (processor_delegate_.processing_done_called_) { - // Sanity check that the code was set when the processor finishes. + // Validation check that the code was set when the processor finishes. EXPECT_TRUE(processor_delegate_.code_set_); } } From aff51c2ceb2137b6f649ecf91e16e08e60a2ee2d Mon Sep 17 00:00:00 2001 From: Saint Chou Date: Wed, 29 Jul 2020 14:14:21 +0000 Subject: [PATCH 338/624] Update language to comply with Android's inclusive language guidance See https://source.android.com/setup/contribute/respectful-code for reference #inclusivefixit Bug: 161896447 Change-Id: I62b6d5016433cd8b971326c771fb39ccb85827d1 Test: Build pass --- omaha_request_params.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omaha_request_params.cc b/omaha_request_params.cc index d4b8d649..8a2e3dcb 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -66,7 +66,7 @@ bool OmahaRequestParams::Init(const string& in_app_version, image_props_ = LoadImageProperties(system_state_); mutable_image_props_ = LoadMutableImageProperties(system_state_); - // Sanity check the channel names. + // Validation check the channel names. if (!IsValidChannel(image_props_.current_channel)) image_props_.current_channel = "stable-channel"; if (!IsValidChannel(mutable_image_props_.target_channel)) From 6431c33594f85d2fd58b371192639ccf7d283565 Mon Sep 17 00:00:00 2001 From: Saint Chou Date: Wed, 29 Jul 2020 14:04:14 +0000 Subject: [PATCH 339/624] Update language to comply with Android's inclusive language guidance See https://source.android.com/setup/contribute/respectful-code for reference #inclusivefixit Bug: 161896447 Change-Id: I389684e574684566cd500244e0c22bc7a309756e Test: Build pass --- omaha_request_action.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/omaha_request_action.cc b/omaha_request_action.cc index e37ebab4..95e1250d 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -952,10 +952,10 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, int code = GetHTTPResponseCode(); LOG(ERROR) << "Omaha request network transfer failed with HTTPResponseCode=" << code; - // Makes sure we send sane error values. + // Makes sure we send proper error values. 
if (code < 0 || code >= 1000) { code = 999; - LOG(WARNING) << "Converting to sane HTTPResponseCode=" << code; + LOG(WARNING) << "Converting to proper HTTPResponseCode=" << code; } completer.set_code(static_cast( static_cast(ErrorCode::kOmahaRequestHTTPResponseBase) + code)); From 0a92a62625580bf408c8a75284b572057fdc4f2c Mon Sep 17 00:00:00 2001 From: Saint Chou Date: Wed, 29 Jul 2020 14:25:35 +0000 Subject: [PATCH 340/624] Update language to comply with Android's inclusive language guidance See https://source.android.com/setup/contribute/respectful-code for reference #inclusivefixit Bug: 161896447 Change-Id: Id2cc29f9a95587476d2c66ead348420c989d04bc Test: Build pass --- payload_consumer/delta_performer.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index d8f0ef56..4690d856 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -740,7 +740,7 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { CheckpointUpdateProgress(false); } - // In major version 2, we don't add dummy operation to the payload. + // In major version 2, we don't add unused operation to the payload. // If we already extracted the signature we should skip this step. if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() && signatures_message_data_.empty()) { @@ -1570,7 +1570,7 @@ DeltaPerformer::CreatePayloadVerifier() { } ErrorCode DeltaPerformer::ValidateManifest() { - // Perform assorted checks to sanity check the manifest, make sure it + // Perform assorted checks to validation check the manifest, make sure it // matches data from other sources, and that it is a supported version. bool has_old_fields = std::any_of(manifest_.partitions().begin(), manifest_.partitions().end(), @@ -1663,7 +1663,7 @@ ErrorCode DeltaPerformer::ValidateOperationHash( // corresponding update should have been produced with the operation // hashes. So if it happens it means either we've turned operation hash // generation off in DeltaDiffGenerator or it's a regression of some sort. - // One caveat though: The last operation is a dummy signature operation + // One caveat though: The last operation is a unused signature operation // that doesn't have a hash at the time the manifest is created. So we // should not complaint about that operation. This operation can be // recognized by the fact that it's offset is mentioned in the manifest. @@ -1798,7 +1798,7 @@ bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs, resumed_update_failures > kMaxResumedUpdateFailures) return false; - // Sanity check the rest. + // Validation check the rest. 
int64_t next_data_offset = -1; if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) && next_data_offset >= 0)) From b0b9c20eba3b27d98174edb24038b33769813644 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 24 Jul 2020 16:02:09 -0400 Subject: [PATCH 341/624] Parallelize delta generation across partition On my machine, this change alone reduces incremental OTA generation time from 56 minutes to 29 minutes Test: Generate and serve an OTA Change-Id: Id4ffc6f02f28594eb60cb934777b82f1899bbbc2 --- payload_generator/delta_diff_generator.cc | 70 +++++++++++++++++++++-- payload_generator/payload_file.cc | 8 +-- payload_generator/payload_file.h | 2 +- 3 files changed, 70 insertions(+), 10 deletions(-) diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc index 595a41ec..aa492524 100644 --- a/payload_generator/delta_diff_generator.cc +++ b/payload_generator/delta_diff_generator.cc @@ -29,11 +29,13 @@ #include #include +#include #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/delta_performer.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_generator/ab_generator.h" +#include "update_engine/payload_generator/annotated_operation.h" #include "update_engine/payload_generator/blob_file_writer.h" #include "update_engine/payload_generator/delta_diff_utils.h" #include "update_engine/payload_generator/full_update_generator.h" @@ -49,6 +51,45 @@ namespace chromeos_update_engine { const size_t kRootFSPartitionSize = static_cast(2) * 1024 * 1024 * 1024; const size_t kBlockSize = 4096; // bytes +class PartitionProcessor : public base::DelegateSimpleThread::Delegate { + public: + explicit PartitionProcessor( + const PayloadGenerationConfig& config, + const PartitionConfig& old_part, + const PartitionConfig& new_part, + BlobFileWriter* file_writer, + std::vector* aops, + std::unique_ptr strategy) + : config_(config), + old_part_(old_part), + new_part_(new_part), + file_writer_(file_writer), + aops_(aops), + strategy_(std::move(strategy)) {} + PartitionProcessor(PartitionProcessor&&) noexcept = default; + void Run() override { + LOG(INFO) << "Started an async task to process partition " + << old_part_.name; + bool success = strategy_->GenerateOperations( + config_, old_part_, new_part_, file_writer_, aops_); + if (!success) { + // ABORT the entire process, so that developer can look + // at recent logs and diagnose what happened + LOG(FATAL) << "GenerateOperations(" << old_part_.name << ", " + << new_part_.name << ") failed"; + } + } + + private: + const PayloadGenerationConfig& config_; + const PartitionConfig& old_part_; + const PartitionConfig& new_part_; + BlobFileWriter* file_writer_; + std::vector* aops_; + std::unique_ptr strategy_; + DISALLOW_COPY_AND_ASSIGN(PartitionProcessor); +}; + bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, const string& output_path, const string& private_key_path, @@ -80,6 +121,13 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, config.target.partitions.size()); } PartitionConfig empty_part(""); + std::vector> all_aops; + all_aops.resize(config.target.partitions.size()); + std::vector partition_tasks{}; + auto thread_count = std::min(diff_utils::GetMaxThreads(), + config.target.partitions.size()); + base::DelegateSimpleThreadPool thread_pool{"partition-thread-pool", + thread_count}; for (size_t i = 0; i < config.target.partitions.size(); i++) { const PartitionConfig& old_part = 
config.is_delta ? config.source.partitions[i] : empty_part; @@ -99,12 +147,26 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, strategy.reset(new FullUpdateGenerator()); } - vector aops; // Generate the operations using the strategy we selected above. - TEST_AND_RETURN_FALSE(strategy->GenerateOperations( - config, old_part, new_part, &blob_file, &aops)); + partition_tasks.push_back(PartitionProcessor(config, + old_part, + new_part, + &blob_file, + &all_aops[i], + std::move(strategy))); + } + thread_pool.Start(); + for (auto& processor : partition_tasks) { + thread_pool.AddWork(&processor); + } + thread_pool.JoinAll(); - TEST_AND_RETURN_FALSE(payload.AddPartition(old_part, new_part, aops)); + for (size_t i = 0; i < config.target.partitions.size(); i++) { + const PartitionConfig& old_part = + config.is_delta ? config.source.partitions[i] : empty_part; + const PartitionConfig& new_part = config.target.partitions[i]; + TEST_AND_RETURN_FALSE( + payload.AddPartition(old_part, new_part, std::move(all_aops[i]))); } } diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index 940e9bdc..c1594c75 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -86,10 +86,10 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { bool PayloadFile::AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, - const vector& aops) { + vector aops) { Partition part; part.name = new_conf.name; - part.aops = aops; + part.aops = std::move(aops); part.postinstall = new_conf.postinstall; part.verity = new_conf.verity; // Initialize the PartitionInfo objects if present. @@ -172,9 +172,7 @@ bool PayloadFile::WritePayload(const string& payload_file, TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength( {private_key_path}, &signature_blob_length)); PayloadSigner::AddSignatureToManifest( - next_blob_offset, - signature_blob_length, - &manifest_); + next_blob_offset, signature_blob_length, &manifest_); } // Serialize protobuf diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h index 9dc80a72..0c3e9d5e 100644 --- a/payload_generator/payload_file.h +++ b/payload_generator/payload_file.h @@ -43,7 +43,7 @@ class PayloadFile { // reference a blob stored in the file provided to WritePayload(). bool AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, - const std::vector& aops); + std::vector aops); // Write the payload to the |payload_file| file. 
The operations reference // blobs in the |data_blobs_path| file and the blobs will be reordered in the From 5b002075485b0b564426730c56432369bdfe964f Mon Sep 17 00:00:00 2001 From: Andrew Date: Thu, 25 Jun 2020 07:41:40 -0700 Subject: [PATCH 342/624] update_engine: Fix non inclusive vocabulary Bug: 161896447 Test: build (cherry picked from commit cc6ab9f076694a816fa35f133e98e7737542ddd8) Change-Id: Ic321806ab6029c88723c220f243e2c2c7a9e94f0 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2267058 Tested-by: Andrew Lassalle Auto-Submit: Andrew Lassalle Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani --- common/test_utils.h | 2 +- hardware_chromeos.cc | 2 +- omaha_request_params.h | 2 +- payload_generator/delta_diff_utils.cc | 6 +++--- payload_state.cc | 2 +- payload_state.h | 8 ++++---- payload_state_unittest.cc | 2 +- pylintrc | 2 +- update_manager/chromeos_policy.cc | 2 +- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/common/test_utils.h b/common/test_utils.h index 44b7aa14..63ea7492 100644 --- a/common/test_utils.h +++ b/common/test_utils.h @@ -78,7 +78,7 @@ std::string Readlink(const std::string& path); void FillWithData(brillo::Blob* buffer); -// Compare the value of native array for download source parameter. +// Compare the value of builtin array for download source parameter. MATCHER_P(DownloadSourceMatcher, source_array, "") { return std::equal(source_array, source_array + kNumDownloadSources, arg); } diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 3adef98f..2a838305 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -47,7 +47,7 @@ namespace { const char kOOBECompletedMarker[] = "/home/chronos/.oobe_completed"; // The stateful directory used by update_engine to store powerwash-safe files. -// The files stored here must be safelisted in the powerwash scripts. +// The files stored here must be added to the powerwash script allowlist. const char kPowerwashSafeDirectory[] = "/mnt/stateful_partition/unencrypted/preserve"; diff --git a/omaha_request_params.h b/omaha_request_params.h index 727c2ad4..76fc8060 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -328,7 +328,7 @@ class OmahaRequestParams { bool ToMoreStableChannel() const; // Returns True if we should store the fw/ec versions based on our hwid_. - // Compares hwid to a set of safelisted prefixes. + // Compares hwid to a set of prefixes in the allowlist. bool CollectECFWVersions() const; // Gets the machine type (e.g. "i686"). 
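For readers following the PartitionProcessor change in PATCH 341 above: it uses the standard libchrome base::DelegateSimpleThreadPool delegate pattern, where each unit of work implements DelegateSimpleThread::Delegate and writes into its own output slot so no locking is needed. A minimal, self-contained sketch of that pattern (SquareTask, SquareAll and the fixed thread count are illustrative names for this sketch only, not code from the patch):

#include <cstddef>
#include <vector>

#include <base/threading/simple_thread.h>

namespace {

// One unit of work; the pool calls Run() on some worker thread.
class SquareTask : public base::DelegateSimpleThread::Delegate {
 public:
  SquareTask(int input, int* output) : input_(input), output_(output) {}
  void Run() override { *output_ = input_ * input_; }

 private:
  int input_;
  int* output_;  // Each task owns a distinct slot, so no mutex is required.
};

}  // namespace

void SquareAll(const std::vector<int>& inputs, std::vector<int>* outputs) {
  outputs->resize(inputs.size());
  std::vector<SquareTask> tasks;
  tasks.reserve(inputs.size());  // Reserve so the delegates never relocate.
  for (size_t i = 0; i < inputs.size(); ++i)
    tasks.emplace_back(inputs[i], &(*outputs)[i]);

  base::DelegateSimpleThreadPool pool("example-pool", /*num_threads=*/4);
  pool.Start();
  for (auto& task : tasks)
    pool.AddWork(&task);  // Queue every delegate...
  pool.JoinAll();         // ...then block until all of them have run.
}

This mirrors the structure of the patch: GenerateOperations() for each partition runs concurrently, each PartitionProcessor fills its own entry of |all_aops|, and AddPartition() is only called after JoinAll().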
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc index 9a088158..220c7ae1 100644 --- a/payload_generator/delta_diff_utils.cc +++ b/payload_generator/delta_diff_utils.cc @@ -708,15 +708,15 @@ bool ReadExtentsToDiff(const string& old_part, version.OperationAllowed(InstallOperation::SOURCE_BSDIFF); if (bsdiff_allowed && blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) { - LOG(INFO) << "bsdiff blacklisted, data too big: " - << blocks_to_read * kBlockSize << " bytes"; + LOG(INFO) << "bsdiff ignored, data too big: " << blocks_to_read * kBlockSize + << " bytes"; bsdiff_allowed = false; } bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF); if (puffdiff_allowed && blocks_to_read * kBlockSize > kMaxPuffdiffDestinationSize) { - LOG(INFO) << "puffdiff blacklisted, data too big: " + LOG(INFO) << "puffdiff ignored, data too big: " << blocks_to_read * kBlockSize << " bytes"; puffdiff_allowed = false; } diff --git a/payload_state.cc b/payload_state.cc index bde7999b..36ab32b2 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -1154,7 +1154,7 @@ void PayloadState::LoadRollbackVersion() { void PayloadState::SetRollbackVersion(const string& rollback_version) { CHECK(powerwash_safe_prefs_); - LOG(INFO) << "Blacklisting version " << rollback_version; + LOG(INFO) << "Excluding version " << rollback_version; rollback_version_ = rollback_version; powerwash_safe_prefs_->SetString(kPrefsRollbackVersion, rollback_version); } diff --git a/payload_state.h b/payload_state.h index 90c3b139..427836b6 100644 --- a/payload_state.h +++ b/payload_state.h @@ -366,14 +366,14 @@ class PayloadState : public PayloadStateInterface { // check where policy was available. This info is preserved over powerwash. void LoadRollbackHappened(); - // Loads the blocklisted version from our prefs file. + // Loads the excluded version from our prefs file. void LoadRollbackVersion(); - // Blacklists this version from getting AU'd to until we receive a new update + // Excludes this version from getting AU'd to until we receive a new update // response. void SetRollbackVersion(const std::string& rollback_version); - // Clears any blocklisted version. + // Clears any excluded version. void ResetRollbackVersion(); inline uint32_t GetUrlIndex() { @@ -565,7 +565,7 @@ class PayloadState : public PayloadStateInterface { // forced updates to avoid update-rollback loops. bool rollback_happened_; - // This stores a blocklisted version set as part of rollback. When we rollback + // This stores an excluded version set as part of rollback. When we rollback // we store the version of the os from which we are rolling back from in order // to guarantee that we do not re-update to it on the next au attempt after // reboot. diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index d8d9afac..c33bda40 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -1016,7 +1016,7 @@ TEST(PayloadStateTest, RollbackVersion) { NiceMock* mock_powerwash_safe_prefs = fake_system_state.mock_powerwash_safe_prefs(); - // Mock out the os version and make sure it's blocklisted correctly. + // Mock out the os version and make sure it's excluded correctly. string rollback_version = "2345.0.0"; OmahaRequestParams params(&fake_system_state); params.Init(rollback_version, "", false); diff --git a/pylintrc b/pylintrc index 33adec24..a4338680 100644 --- a/pylintrc +++ b/pylintrc @@ -24,7 +24,7 @@ # Profiled execution. profile=no -# Add files or directories to the blacklist. 
They should be base names, not +# Add files or directories to the ignorelist. They should be base names, not # paths. ignore=CVS,.svn,.git,update_metadata_pb2.py diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index b96e29d8..8ea892e9 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -466,7 +466,7 @@ EvalStatus ChromeOSPolicy::UpdateCanStart( // ConnectionManager::IsUpdateAllowedOver(); be sure to deprecate the latter. // // TODO(garnold) The current logic generally treats the list of allowed -// connections coming from the device policy as a whitelist, meaning that it +// connections coming from the device policy as an allowlist, meaning that it // can only be used for enabling connections, but not disable them. Further, // certain connection types cannot be enabled even by policy. // In effect, the only thing that device policy can change is to enable From d60dc3929d21050730bb8a4ae16e3bb3f3a7e36e Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 29 Jul 2020 11:27:35 -0700 Subject: [PATCH 343/624] Update language to comply with Android's inclusive language guidance More details in: https://source.android.com/setup/contribute/respectful-code Bug: 161896447 Test: build, run unittests Change-Id: I8d666eee75490146eb57a183f0cfdf343b58b602 --- excluder_chromeos_unittest.cc | 14 +++++++------- payload_generator/payload_file.h | 4 ++-- payload_generator/payload_signer.cc | 2 +- payload_generator/payload_signer.h | 4 ++-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/excluder_chromeos_unittest.cc b/excluder_chromeos_unittest.cc index a8c14b39..dba77e47 100644 --- a/excluder_chromeos_unittest.cc +++ b/excluder_chromeos_unittest.cc @@ -29,7 +29,7 @@ using std::unique_ptr; namespace chromeos_update_engine { -constexpr char kDummyHash[] = +constexpr char kFakeHash[] = "71ff43d76e2488e394e46872f5b066cc25e394c2c3e3790dd319517883b33db1"; class ExcluderChromeOSTest : public ::testing::Test { @@ -47,20 +47,20 @@ class ExcluderChromeOSTest : public ::testing::Test { }; TEST_F(ExcluderChromeOSTest, ExclusionCheck) { - EXPECT_FALSE(excluder_->IsExcluded(kDummyHash)); - EXPECT_TRUE(excluder_->Exclude(kDummyHash)); - EXPECT_TRUE(excluder_->IsExcluded(kDummyHash)); + EXPECT_FALSE(excluder_->IsExcluded(kFakeHash)); + EXPECT_TRUE(excluder_->Exclude(kFakeHash)); + EXPECT_TRUE(excluder_->IsExcluded(kFakeHash)); } TEST_F(ExcluderChromeOSTest, ResetFlow) { EXPECT_TRUE(excluder_->Exclude("abc")); - EXPECT_TRUE(excluder_->Exclude(kDummyHash)); + EXPECT_TRUE(excluder_->Exclude(kFakeHash)); EXPECT_TRUE(excluder_->IsExcluded("abc")); - EXPECT_TRUE(excluder_->IsExcluded(kDummyHash)); + EXPECT_TRUE(excluder_->IsExcluded(kFakeHash)); EXPECT_TRUE(excluder_->Reset()); EXPECT_FALSE(excluder_->IsExcluded("abc")); - EXPECT_FALSE(excluder_->IsExcluded(kDummyHash)); + EXPECT_FALSE(excluder_->IsExcluded(kFakeHash)); } } // namespace chromeos_update_engine diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h index 0c3e9d5e..d1f8196e 100644 --- a/payload_generator/payload_file.h +++ b/payload_generator/payload_file.h @@ -60,9 +60,9 @@ class PayloadFile { // Computes a SHA256 hash of the given buf and sets the hash value in the // operation so that update_engine could verify. This hash should be set // for all operations that have a non-zero data blob. 
One exception is the - // dummy operation for signature blob because the contents of the signature + // fake operation for signature blob because the contents of the signature // blob will not be available at payload creation time. So, update_engine will - // gracefully ignore the dummy signature operation. + // gracefully ignore the fake signature operation. static bool AddOperationHash(InstallOperation* op, const brillo::Blob& buf); // Install operations in the manifest may reference data blobs, which diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc index 7e5fd4e3..c3264c1d 100644 --- a/payload_generator/payload_signer.cc +++ b/payload_generator/payload_signer.cc @@ -82,7 +82,7 @@ bool ConvertSignaturesToProtobuf(const vector& signatures, // Given an unsigned payload under |payload_path| and the |payload_signature| // and |metadata_signature| generates an updated payload that includes the // signatures. It populates |out_metadata_size| with the size of the final -// manifest after adding the dummy signature operation, and +// manifest after adding the fake signature operation, and // |out_signatures_offset| with the expected offset for the new blob, and // |out_metadata_signature_size| which will be size of |metadata_signature| // if the payload major version supports metadata signature, 0 otherwise. diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h index 06e4823f..9676b718 100644 --- a/payload_generator/payload_signer.h +++ b/payload_generator/payload_signer.h @@ -62,7 +62,7 @@ class PayloadSigner { // size in |metadata_signature_size| and signatures offset in // |signatures_offset|, calculates the payload signature blob into // |out_serialized_signature|. Note that the payload must already have an - // updated manifest that includes the dummy signature op and correct metadata + // updated manifest that includes the fake signature op and correct metadata // signature size in header. Returns true on success, false otherwise. static bool SignPayload(const std::string& unsigned_payload_path, const std::vector& private_key_paths, @@ -92,7 +92,7 @@ class PayloadSigner { brillo::Blob* out_payload_hash_data, brillo::Blob* out_metadata_hash); - // Given an unsigned payload in |payload_path| (with no dummy signature op) + // Given an unsigned payload in |payload_path| (with no fake signature op) // and the raw |payload_signatures| and |metadata_signatures| updates the // payload to include the signature thus turning it into a signed payload. The // new payload is stored in |signed_payload_path|. 
|payload_path| and From e283ce414e749e599d0ffad31897bc5e25450cad Mon Sep 17 00:00:00 2001 From: Tianjie Date: Wed, 29 Jul 2020 11:37:51 -0700 Subject: [PATCH 344/624] Update language to comply with Android's inclusive language guidance More details in: https://source.android.com/setup/contribute/respectful-code Bug: 161896447 Test: build, run unittests Change-Id: I6a7136b01ecce948a3997c60b7dcec848331e8ef --- common/mock_http_fetcher.h | 2 +- payload_consumer/payload_metadata.h | 2 +- payload_generator/squashfs_filesystem.cc | 2 +- payload_state.cc | 4 ++-- scripts/brillo_update_payload | 2 +- update_manager/chromeos_policy.cc | 3 +-- 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h index 0f043190..dec81b0c 100644 --- a/common/mock_http_fetcher.h +++ b/common/mock_http_fetcher.h @@ -76,7 +76,7 @@ class MockHttpFetcher : public HttpFetcher { void set_connect_timeout(int connect_timeout_seconds) override {} void set_max_retry_count(int max_retry_count) override {} - // Dummy: no bytes were downloaded. + // No bytes were downloaded in the mock class. size_t GetBytesDownloaded() override { return sent_size_; } // Begins the transfer if it hasn't already begun. diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h index cc422539..8b36f533 100644 --- a/payload_consumer/payload_metadata.h +++ b/payload_consumer/payload_metadata.h @@ -63,7 +63,7 @@ class PayloadMetadata { // |metadata_signature| (if present) or the metadata signature in payload // itself (if present). Returns ErrorCode::kSuccess on match or a suitable // error code otherwise. This method must be called before any part of the - // metadata is parsed so that a man-in-the-middle attack on the SSL connection + // metadata is parsed so that an on-path attack on the SSL connection // to the payload server doesn't exploit any vulnerability in the code that // parses the protocol buffer. ErrorCode ValidateMetadataSignature( diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc index eb4fda37..6152d7d6 100644 --- a/payload_generator/squashfs_filesystem.cc +++ b/payload_generator/squashfs_filesystem.cc @@ -275,7 +275,7 @@ bool SquashfsFilesystem::Init(const string& map, auto last = std::unique(zlib_blks.begin(), zlib_blks.end()); zlib_blks.erase(last, zlib_blks.end()); - // Sanity check. Make sure zlib blocks are not overlapping. + // Make sure zlib blocks are not overlapping. auto result = std::adjacent_find( zlib_blks.begin(), zlib_blks.end(), diff --git a/payload_state.cc b/payload_state.cc index 36ab32b2..4945fe77 100644 --- a/payload_state.cc +++ b/payload_state.cc @@ -1058,7 +1058,7 @@ void PayloadState::LoadUpdateTimestampStart() { stored_time = Time::FromInternalValue(stored_value); } - // Sanity check: If the time read from disk is in the future + // Validation check: If the time read from disk is in the future // (modulo some slack to account for possible NTP drift // adjustments), something is fishy and we should report and // reset. @@ -1105,7 +1105,7 @@ void PayloadState::LoadUpdateDurationUptime() { stored_delta = TimeDelta::FromInternalValue(stored_value); } - // Sanity-check: Uptime can never be greater than the wall-clock + // Validation check: Uptime can never be greater than the wall-clock // difference (modulo some slack). If it is, report and reset // to the wall-clock difference. 
TimeDelta diff = GetUpdateDuration() - stored_delta; diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload index 4b581fb1..9bae74ef 100755 --- a/scripts/brillo_update_payload +++ b/scripts/brillo_update_payload @@ -896,7 +896,7 @@ cmd_check() { check_update_payload ${PAYCHECK_ARGS[@]} --check } -# Sanity check that the real generator exists: +# Check that the real generator exists: [[ -x "${GENERATOR}" ]] || GENERATOR="$(which delta_generator || true)" [[ -x "${GENERATOR}" ]] || die "can't find delta_generator" diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 8ea892e9..be5f9143 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -598,7 +598,6 @@ EvalStatus ChromeOSPolicy::UpdateBackoffAndDownloadUrl( string* error, UpdateBackoffAndDownloadUrlResult* result, const UpdateState& update_state) const { - // Sanity checks. DCHECK_GE(update_state.download_errors_max, 0); // Set default result values. @@ -670,7 +669,7 @@ EvalStatus ChromeOSPolicy::UpdateBackoffAndDownloadUrl( Time prev_err_time; bool is_first = true; for (const auto& err_tuple : update_state.download_errors) { - // Do some sanity checks. + // Do some validation checks. int used_url_idx = get<0>(err_tuple); if (is_first && url_idx >= 0 && used_url_idx != url_idx) { LOG(WARNING) << "First URL in error log (" << used_url_idx From 9dd9305611074c2b16d0f6efb532efe739f2521c Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 21 Jul 2020 17:31:19 -0400 Subject: [PATCH 345/624] Add unit test for fallback route of manifest cache Test: Run atest Change-Id: Icc6a2c809c571c3ad8e16a863c37afd8d4042ed6 --- Android.bp | 1 + common/action_pipe.h | 2 + common/action_processor.h | 2 +- common/mock_action_processor.h | 2 + common/mock_http_fetcher.cc | 33 ++++--- common/mock_http_fetcher.h | 18 ++-- .../download_action_android_unittest.cc | 90 +++++++++++++++++++ 7 files changed, 130 insertions(+), 18 deletions(-) create mode 100644 payload_consumer/download_action_android_unittest.cc diff --git a/Android.bp b/Android.bp index ecf3585b..4e3e2484 100644 --- a/Android.bp +++ b/Android.bp @@ -671,6 +671,7 @@ cc_test { "payload_consumer/certificate_parser_android_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", "payload_consumer/delta_performer_unittest.cc", + "payload_consumer/download_action_android_unittest.cc", "payload_consumer/extent_reader_unittest.cc", "payload_consumer/extent_writer_unittest.cc", "payload_consumer/fake_file_descriptor.cc", diff --git a/common/action_pipe.h b/common/action_pipe.h index 0c98ee13..4c568126 100644 --- a/common/action_pipe.h +++ b/common/action_pipe.h @@ -79,6 +79,8 @@ class ActionPipe { private: ObjectType contents_; + // Give unit test access + friend class DownloadActionTest; // The ctor is private. This is because this class should construct itself // via the static Bond() method. diff --git a/common/action_processor.h b/common/action_processor.h index 735a1063..ad98cc9c 100644 --- a/common/action_processor.h +++ b/common/action_processor.h @@ -89,7 +89,7 @@ class ActionProcessor { // But this call deletes the action if there no other object has a reference // to it, so in that case, the caller should not try to access any of its // member variables after this call. 
- void ActionComplete(AbstractAction* actionptr, ErrorCode code); + virtual void ActionComplete(AbstractAction* actionptr, ErrorCode code); private: FRIEND_TEST(ActionProcessorTest, ChainActionsTest); diff --git a/common/mock_action_processor.h b/common/mock_action_processor.h index 4c62109b..97857764 100644 --- a/common/mock_action_processor.h +++ b/common/mock_action_processor.h @@ -32,6 +32,8 @@ class MockActionProcessor : public ActionProcessor { MOCK_METHOD0(StartProcessing, void()); MOCK_METHOD1(EnqueueAction, void(AbstractAction* action)); + MOCK_METHOD2(ActionComplete, void(AbstractAction*, ErrorCode)); + // This is a legacy workaround described in: // https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#legacy-workarounds-for-move-only-types-legacymoveonly void EnqueueAction(std::unique_ptr action) override { diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc index 10e3b9ef..1b3cd7d7 100644 --- a/common/mock_http_fetcher.cc +++ b/common/mock_http_fetcher.cc @@ -22,6 +22,7 @@ #include #include #include +#include #include // This is a mock implementation of HttpFetcher which is useful for testing. @@ -43,12 +44,12 @@ void MockHttpFetcher::BeginTransfer(const std::string& url) { SignalTransferComplete(); return; } - if (sent_size_ < data_.size()) + if (sent_offset_ < data_.size()) SendData(true); } void MockHttpFetcher::SendData(bool skip_delivery) { - if (fail_transfer_ || sent_size_ == data_.size()) { + if (fail_transfer_ || sent_offset_ == data_.size()) { SignalTransferComplete(); return; } @@ -60,19 +61,22 @@ void MockHttpFetcher::SendData(bool skip_delivery) { // Setup timeout callback even if the transfer is about to be completed in // order to get a call to |TransferComplete|. - if (timeout_id_ == MessageLoop::kTaskIdNull) { + if (timeout_id_ == MessageLoop::kTaskIdNull && delay_) { + CHECK(MessageLoop::current()); timeout_id_ = MessageLoop::current()->PostDelayedTask( FROM_HERE, base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)), base::TimeDelta::FromMilliseconds(10)); } - if (!skip_delivery) { + if (!skip_delivery || !delay_) { const size_t chunk_size = - min(kMockHttpFetcherChunkSize, data_.size() - sent_size_); - sent_size_ += chunk_size; + min(kMockHttpFetcherChunkSize, data_.size() - sent_offset_); + sent_offset_ += chunk_size; + bytes_sent_ += chunk_size; CHECK(delegate_); - delegate_->ReceivedBytes(this, &data_[sent_size_ - chunk_size], chunk_size); + delegate_->ReceivedBytes( + this, &data_[sent_offset_ - chunk_size], chunk_size); } // We may get terminated and deleted right after |ReceivedBytes| call, so we // should not access any class member variable after this call. @@ -81,7 +85,7 @@ void MockHttpFetcher::SendData(bool skip_delivery) { void MockHttpFetcher::TimeoutCallback() { CHECK(!paused_); timeout_id_ = MessageLoop::kTaskIdNull; - CHECK_LE(sent_size_, data_.size()); + CHECK_LE(sent_offset_, data_.size()); // Same here, we should not access any member variable after this call. SendData(false); } @@ -90,10 +94,15 @@ void MockHttpFetcher::TimeoutCallback() { // The transfer cannot be resumed. void MockHttpFetcher::TerminateTransfer() { LOG(INFO) << "Terminating transfer."; - // Kill any timeout, it is ok to call with kTaskIdNull. - MessageLoop::current()->CancelTask(timeout_id_); - timeout_id_ = MessageLoop::kTaskIdNull; - delegate_->TransferTerminated(this); + // During testing, MessageLoop may or may not be available. + // So don't call CancelTask() unless necessary. 
+ if (timeout_id_ != MessageLoop::kTaskIdNull) { + MessageLoop::current()->CancelTask(timeout_id_); + timeout_id_ = MessageLoop::kTaskIdNull; + } + if (delegate_) { + delegate_->TransferTerminated(this); + } } void MockHttpFetcher::SetHeader(const std::string& header_name, diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h index dec81b0c..b082bbd4 100644 --- a/common/mock_http_fetcher.h +++ b/common/mock_http_fetcher.h @@ -46,7 +46,7 @@ class MockHttpFetcher : public HttpFetcher { size_t size, ProxyResolver* proxy_resolver) : HttpFetcher(proxy_resolver), - sent_size_(0), + sent_offset_(0), timeout_id_(brillo::MessageLoop::kTaskIdNull), paused_(false), fail_transfer_(false), @@ -64,7 +64,7 @@ class MockHttpFetcher : public HttpFetcher { // Ignores this. void SetOffset(off_t offset) override { - sent_size_ = offset; + sent_offset_ = offset; if (delegate_) delegate_->SeekToOffset(offset); } @@ -77,7 +77,7 @@ class MockHttpFetcher : public HttpFetcher { void set_max_retry_count(int max_retry_count) override {} // No bytes were downloaded in the mock class. - size_t GetBytesDownloaded() override { return sent_size_; } + size_t GetBytesDownloaded() override { return bytes_sent_; } // Begins the transfer if it hasn't already begun. void BeginTransfer(const std::string& url) override; @@ -113,6 +113,8 @@ class MockHttpFetcher : public HttpFetcher { const brillo::Blob& post_data() const { return post_data_; } + void set_delay(bool delay) { delay_ = delay; } + private: // Sends data to the delegate and sets up a timeout callback if needed. There // must be a delegate. If |skip_delivery| is true, no bytes will be delivered, @@ -129,8 +131,11 @@ class MockHttpFetcher : public HttpFetcher { // A full copy of the data we'll return to the delegate brillo::Blob data_; - // The number of bytes we've sent so far - size_t sent_size_; + // The current offset, marks the first byte that will be sent next + size_t sent_offset_; + + // Total number of bytes transferred + size_t bytes_sent_; // The extra headers set. std::map extra_headers_; @@ -148,6 +153,9 @@ class MockHttpFetcher : public HttpFetcher { // Set to true if BeginTransfer should EXPECT fail. bool never_use_; + // Whether it should wait for 10ms before sending data to delegates + bool delay_{true}; + DISALLOW_COPY_AND_ASSIGN(MockHttpFetcher); }; diff --git a/payload_consumer/download_action_android_unittest.cc b/payload_consumer/download_action_android_unittest.cc new file mode 100644 index 00000000..f78845f5 --- /dev/null +++ b/payload_consumer/download_action_android_unittest.cc @@ -0,0 +1,90 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "common/mock_action_processor.h" +#include +#include +#include + +#include "payload_consumer/install_plan.h" +#include "update_engine/common/action_pipe.h" +#include "update_engine/common/boot_control_stub.h" +#include "update_engine/common/constants.h" +#include "update_engine/common/mock_http_fetcher.h" +#include "update_engine/common/mock_prefs.h" +#include "update_engine/common/test_utils.h" +#include "update_engine/payload_consumer/download_action.h" + +#include +#include +#include + +namespace chromeos_update_engine { +using testing::_; +using testing::DoAll; +using testing::Return; +using testing::SetArgPointee; + +class DownloadActionTest : public ::testing::Test { + public: + static constexpr int64_t METADATA_SIZE = 1024; + static constexpr int64_t SIGNATURE_SIZE = 256; + std::shared_ptr> action_pipe{ + new ActionPipe()}; +}; + +TEST_F(DownloadActionTest, CacheManifestInvalid) { + std::string data(METADATA_SIZE + SIGNATURE_SIZE, '-'); + MockPrefs prefs; + EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true))); + EXPECT_CALL(prefs, GetInt64(kPrefsManifestMetadataSize, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(METADATA_SIZE), Return(true))); + EXPECT_CALL(prefs, GetInt64(kPrefsManifestSignatureSize, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(SIGNATURE_SIZE), Return(true))); + EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextDataOffset, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true))); + EXPECT_CALL(prefs, GetString(kPrefsManifestBytes, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true))); + + BootControlStub boot_control; + MockHttpFetcher* http_fetcher = + new MockHttpFetcher(data.data(), data.size(), nullptr); + http_fetcher->set_delay(false); + InstallPlan install_plan; + auto& payload = install_plan.payloads.emplace_back(); + install_plan.download_url = "http://fake_url.invalid"; + payload.size = data.size(); + payload.payload_urls.emplace_back("http://fake_url.invalid"); + install_plan.is_resume = true; + action_pipe->set_contents(install_plan); + + // takes ownership of passed in HttpFetcher + auto download_action = + std::make_unique(&prefs, + &boot_control, + nullptr, + nullptr, + http_fetcher, + false /* interactive */); + download_action->set_in_pipe(action_pipe); + MockActionProcessor mock_processor; + download_action->SetProcessor(&mock_processor); + download_action->PerformAction(); + ASSERT_EQ(download_action->http_fetcher()->GetBytesDownloaded(), data.size()); +} + +} // namespace chromeos_update_engine From f693d8d17e042a5183e2966f12c58c33770f794f Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 4 Aug 2020 10:17:50 -0400 Subject: [PATCH 346/624] Fix broken test due to uninitialized members Bug: 162796604 Test: atest update_engine_unittests Change-Id: Ic421e4d901229a69a649d3bf3c496abc190f0396 --- common/mock_http_fetcher.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h index b082bbd4..ea5b83da 100644 --- a/common/mock_http_fetcher.h +++ b/common/mock_http_fetcher.h @@ -132,10 +132,10 @@ class MockHttpFetcher : public HttpFetcher { brillo::Blob data_; // The current offset, marks the first byte that will be sent next - size_t sent_offset_; + size_t sent_offset_{0}; // Total number of bytes transferred - size_t bytes_sent_; + size_t bytes_sent_{0}; // The extra headers set. 
std::map extra_headers_; @@ -145,13 +145,13 @@ class MockHttpFetcher : public HttpFetcher { brillo::MessageLoop::TaskId timeout_id_; // True iff the fetcher is paused. - bool paused_; + bool paused_{false}; // Set to true if the transfer should fail. - bool fail_transfer_; + bool fail_transfer_{false}; // Set to true if BeginTransfer should EXPECT fail. - bool never_use_; + bool never_use_{false}; // Whether it should wait for 10ms before sending data to delegates bool delay_{true}; From 88d1069f507c25c3f569c782d2724879502b9b44 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 6 Jul 2020 10:54:03 -0400 Subject: [PATCH 347/624] Improve error message when hitting checksum mismatch Bug: 141931619 Test: Run existing unittests Change-Id: Icc0301d4c83d45f233e226f48059ec235c87376f --- payload_consumer/delta_performer.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 4690d856..19d12970 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1152,7 +1152,8 @@ bool DeltaPerformer::PerformSourceCopyOperation( } if (read_ok && expected_source_hash == source_hash) return true; - + LOG(WARNING) << "Source hash from RAW device mismatched, attempting to " + "correct using ECC"; if (!OpenCurrentECCPartition()) { // The following function call will return false since the source hash // mismatches, but we still want to call it so it prints the appropriate @@ -1165,7 +1166,6 @@ bool DeltaPerformer::PerformSourceCopyOperation( << ", expected " << base::HexEncode(expected_source_hash.data(), expected_source_hash.size()); - if (should_optimize) { TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents( source_ecc_fd_, operation.src_extents(), block_size_, &source_hash)); From 2b291f05765486899cf0b4e8eda83bc89f083271 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 21 Jul 2020 18:46:26 -0700 Subject: [PATCH 348/624] Define stable AIDL interface for update_engine Create definition of stable AIDL interface for update_engine so that it can be used by APEXes. Only a few APIs are exposed to APEXes and frozen. Bug: 160996544 Test: compiles Change-Id: I1293a4eccb4c1e9830be4e1a678a86b9c68635e1 --- stable/Android.bp | 39 ++++++++++ .../android/os/IUpdateEngineStable.aidl | 23 ++++++ .../os/IUpdateEngineStableCallback.aidl | 22 ++++++ stable/android/os/IUpdateEngineStable.aidl | 75 +++++++++++++++++++ .../os/IUpdateEngineStableCallback.aidl | 39 ++++++++++ 5 files changed, 198 insertions(+) create mode 100644 stable/Android.bp create mode 100644 stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl create mode 100644 stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl create mode 100644 stable/android/os/IUpdateEngineStable.aidl create mode 100644 stable/android/os/IUpdateEngineStableCallback.aidl diff --git a/stable/Android.bp b/stable/Android.bp new file mode 100644 index 00000000..01dd88b7 --- /dev/null +++ b/stable/Android.bp @@ -0,0 +1,39 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Stable AIDL interface between update_engine and other APEXes +// ======================================================== +aidl_interface { + name: "libupdate_engine_stable", + srcs: [ + "android/os/IUpdateEngineStable.aidl", + "android/os/IUpdateEngineStableCallback.aidl", + ], + backend: { + cpp: { + enabled: true, + }, + java: { + enabled: false, + }, + ndk: { + enabled: true, + apex_available: [ + "com.android.gki.*", + ], + }, + }, +} diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl new file mode 100644 index 00000000..82c3ca5f --- /dev/null +++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl @@ -0,0 +1,23 @@ +/////////////////////////////////////////////////////////////////////////////// +// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. // +/////////////////////////////////////////////////////////////////////////////// + +// This file is a snapshot of an AIDL interface (or parcelable). Do not try to +// edit this file. It looks like you are doing that because you have modified +// an AIDL interface in a backward-incompatible way, e.g., deleting a function +// from an interface or a field from a parcelable and it broke the build. That +// breakage is intended. +// +// You must not make a backward incompatible changes to the AIDL files built +// with the aidl_interface module type with versions property set. The module +// type is used to build AIDL files in a way that they can be used across +// independently updatable components of the system. If a device is shipped +// with such a backward incompatible change, it has a high risk of breaking +// later when a module using the interface is updated, e.g., Mainline modules. + +package android.os; +interface IUpdateEngineStable { + void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs); + boolean bind(android.os.IUpdateEngineStableCallback callback); + boolean unbind(android.os.IUpdateEngineStableCallback callback); +} diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl new file mode 100644 index 00000000..4c72b495 --- /dev/null +++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl @@ -0,0 +1,22 @@ +/////////////////////////////////////////////////////////////////////////////// +// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. // +/////////////////////////////////////////////////////////////////////////////// + +// This file is a snapshot of an AIDL interface (or parcelable). Do not try to +// edit this file. It looks like you are doing that because you have modified +// an AIDL interface in a backward-incompatible way, e.g., deleting a function +// from an interface or a field from a parcelable and it broke the build. That +// breakage is intended. 
+// +// You must not make a backward incompatible changes to the AIDL files built +// with the aidl_interface module type with versions property set. The module +// type is used to build AIDL files in a way that they can be used across +// independently updatable components of the system. If a device is shipped +// with such a backward incompatible change, it has a high risk of breaking +// later when a module using the interface is updated, e.g., Mainline modules. + +package android.os; +interface IUpdateEngineStableCallback { + oneway void onStatusUpdate(int status_code, float percentage); + oneway void onPayloadApplicationComplete(int error_code); +} diff --git a/stable/android/os/IUpdateEngineStable.aidl b/stable/android/os/IUpdateEngineStable.aidl new file mode 100644 index 00000000..b3b6674a --- /dev/null +++ b/stable/android/os/IUpdateEngineStable.aidl @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.os; + +import android.os.IUpdateEngineStableCallback; +import android.os.ParcelFileDescriptor; + +/** + * The stable interface exposed by the update engine daemon. + */ +interface IUpdateEngineStable { + /** + * Apply the given payload as provided in the given file descriptor. + * + * See {@link #bind(IUpdateEngineCallback)} for status updates. + * + * @param pfd The file descriptor opened at the payload file. Note that the daemon must have + * enough permission to operate on the file descriptor. + * @param payload_offset offset into pfd where the payload binary starts. + * @param payload_size length after payload_offset to read from pfd. If 0, it will be auto + * detected. + * @param headerKeyValuePairs additional header key value pairs, in the format of "key=value". + * @see android.os.UpdateEngine#applyPayload(android.content.res.AssetFileDescriptor, String[]) + */ + void applyPayloadFd(in ParcelFileDescriptor pfd, + in long payload_offset, + in long payload_size, + in String[] headerKeyValuePairs); + + /** + * Bind a callback for status updates on payload application. + * + * At any given time, only one callback can be bound. If a callback is already bound, + * subsequent binding will fail and return false until the bound callback is unbound. That is, + * binding is first-come, first-serve. + * + * A bound callback may be unbound explicitly by calling + * {@link #unbind(IUpdateEngineStableCallback)}, or + * implicitly when the process implementing the callback dies. + * + * @param callback See {@link IUpdateEngineStableCallback} + * @return true if binding is successful, false otherwise. + * @see android.os.UpdateEngine#bind(android.os.UpdateEngineCallback) + */ + boolean bind(IUpdateEngineStableCallback callback); + + /** + * Unbind a possibly bound callback. + * + * If the provided callback does not match the previously bound callback, unbinding fails. + * + * Note that a callback may also be unbound when the process implementing the callback dies. 
+ * Hence, a client usually does not need to explicitly unbind a callback unless it wants to change + * the bound callback. + * + * @param callback The callback to be unbound. See {@link IUpdateEngineStableCallback}. + * @return true if unbinding is successful, false otherwise. + * @see android.os.UpdateEngine#unbind(android.os.UpdateEngineCallback) + */ + boolean unbind(IUpdateEngineStableCallback callback); +} diff --git a/stable/android/os/IUpdateEngineStableCallback.aidl b/stable/android/os/IUpdateEngineStableCallback.aidl new file mode 100644 index 00000000..d8fc3335 --- /dev/null +++ b/stable/android/os/IUpdateEngineStableCallback.aidl @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.os; + +/** + * The stable Callback interface for IUpdateEngineStable. + */ +oneway interface IUpdateEngineStableCallback { + /** + * Invoked when a payload is being applied and there is a status update. + * + * @param status_code see {@link android.os.UpdateEngine.UpdateStatusConstants}. + * @param percentage percentage of progress of the current stage. + * @see android.os.UpdateEngineCallback#onStatusUpdate(int, float) + */ + void onStatusUpdate(int status_code, float percentage); + + /** + * Invoked when a payload has finished being applied. + * + * @param error_code see {@link android.os.UpdateEngine.ErrorCodeConstants} + * @see android.os.UpdateEngineCallback#onPayloadApplicationComplete(int) + */ + void onPayloadApplicationComplete(int error_code); +} From 2562cf2960c97acdaac2dee7e306cd3c88cd9c68 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 21 Jul 2020 19:28:44 -0700 Subject: [PATCH 349/624] Add binder_service_stable_android Add a new binder service that uses the exact same service delegate as its implementation, but exposes itself with the IUpdateEngineStable* APIs. Note: Even though the new interface requires only one binding at a time, race conditions for different clients are NOT handled in this CL. Multiple binding is still possible via the unstable IUpdateEngine API and via the Java android.os.UpdateEngine interface. Moreover, bindings with bind(IUpdateEngineStableCallback) and bind(IUpdateEngineCallback) are handled separately. 
Test: unit test Bug: 160996544 Change-Id: I26e8b4a58c0243d46ffcd7354d04f0c69f8fa66f --- Android.bp | 2 + binder_service_android.cc | 19 +---- binder_service_android_common.h | 45 +++++++++++ binder_service_stable_android.cc | 132 +++++++++++++++++++++++++++++++ binder_service_stable_android.h | 85 ++++++++++++++++++++ daemon_android.cc | 14 +++- daemon_android.h | 2 + 7 files changed, 280 insertions(+), 19 deletions(-) create mode 100644 binder_service_android_common.h create mode 100644 binder_service_stable_android.cc create mode 100644 binder_service_stable_android.h diff --git a/Android.bp b/Android.bp index 4e3e2484..b8cff0a4 100644 --- a/Android.bp +++ b/Android.bp @@ -272,6 +272,7 @@ cc_defaults { "libbrillo-binder", "libcurl", "libcutils", + "libupdate_engine_stable-cpp", "liblog", "libssl", "libstatslog", @@ -298,6 +299,7 @@ cc_library_static { srcs: [ ":libupdate_engine_aidl", "binder_service_android.cc", + "binder_service_stable_android.cc", "certificate_checker.cc", "daemon_android.cc", "daemon_state_android.cc", diff --git a/binder_service_android.cc b/binder_service_android.cc index 6b8a5529..0c8bc2f4 100644 --- a/binder_service_android.cc +++ b/binder_service_android.cc @@ -24,6 +24,8 @@ #include #include +#include "update_engine/binder_service_android_common.h" + using android::binder::Status; using android::os::IUpdateEngineCallback; using android::os::ParcelFileDescriptor; @@ -31,23 +33,6 @@ using std::string; using std::vector; using update_engine::UpdateEngineStatus; -namespace { -Status ErrorPtrToStatus(const brillo::ErrorPtr& error) { - return Status::fromServiceSpecificError( - 1, android::String8{error->GetMessage().c_str()}); -} - -vector ToVecString(const vector& inp) { - vector out; - out.reserve(inp.size()); - for (const auto& e : inp) { - out.emplace_back(android::String8{e}.string()); - } - return out; -} - -} // namespace - namespace chromeos_update_engine { BinderUpdateEngineAndroidService::BinderUpdateEngineAndroidService( diff --git a/binder_service_android_common.h b/binder_service_android_common.h new file mode 100644 index 00000000..fc621d97 --- /dev/null +++ b/binder_service_android_common.h @@ -0,0 +1,45 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_ +#define UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_ + +#include +#include + +#include + +namespace chromeos_update_engine { + +static inline android::binder::Status ErrorPtrToStatus( + const brillo::ErrorPtr& error) { + return android::binder::Status::fromServiceSpecificError( + 1, android::String8{error->GetMessage().c_str()}); +} + +static inline std::vector ToVecString( + const std::vector& inp) { + std::vector out; + out.reserve(inp.size()); + for (const auto& e : inp) { + out.emplace_back(android::String8{e}.string()); + } + return out; +} + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_ diff --git a/binder_service_stable_android.cc b/binder_service_stable_android.cc new file mode 100644 index 00000000..a12b349b --- /dev/null +++ b/binder_service_stable_android.cc @@ -0,0 +1,132 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/binder_service_stable_android.h" + +#include + +#include +#include +#include +#include +#include + +#include "update_engine/binder_service_android_common.h" + +using android::binder::Status; +using android::os::IUpdateEngineStableCallback; +using android::os::ParcelFileDescriptor; +using std::string; +using std::vector; +using update_engine::UpdateEngineStatus; + +namespace chromeos_update_engine { + +BinderUpdateEngineAndroidStableService::BinderUpdateEngineAndroidStableService( + ServiceDelegateAndroidInterface* service_delegate) + : service_delegate_(service_delegate) {} + +void BinderUpdateEngineAndroidStableService::SendStatusUpdate( + const UpdateEngineStatus& update_engine_status) { + last_status_ = static_cast(update_engine_status.status); + last_progress_ = update_engine_status.progress; + if (callback_) { + callback_->onStatusUpdate(last_status_, last_progress_); + } +} + +void BinderUpdateEngineAndroidStableService::SendPayloadApplicationComplete( + ErrorCode error_code) { + if (callback_) { + callback_->onPayloadApplicationComplete(static_cast(error_code)); + } +} + +Status BinderUpdateEngineAndroidStableService::bind( + const android::sp& callback, + bool* return_value) { + // Reject binding if another callback is already bound. + if (callback_ != nullptr) { + LOG(ERROR) << "Another callback is already bound. Can't bind new callback."; + *return_value = false; + return Status::ok(); + } + + // See BinderUpdateEngineAndroidService::bind. 
+ if (last_status_ != -1) { + auto status = callback->onStatusUpdate(last_status_, last_progress_); + if (!status.isOk()) { + LOG(ERROR) << "Failed to call onStatusUpdate() from callback: " + << status.toString8(); + *return_value = false; + return Status::ok(); + } + } + + callback_ = callback; + + const android::sp& callback_binder = + IUpdateEngineStableCallback::asBinder(callback); + auto binder_wrapper = android::BinderWrapper::Get(); + binder_wrapper->RegisterForDeathNotifications( + callback_binder, + base::Bind(base::IgnoreResult( + &BinderUpdateEngineAndroidStableService::UnbindCallback), + base::Unretained(this), + base::Unretained(callback_binder.get()))); + + *return_value = true; + return Status::ok(); +} + +Status BinderUpdateEngineAndroidStableService::unbind( + const android::sp& callback, + bool* return_value) { + const android::sp& callback_binder = + IUpdateEngineStableCallback::asBinder(callback); + auto binder_wrapper = android::BinderWrapper::Get(); + binder_wrapper->UnregisterForDeathNotifications(callback_binder); + + *return_value = UnbindCallback(callback_binder.get()); + return Status::ok(); +} + +Status BinderUpdateEngineAndroidStableService::applyPayloadFd( + const ParcelFileDescriptor& pfd, + int64_t payload_offset, + int64_t payload_size, + const vector& header_kv_pairs) { + vector str_headers = ToVecString(header_kv_pairs); + + brillo::ErrorPtr error; + if (!service_delegate_->ApplyPayload( + pfd.get(), payload_offset, payload_size, str_headers, &error)) { + return ErrorPtrToStatus(error); + } + return Status::ok(); +} + +bool BinderUpdateEngineAndroidStableService::UnbindCallback( + const IBinder* callback) { + if (IUpdateEngineStableCallback::asBinder(callback_).get() != callback) { + LOG(ERROR) << "Unable to unbind unknown callback."; + return false; + } + callback_ = nullptr; + return true; +} + +} // namespace chromeos_update_engine diff --git a/binder_service_stable_android.h b/binder_service_stable_android.h new file mode 100644 index 00000000..16677980 --- /dev/null +++ b/binder_service_stable_android.h @@ -0,0 +1,85 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_ +#define UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_ + +#include + +#include +#include + +#include +#include +#include + +#include "android/os/BnUpdateEngineStable.h" +#include "android/os/IUpdateEngineStableCallback.h" +#include "update_engine/service_delegate_android_interface.h" +#include "update_engine/service_observer_interface.h" + +namespace chromeos_update_engine { + +class BinderUpdateEngineAndroidStableService + : public android::os::BnUpdateEngineStable, + public ServiceObserverInterface { + public: + explicit BinderUpdateEngineAndroidStableService( + ServiceDelegateAndroidInterface* service_delegate); + ~BinderUpdateEngineAndroidStableService() override = default; + + const char* ServiceName() const { + return "android.os.UpdateEngineStableService"; + } + + // ServiceObserverInterface overrides. + void SendStatusUpdate( + const update_engine::UpdateEngineStatus& update_engine_status) override; + void SendPayloadApplicationComplete(ErrorCode error_code) override; + + // android::os::BnUpdateEngineStable overrides. + android::binder::Status applyPayloadFd( + const ::android::os::ParcelFileDescriptor& pfd, + int64_t payload_offset, + int64_t payload_size, + const std::vector& header_kv_pairs) override; + android::binder::Status bind( + const android::sp& callback, + bool* return_value) override; + android::binder::Status unbind( + const android::sp& callback, + bool* return_value) override; + + private: + // Remove the passed |callback| from the list of registered callbacks. Called + // on unbind() or whenever the callback object is destroyed. + // Returns true on success. + bool UnbindCallback(const IBinder* callback); + + // Bound callback. The stable interface only supports one callback at a time. + android::sp callback_; + + // Cached copy of the last status update sent. Used to send an initial + // notification when bind() is called from the client. + int last_status_{-1}; + double last_progress_{0.0}; + + ServiceDelegateAndroidInterface* service_delegate_; +}; + +} // namespace chromeos_update_engine + +#endif // UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_ diff --git a/daemon_android.cc b/daemon_android.cc index 1aa921f8..313d7ddc 100644 --- a/daemon_android.cc +++ b/daemon_android.cc @@ -47,16 +47,26 @@ int DaemonAndroid::OnInit() { LOG_IF(ERROR, !daemon_state_android->Initialize()) << "Failed to initialize system state."; + auto binder_wrapper = android::BinderWrapper::Get(); + // Create the Binder Service. binder_service_ = new BinderUpdateEngineAndroidService{ daemon_state_android->service_delegate()}; - auto binder_wrapper = android::BinderWrapper::Get(); if (!binder_wrapper->RegisterService(binder_service_->ServiceName(), binder_service_)) { LOG(ERROR) << "Failed to register binder service."; } - daemon_state_->AddObserver(binder_service_.get()); + + // Create the stable binder service. 
+ stable_binder_service_ = new BinderUpdateEngineAndroidStableService{ + daemon_state_android->service_delegate()}; + if (!binder_wrapper->RegisterService(stable_binder_service_->ServiceName(), + stable_binder_service_)) { + LOG(ERROR) << "Failed to register stable binder service."; + } + daemon_state_->AddObserver(stable_binder_service_.get()); + daemon_state_->StartUpdater(); return EX_OK; } diff --git a/daemon_android.h b/daemon_android.h index baead373..f0c028ec 100644 --- a/daemon_android.h +++ b/daemon_android.h @@ -22,6 +22,7 @@ #include #include "update_engine/binder_service_android.h" +#include "update_engine/binder_service_stable_android.h" #include "update_engine/common/subprocess.h" #include "update_engine/daemon_base.h" #include "update_engine/daemon_state_interface.h" @@ -43,6 +44,7 @@ class DaemonAndroid : public DaemonBase { brillo::BinderWatcher binder_watcher_; android::sp binder_service_; + android::sp stable_binder_service_; // The daemon state with all the required daemon classes for the configured // platform. From d51738c481012763aaa9c736267799fd70a77956 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 23 Jul 2020 17:06:25 -0700 Subject: [PATCH 350/624] Add update_engine_stable_client This is a update_engine console client installed to APEXes so that scripts can invoke on. This client operates on the IUpdateEngineStable service. Test: pass Bug: 160996544 Change-Id: I0672b7bd1ccd87e35ffb99d7a66e63dffaf7df24 --- Android.bp | 9 ++ stable/Android.bp | 26 ++++ stable/update_engine_stable_client.cc | 190 ++++++++++++++++++++++++++ 3 files changed, 225 insertions(+) create mode 100644 stable/update_engine_stable_client.cc diff --git a/Android.bp b/Android.bp index b8cff0a4..1076c526 100644 --- a/Android.bp +++ b/Android.bp @@ -729,3 +729,12 @@ cc_prebuilt_binary { }, }, } + +// update_engine header library +cc_library_headers { + name: "libupdate_engine_headers", + export_include_dirs: ["."], + apex_available: [ + "com.android.gki.*", + ], +} diff --git a/stable/Android.bp b/stable/Android.bp index 01dd88b7..337ae96e 100644 --- a/stable/Android.bp +++ b/stable/Android.bp @@ -37,3 +37,29 @@ aidl_interface { }, }, } + +// update_engine_stable_client (type: executable) +// ======================================================== +// update_engine console client installed to APEXes +cc_binary { + name: "update_engine_stable_client", + + header_libs: [ + "libupdate_engine_headers", + ], + shared_libs: [ + "libbinder_ndk", + "libbase", + "liblog", + ], + static_libs: [ + "libgflags", + "libupdate_engine_stable-ndk_platform", + ], + srcs: [ + "update_engine_stable_client.cc", + ], + apex_available: [ + "com.android.gki.*", + ], +} diff --git a/stable/update_engine_stable_client.cc b/stable/update_engine_stable_client.cc new file mode 100644 index 00000000..c466c426 --- /dev/null +++ b/stable/update_engine_stable_client.cc @@ -0,0 +1,190 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// update_engine console client installed to APEXes for scripts to invoke +// directly. Uses the stable API. + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace chromeos_update_engine::internal { + +DEFINE_string(payload, + "file:///path/to/payload.bin", + "The file URI to the update payload to use, or path to the file"); +DEFINE_int64(offset, + 0, + "The offset in the payload where the CrAU update starts."); +DEFINE_int64(size, + 0, + "The size of the CrAU part of the payload. If 0 is passed, it " + "will be autodetected."); +DEFINE_string(headers, + "", + "A list of key-value pairs, one element of the list per line."); + +int Exit(int return_code) { + LOG(INFO) << "Exit: " << return_code; + exit(return_code); + __builtin_unreachable(); +} +// Called whenever the UpdateEngine daemon dies. +void UpdateEngineServiceDied(void*) { + LOG(ERROR) << "UpdateEngineService died."; + Exit(EX_SOFTWARE); +} + +class UpdateEngineClientAndroid { + public: + UpdateEngineClientAndroid() = default; + int Run(); + + private: + class UECallback : public aidl::android::os::BnUpdateEngineStableCallback { + public: + UECallback() = default; + + // android::os::BnUpdateEngineStableCallback overrides. + ndk::ScopedAStatus onStatusUpdate(int status_code, float progress) override; + ndk::ScopedAStatus onPayloadApplicationComplete(int error_code) override; + }; + + static std::vector ParseHeaders(const std::string& arg); + + const ndk::ScopedAIBinder_DeathRecipient death_recipient_{ + AIBinder_DeathRecipient_new(&UpdateEngineServiceDied)}; + std::shared_ptr service_; + std::shared_ptr callback_; +}; + +ndk::ScopedAStatus UpdateEngineClientAndroid::UECallback::onStatusUpdate( + int status_code, float progress) { + LOG(INFO) << "onStatusUpdate(" << status_code << ", " << progress << ")"; + return ndk::ScopedAStatus::ok(); +} + +ndk::ScopedAStatus +UpdateEngineClientAndroid::UECallback::onPayloadApplicationComplete( + int error_code) { + LOG(INFO) << "onPayloadApplicationComplete(" << error_code << ")"; + auto code = static_cast(error_code); + Exit((code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive) + ? EX_OK + : EX_SOFTWARE); + __builtin_unreachable(); +} + +int UpdateEngineClientAndroid::Run() { + service_ = aidl::android::os::IUpdateEngineStable::fromBinder(ndk::SpAIBinder( + AServiceManager_getService("android.os.UpdateEngineStableService"))); + if (service_ == nullptr) { + LOG(ERROR) + << "Failed to get IUpdateEngineStable binder from service manager."; + return EX_SOFTWARE; + } + + // Register a callback object with the service. 
+ callback_ = ndk::SharedRefBase::make(); + bool bound; + if (!service_->bind(callback_, &bound).isOk() || !bound) { + LOG(ERROR) << "Failed to bind() the UpdateEngine daemon."; + return EX_SOFTWARE; + } + + auto headers = ParseHeaders(FLAGS_headers); + ndk::ScopedAStatus status; + const char* payload_path; + std::string file_prefix = "file://"; + if (android::base::StartsWith(FLAGS_payload, file_prefix)) { + payload_path = FLAGS_payload.data() + file_prefix.length(); + } else { + payload_path = FLAGS_payload.data(); + } + ndk::ScopedFileDescriptor ufd( + TEMP_FAILURE_RETRY(open(payload_path, O_RDONLY))); + if (ufd.get() < 0) { + PLOG(ERROR) << "Can't open " << payload_path; + return EX_SOFTWARE; + } + status = service_->applyPayloadFd(ufd, FLAGS_offset, FLAGS_size, headers); + if (!status.isOk()) { + LOG(ERROR) << "Cannot apply payload: " << status.getDescription(); + return EX_SOFTWARE; + } + + // When following updates status changes, exit if the update_engine daemon + // dies. + if (AIBinder_linkToDeath(service_->asBinder().get(), + death_recipient_.get(), + nullptr) != STATUS_OK) { + return EX_SOFTWARE; + } + + return EX_OK; +} + +std::vector UpdateEngineClientAndroid::ParseHeaders( + const std::string& arg) { + std::vector lines = android::base::Split(arg, "\n"); + std::vector headers; + for (const auto& line : lines) { + auto header = android::base::Trim(line); + if (!header.empty()) { + headers.push_back(header); + } + } + return headers; +} + +} // namespace chromeos_update_engine::internal + +int main(int argc, char** argv) { + android::base::InitLogging(argv); + gflags::ParseCommandLineFlags(&argc, &argv, true); + + // Unlike other update_engine* processes that uses message loops, + // update_engine_stable_client uses a thread pool model. However, number of + // threads is limited to 1; that is, 0 additional threads should be spawned. + // This avoids some race conditions. + if (!ABinderProcess_setThreadPoolMaxThreadCount(0)) { + LOG(ERROR) << "Cannot set thread pool max thread count"; + return EX_SOFTWARE; + } + ABinderProcess_startThreadPool(); + + chromeos_update_engine::internal::UpdateEngineClientAndroid client{}; + int code = client.Run(); + if (code != EX_OK) + return code; + + ABinderProcess_joinThreadPool(); + LOG(ERROR) << "Exited from joinThreadPool."; + return EX_SOFTWARE; +} From f6f75c208f3fa34d5cd38bdaf473dc36ef95ac8b Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 31 Jul 2020 15:20:25 -0700 Subject: [PATCH 351/624] Properly set target build vars for partial updates. If partial_update flag is set in payload, additionally do the following: - Set is_target_dynamic to true; that is, assume that the target build supports dynamic partitions - Assert that current and target build supports Virtual A/B. For non-partial updates, there is no change in behavior. 
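To make the flag interaction concrete, the sketch below builds a manifest that exercises the partial-update path. The setter names follow the usual protobuf conventions for the fields this change reads (partial_update, dynamic_partition_metadata.snapshot_enabled, groups) and are an assumption, not code taken from the patch:

  // A partial-update manifest: groups() may stay empty, yet the target must
  // still be treated as dynamic, and snapshot support is mandatory.
  DeltaArchiveManifest manifest;
  manifest.set_partial_update(true);
  manifest.mutable_dynamic_partition_metadata()->set_snapshot_enabled(true);
  // With these values SetTargetBuildVars() assumes is_target_dynamic and only
  // fails if the running build itself lacks Virtual A/B.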
Bug: 162616968 Test: apply GKI update Change-Id: I773b609c7a3f942827fd8b8f0f80f602884efb12 --- dynamic_partition_control_android.cc | 54 +++++++++++++++++++++++++--- dynamic_partition_control_android.h | 4 +++ 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index ba749d97..aa0f393c 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -434,17 +434,17 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( return false; } + if (!SetTargetBuildVars(manifest)) { + return false; + } + // Although the current build supports dynamic partitions, the given payload // doesn't use it for target partitions. This could happen when applying a // retrofit update. Skip updating the partition metadata for the target slot. - is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty(); if (!is_target_dynamic_) { return true; } - target_supports_snapshot_ = - manifest.dynamic_partition_metadata().snapshot_enabled(); - if (!update) return true; @@ -505,6 +505,52 @@ bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate( return true; } +bool DynamicPartitionControlAndroid::SetTargetBuildVars( + const DeltaArchiveManifest& manifest) { + // Precondition: current build supports dynamic partition. + CHECK(GetDynamicPartitionsFeatureFlag().IsEnabled()); + + bool is_target_dynamic = + !manifest.dynamic_partition_metadata().groups().empty(); + bool target_supports_snapshot = + manifest.dynamic_partition_metadata().snapshot_enabled(); + + if (manifest.partial_update()) { + // Partial updates requires DAP. On partial updates that does not involve + // dynamic partitions, groups() can be empty, so also assume + // is_target_dynamic in this case. This assumption should be safe because we + // also check target_supports_snapshot below, which presumably also implies + // target build supports dynamic partition. + if (!is_target_dynamic) { + LOG(INFO) << "Assuming target build supports dynamic partitions for " + "partial updates."; + is_target_dynamic = true; + } + + // Partial updates requires Virtual A/B. Double check that both current + // build and target build supports Virtual A/B. + if (!GetVirtualAbFeatureFlag().IsEnabled()) { + LOG(ERROR) << "Partial update cannot be applied on a device that does " + "not support snapshots."; + return false; + } + if (!target_supports_snapshot) { + LOG(ERROR) << "Cannot apply partial update to a build that does not " + "support snapshots."; + return false; + } + } + + // Store the flags. + is_target_dynamic_ = is_target_dynamic; + // If !is_target_dynamic_, leave target_supports_snapshot_ unset because + // snapshots would not work without dynamic partition. + if (is_target_dynamic_) { + target_supports_snapshot_ = target_supports_snapshot; + } + return true; +} + namespace { // Try our best to erase AVB footer. class AvbFooterEraser { diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 08656fdc..9ee85db6 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -267,6 +267,10 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { // doing anything. bool EnsureMetadataMounted(); + // Set boolean flags related to target build. This includes flags like + // target_supports_snapshot_ and is_target_dynamic_. 
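+  // Returns false if the payload is a partial update but either the running
+  // build or the target build does not support Virtual A/B snapshots.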
+ bool SetTargetBuildVars(const DeltaArchiveManifest& manifest); + std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; From aba70abe81618542044dc20907f281a56b8b500e Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 4 Aug 2020 10:32:59 -0400 Subject: [PATCH 352/624] Improve payload magic header handling Currently, we use central directory's extra fields and filenames to determine starting position of a zipentry's file data. However, central directory's extra field might differ from extra field in local file header. For example, the Extended-Timestamp field has different formats depending on whether it's in local file header or central directory. We should use local file header for computing offsets. Test: Serve an OTA by update_device.py Change-Id: I00d150d874b9c874bb713569ea14938e036f854e --- payload_consumer/payload_metadata.cc | 14 ++++++++++- scripts/update_device.py | 35 ++++++++++++++++++++++++---- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc index 01f3b62b..2cb73eb1 100644 --- a/payload_consumer/payload_metadata.cc +++ b/payload_consumer/payload_metadata.cc @@ -18,6 +18,7 @@ #include +#include #include #include "update_engine/common/constants.h" @@ -55,7 +56,18 @@ MetadataParseResult PayloadMetadata::ParsePayloadHeader( // Validate the magic string. if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) { - LOG(ERROR) << "Bad payload format -- invalid delta magic."; + LOG(ERROR) << "Bad payload format -- invalid delta magic: " + << base::StringPrintf("%02x%02x%02x%02x", + payload[0], + payload[1], + payload[2], + payload[3]) + << " Expected: " + << base::StringPrintf("%02x%02x%02x%02x", + kDeltaMagic[0], + kDeltaMagic[1], + kDeltaMagic[2], + kDeltaMagic[3]); *error = ErrorCode::kDownloadInvalidMetadataMagicString; return MetadataParseResult::kError; } diff --git a/scripts/update_device.py b/scripts/update_device.py index 7be3edbf..1cd4b6ae 100755 --- a/scripts/update_device.py +++ b/scripts/update_device.py @@ -20,12 +20,14 @@ from __future__ import absolute_import import argparse +import binascii import hashlib import logging import os import socket import subprocess import sys +import struct import threading import xml.etree.ElementTree import zipfile @@ -89,6 +91,7 @@ class AndroidOTAPackage(object): OTA_PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt' SECONDARY_OTA_PAYLOAD_BIN = 'secondary/payload.bin' SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt' + PAYLOAD_MAGIC_HEADER = b'CrAU' def __init__(self, otafilename, secondary_payload=False): self.otafilename = otafilename @@ -97,10 +100,34 @@ def __init__(self, otafilename, secondary_payload=False): payload_entry = (self.SECONDARY_OTA_PAYLOAD_BIN if secondary_payload else self.OTA_PAYLOAD_BIN) payload_info = otazip.getinfo(payload_entry) - self.offset = payload_info.header_offset - self.offset += zipfile.sizeFileHeader - self.offset += len(payload_info.extra) + len(payload_info.filename) - self.size = payload_info.file_size + + if payload_info.compress_type != 0: + logging.error( + "Expected layload to be uncompressed, got compression method %d", + payload_info.compress_type) + # Don't use len(payload_info.extra). Because that returns size of extra + # fields in central directory. We need to look at local file directory, + # as these two might have different sizes. 
+ with open(otafilename, "rb") as fp: + fp.seek(payload_info.header_offset) + data = fp.read(zipfile.sizeFileHeader) + fheader = struct.unpack(zipfile.structFileHeader, data) + # Last two fields of local file header are filename length and + # extra length + filename_len = fheader[-2] + extra_len = fheader[-1] + self.offset = payload_info.header_offset + self.offset += zipfile.sizeFileHeader + self.offset += filename_len + extra_len + self.size = payload_info.file_size + fp.seek(self.offset) + payload_header = fp.read(4) + if payload_header != self.PAYLOAD_MAGIC_HEADER: + logging.warning( + "Invalid header, expeted %s, got %s." + "Either the offset is not correct, or payload is corrupted", + binascii.hexlify(self.PAYLOAD_MAGIC_HEADER), + payload_header) property_entry = (self.SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT if secondary_payload else self.OTA_PAYLOAD_PROPERTIES_TXT) From 5d7a1de410f98011b18306c679eec2cf29cda85f Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 7 Aug 2020 11:49:03 -0700 Subject: [PATCH 353/624] update_engine_stable_android: Use noreturn exit is already marked as noreturn. Use [[noreturn]] to avoid using unreachable(). Test: builds Change-Id: Iaac02bd03d2e3d179da5a7d0133849826ccb106e --- stable/update_engine_stable_client.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stable/update_engine_stable_client.cc b/stable/update_engine_stable_client.cc index c466c426..da203c4c 100644 --- a/stable/update_engine_stable_client.cc +++ b/stable/update_engine_stable_client.cc @@ -50,10 +50,9 @@ DEFINE_string(headers, "", "A list of key-value pairs, one element of the list per line."); -int Exit(int return_code) { +[[noreturn]] int Exit(int return_code) { LOG(INFO) << "Exit: " << return_code; exit(return_code); - __builtin_unreachable(); } // Called whenever the UpdateEngine daemon dies. void UpdateEngineServiceDied(void*) { @@ -98,7 +97,6 @@ UpdateEngineClientAndroid::UECallback::onPayloadApplicationComplete( Exit((code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive) ? EX_OK : EX_SOFTWARE); - __builtin_unreachable(); } int UpdateEngineClientAndroid::Run() { From ff716cdd33b75e628c3abeed3f79ac6547b51ee5 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 11 Aug 2020 14:37:10 -0700 Subject: [PATCH 354/624] update_engine: Fix a lint issue For some reason the linter is thinking this line is doing C-Style casting and gives this error: /mnt/host/source/src/aosp/system/update_engine/update_manager/real_device_policy_provider.cc:108: Using C-style cast. Use reinterpret_cast(...) instead [readability/casting] [4] BUG=None TEST=repo upload doesn't fail anymore. 
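For reference, the local-file-header arithmetic that update_device.py now performs above can be written out independently. The 30-byte fixed header and the 16-bit name/extra length fields at offsets 26 and 28 come from the ZIP format specification; the helper below is only an illustrative sketch, not code from update_engine:

  #include <cstdint>
  #include <cstdio>

  // Returns the absolute offset of a zip entry's file data, given the offset
  // of its local file header (mirrors the struct.unpack() logic above).
  int64_t EntryDataOffset(std::FILE* fp, int64_t local_header_offset) {
    constexpr size_t kLocalHeaderSize = 30;  // fixed portion of the header
    unsigned char hdr[kLocalHeaderSize];
    if (std::fseek(fp, local_header_offset, SEEK_SET) != 0 ||
        std::fread(hdr, 1, kLocalHeaderSize, fp) != kLocalHeaderSize) {
      return -1;
    }
    // Little-endian uint16 fields: name length at 26, extra length at 28.
    const uint16_t name_len = hdr[26] | (hdr[27] << 8);
    const uint16_t extra_len = hdr[28] | (hdr[29] << 8);
    return local_header_offset +
           static_cast<int64_t>(kLocalHeaderSize + name_len + extra_len);
  }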
Change-Id: I5d1e0d96a21fdf1080262e8e04237ccfb809c96f Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2350579 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- update_manager/real_device_policy_provider.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc index 781e2acb..55232f3e 100644 --- a/update_manager/real_device_policy_provider.cc +++ b/update_manager/real_device_policy_provider.cc @@ -104,9 +104,10 @@ void RealDevicePolicyProvider::RefreshDevicePolicyAndReschedule() { } template -void RealDevicePolicyProvider::UpdateVariable(AsyncCopyVariable* var, - bool (DevicePolicy::*getter)(T*) - const) { +void RealDevicePolicyProvider::UpdateVariable( + AsyncCopyVariable* var, + // NOLINTNEXTLINE(readability/casting) + bool (DevicePolicy::*getter)(T*) const) { T new_value; if (policy_provider_->device_policy_is_loaded() && (policy_provider_->GetDevicePolicy().*getter)(&new_value)) { From a6e1812a9ba8a897cd7b22c72bdbfbe09f1a9a65 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 6 Aug 2020 15:53:25 -0400 Subject: [PATCH 355/624] Remove metric_report_android.cc from unittests We don't need metric reporting for unittests. And the android reporter often causes dynamic linking issues if the unittest is ran on a different target(sargo vs aosp_sargo). This change should make it easier to run atests. Test: atest update_engine_unittests Change-Id: I92367ff906472ea0513112da0d4e7cfd3bab2a84 --- Android.bp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Android.bp b/Android.bp index 1076c526..b6ee4769 100644 --- a/Android.bp +++ b/Android.bp @@ -306,7 +306,6 @@ cc_library_static { "hardware_android.cc", "libcurl_http_fetcher.cc", "logging_android.cc", - "metrics_reporter_android.cc", "metrics_utils.cc", "network_selector_android.cc", "update_attempter_android.cc", @@ -331,7 +330,7 @@ cc_binary { "otacerts", ], - srcs: ["main.cc"], + srcs: ["main.cc", "metrics_reporter_android.cc"], init_rc: ["update_engine.rc"], } @@ -706,6 +705,7 @@ cc_test { "testrunner.cc", "update_attempter_android_unittest.cc", "update_status_utils_unittest.cc", + "metrics_reporter_stub.cc", ], } From 3b323cf1a3b846227dc0d31b7baf03193461f5e6 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Wed, 5 Aug 2020 10:56:02 -0400 Subject: [PATCH 356/624] Fetch a/b ota partitions from the build prop, instead of walking through /dev/block/by-name Test: administer a gki partial update on cuttlefish Bug: 162148770 Change-Id: I173bd3ee1c462428ed02a9421c87ebed8dde636d --- common/fake_boot_control.h | 3 + .../partition_update_generator_android.cc | 169 ++++++------------ .../partition_update_generator_android.h | 13 +- ...ition_update_generator_android_unittest.cc | 65 ++++--- 4 files changed, 90 insertions(+), 160 deletions(-) diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h index adbacd67..5d8823a2 100644 --- a/common/fake_boot_control.h +++ b/common/fake_boot_control.h @@ -57,6 +57,9 @@ class FakeBootControl : public BootControlInterface { if (part_it == devices_[slot].end()) return false; *device = part_it->second; + if (is_dynamic != nullptr) { + *is_dynamic = false; + } return true; } diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc index 5768dd6f..d5d5313f 100644 --- a/payload_consumer/partition_update_generator_android.cc +++ 
b/payload_consumer/partition_update_generator_android.cc @@ -18,30 +18,23 @@ #include #include -#include -#include #include +#include #include #include +#include +#include "update_engine/common/boot_control_interface.h" #include "update_engine/common/hash_calculator.h" #include "update_engine/common/utils.h" -namespace { -// TODO(xunchang) use definition in fs_mgr, e.g. fs_mgr_get_slot_suffix -const char* SUFFIX_A = "_a"; -const char* SUFFIX_B = "_b"; -} // namespace - namespace chromeos_update_engine { PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid( BootControlInterface* boot_control, - std::string device_dir, size_t block_size) : boot_control_(boot_control), - block_device_dir_(std::move(device_dir)), block_size_(block_size) {} bool PartitionUpdateGeneratorAndroid:: @@ -50,22 +43,57 @@ bool PartitionUpdateGeneratorAndroid:: BootControlInterface::Slot target_slot, const std::set& partitions_in_payload, std::vector* update_list) { - auto ab_partitions = GetStaticAbPartitionsOnDevice(); - if (!ab_partitions.has_value()) { + auto ab_partitions = GetAbPartitionsOnDevice(); + if (ab_partitions.empty()) { LOG(ERROR) << "Failed to load static a/b partitions"; return false; } std::vector partition_updates; - for (const auto& partition_name : ab_partitions.value()) { + for (const auto& partition_name : ab_partitions) { if (partitions_in_payload.find(partition_name) != partitions_in_payload.end()) { LOG(INFO) << partition_name << " has included in payload"; continue; } + bool is_source_dynamic = false; + std::string source_device; + + TEST_AND_RETURN_FALSE( + boot_control_->GetPartitionDevice(partition_name, + source_slot, + true, /* not_in_payload */ + &source_device, + &is_source_dynamic)); + bool is_target_dynamic = false; + std::string target_device; + TEST_AND_RETURN_FALSE(boot_control_->GetPartitionDevice( + partition_name, target_slot, true, &target_device, &is_target_dynamic)); + + if (is_source_dynamic || is_target_dynamic) { + if (is_source_dynamic != is_target_dynamic) { + LOG(ERROR) << "Partition " << partition_name << " is expected to be a" + << " static partition. source slot is " + << (is_source_dynamic ? "" : "not") + << " dynamic, and target slot " << target_slot << " is " + << (is_target_dynamic ? "" : "not") << " dynamic."; + return false; + } else { + continue; + } + } + + auto source_size = utils::FileSize(source_device); + auto target_size = utils::FileSize(target_device); + if (source_size == -1 || target_size == -1 || source_size != target_size || + source_size % block_size_ != 0) { + LOG(ERROR) << "Invalid partition size. 
source size " << source_size + << ", target size " << target_size; + return false; + } - auto partition_update = - CreatePartitionUpdate(partition_name, source_slot, target_slot); + auto partition_update = CreatePartitionUpdate( + partition_name, source_device, target_device, source_size); if (!partition_update.has_value()) { LOG(ERROR) << "Failed to create partition update for " << partition_name; return false; @@ -76,98 +104,14 @@ bool PartitionUpdateGeneratorAndroid:: return true; } -std::optional> -PartitionUpdateGeneratorAndroid::GetStaticAbPartitionsOnDevice() { - if (std::error_code error_code; - !std::filesystem::exists(block_device_dir_, error_code) || error_code) { - LOG(ERROR) << "Failed to find " << block_device_dir_ << " " - << error_code.message(); - return std::nullopt; - } - - std::error_code error_code; - auto it = std::filesystem::directory_iterator(block_device_dir_, error_code); - if (error_code) { - LOG(ERROR) << "Failed to iterate " << block_device_dir_ << " " - << error_code.message(); - return std::nullopt; - } - - std::set partitions_with_suffix; - for (const auto& entry : it) { - auto partition_name = entry.path().filename().string(); - if (android::base::EndsWith(partition_name, SUFFIX_A) || - android::base::EndsWith(partition_name, SUFFIX_B)) { - partitions_with_suffix.insert(partition_name); - } - } - - // Second iteration to add the partition name without suffixes. - std::set ab_partitions; - for (std::string_view name : partitions_with_suffix) { - if (!android::base::ConsumeSuffix(&name, SUFFIX_A)) { - continue; - } - - // Add to the output list if the partition exist for both slot a and b. - auto base_name = std::string(name); - if (partitions_with_suffix.find(base_name + SUFFIX_B) != - partitions_with_suffix.end()) { - ab_partitions.insert(base_name); - } else { - LOG(WARNING) << "Failed to find the b partition for " << base_name; - } - } - - return ab_partitions; -} - -std::optional -PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( - const std::string& partition_name, - BootControlInterface::Slot source_slot, - BootControlInterface::Slot target_slot) { - bool is_source_dynamic = false; - std::string source_device; - if (!boot_control_->GetPartitionDevice(partition_name, - source_slot, - true, /* not_in_payload */ - &source_device, - &is_source_dynamic)) { - LOG(ERROR) << "Failed to load source " << partition_name; - return std::nullopt; - } - bool is_target_dynamic = false; - std::string target_device; - if (!boot_control_->GetPartitionDevice(partition_name, - target_slot, - true, - &target_device, - &is_target_dynamic)) { - LOG(ERROR) << "Failed to load target " << partition_name; - return std::nullopt; - } - - if (is_source_dynamic || is_target_dynamic) { - LOG(ERROR) << "Partition " << partition_name << " is expected to be a" - << " static partition. source slot is " - << (is_source_dynamic ? "" : "not") - << " dynamic, and target slot " << target_slot << " is " - << (is_target_dynamic ? "" : "not") << " dynamic."; - return std::nullopt; - } - - auto source_size = utils::FileSize(source_device); - auto target_size = utils::FileSize(target_device); - if (source_size == -1 || target_size == -1 || source_size != target_size || - source_size % block_size_ != 0) { - LOG(ERROR) << "Invalid partition size. 
source size " << source_size - << ", target size " << target_size; - return std::nullopt; - } - - return CreatePartitionUpdate( - partition_name, source_device, target_device, source_size); +std::vector +PartitionUpdateGeneratorAndroid::GetAbPartitionsOnDevice() const { + auto partition_list_str = + android::base::GetProperty("ro.product.ab_ota_partitions", ""); + return base::SplitString(partition_list_str, + ",", + base::TRIM_WHITESPACE, + base::SPLIT_WANT_NONEMPTY); } std::optional @@ -183,6 +127,8 @@ PartitionUpdateGeneratorAndroid::CreatePartitionUpdate( auto raw_hash = CalculateHashForPartition(source_device, partition_size); if (!raw_hash.has_value()) { + LOG(ERROR) << "Failed to calculate hash for partition " << source_device + << " size: " << partition_size; return {}; } old_partition_info->set_hash(raw_hash->data(), raw_hash->size()); @@ -225,16 +171,9 @@ namespace partition_update_generator { std::unique_ptr Create( BootControlInterface* boot_control, size_t block_size) { CHECK(boot_control); - auto dynamic_control = boot_control->GetDynamicPartitionControl(); - CHECK(dynamic_control); - std::string dir_path; - if (!dynamic_control->GetDeviceDir(&dir_path)) { - return nullptr; - } return std::unique_ptr( - new PartitionUpdateGeneratorAndroid( - boot_control, std::move(dir_path), block_size)); + new PartitionUpdateGeneratorAndroid(boot_control, block_size)); } } // namespace partition_update_generator diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h index 97b7d838..0330c99f 100644 --- a/payload_consumer/partition_update_generator_android.h +++ b/payload_consumer/partition_update_generator_android.h @@ -29,11 +29,11 @@ #include "update_engine/payload_consumer/partition_update_generator_interface.h" namespace chromeos_update_engine { + class PartitionUpdateGeneratorAndroid : public PartitionUpdateGeneratorInterface { public: PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control, - std::string device_dir, size_t block_size); bool GenerateOperationsForPartitionsNotInPayload( @@ -41,15 +41,13 @@ class PartitionUpdateGeneratorAndroid BootControlInterface::Slot target_slot, const std::set& partitions_in_payload, std::vector* update_list) override; + virtual std::vector GetAbPartitionsOnDevice() const; private: friend class PartitionUpdateGeneratorAndroidTest; FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions); FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate); - // Gets the name of the static a/b partitions on the device. - std::optional> GetStaticAbPartitionsOnDevice(); - // Creates a PartitionUpdate object for a given partition to update from // source to target. Returns std::nullopt on failure. 
std::optional CreatePartitionUpdate( @@ -58,17 +56,10 @@ class PartitionUpdateGeneratorAndroid const std::string& target_device, int64_t partition_size); - std::optional CreatePartitionUpdate( - const std::string& partition_name, - BootControlInterface::Slot source_slot, - BootControlInterface::Slot target_slot); - std::optional CalculateHashForPartition( const std::string& block_device, int64_t partition_size); BootControlInterface* boot_control_; - // Path to look for a/b partitions - std::string block_device_dir_; size_t block_size_; }; diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc index c3be9dbc..86d025ed 100644 --- a/payload_consumer/partition_update_generator_android_unittest.cc +++ b/payload_consumer/partition_update_generator_android_unittest.cc @@ -19,12 +19,14 @@ #include #include #include +#include #include #include #include #include +#include "update_engine/common/boot_control_interface.h" #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/hash_calculator.h" #include "update_engine/common/test_utils.h" @@ -32,40 +34,53 @@ namespace chromeos_update_engine { +class FakePartitionUpdateGenerator : public PartitionUpdateGeneratorAndroid { + public: + std::vector GetAbPartitionsOnDevice() const { + return ab_partitions_; + } + using PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid; + std::vector ab_partitions_; +}; + class PartitionUpdateGeneratorAndroidTest : public ::testing::Test { protected: void SetUp() override { ASSERT_TRUE(device_dir_.CreateUniqueTempDir()); boot_control_ = std::make_unique(); - boot_control_->SetNumSlots(2); - auto generator = - partition_update_generator::Create(boot_control_.get(), 4096); - generator_.reset( - static_cast(generator.release())); ASSERT_TRUE(boot_control_); + boot_control_->SetNumSlots(2); + generator_ = std::make_unique( + boot_control_.get(), 4096); ASSERT_TRUE(generator_); - generator_->block_device_dir_ = device_dir_.GetPath().value(); } - std::unique_ptr generator_; + std::unique_ptr generator_; std::unique_ptr boot_control_; base::ScopedTempDir device_dir_; + std::map device_map_; void SetUpBlockDevice(const std::map& contents) { + std::set partition_base_names; for (const auto& [name, content] : contents) { - auto path = generator_->block_device_dir_ + "/" + name; + auto path = device_dir_.GetPath().value() + "/" + name; ASSERT_TRUE( utils::WriteFile(path.c_str(), content.data(), content.size())); if (android::base::EndsWith(name, "_a")) { - boot_control_->SetPartitionDevice( - name.substr(0, name.size() - 2), 0, path); + auto prefix = name.substr(0, name.size() - 2); + boot_control_->SetPartitionDevice(prefix, 0, path); + partition_base_names.emplace(prefix); } else if (android::base::EndsWith(name, "_b")) { - boot_control_->SetPartitionDevice( - name.substr(0, name.size() - 2), 1, path); + auto prefix = name.substr(0, name.size() - 2); + boot_control_->SetPartitionDevice(prefix, 1, path); + partition_base_names.emplace(prefix); } + device_map_[name] = std::move(path); } + generator_->ab_partitions_ = {partition_base_names.begin(), + partition_base_names.end()}; } void CheckPartitionUpdate(const std::string& name, @@ -95,25 +110,6 @@ class PartitionUpdateGeneratorAndroidTest : public ::testing::Test { } }; -TEST_F(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions) { - std::map contents = { - {"system_a", ""}, - {"system_b", ""}, - {"vendor_a", ""}, - {"vendor_b", ""}, - {"persist", 
""}, - {"vbmeta_a", ""}, - {"vbmeta_b", ""}, - {"boot_a", ""}, - {"boot_b", ""}, - }; - - SetUpBlockDevice(contents); - auto partitions = generator_->GetStaticAbPartitionsOnDevice(); - ASSERT_EQ(std::set({"system", "vendor", "vbmeta", "boot"}), - partitions); -} - TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) { auto system_contents = std::string(4096 * 2, '1'); auto boot_contents = std::string(4096 * 5, 'b'); @@ -125,13 +121,14 @@ TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) { }; SetUpBlockDevice(contents); - auto system_partition_update = - generator_->CreatePartitionUpdate("system", 0, 1); + auto system_partition_update = generator_->CreatePartitionUpdate( + "system", device_map_["system_a"], device_map_["system_b"], 4096 * 2); ASSERT_TRUE(system_partition_update.has_value()); CheckPartitionUpdate( "system", system_contents, system_partition_update.value()); - auto boot_partition_update = generator_->CreatePartitionUpdate("boot", 0, 1); + auto boot_partition_update = generator_->CreatePartitionUpdate( + "boot", device_map_["boot_a"], device_map_["boot_b"], 4096 * 5); ASSERT_TRUE(boot_partition_update.has_value()); CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value()); } From c743e8f70d36affb3ec1ad16440781612eeb3eb5 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 11 Aug 2020 14:50:25 -0700 Subject: [PATCH 357/624] update_engine: Remove junk logs These logs bear no special meaning and just polute the device's update_engine logs. So it is better to just remove them. BUG=b:163075733 TEST=None Change-Id: I901231152eaadaa16b0d212ab97f691a1decee90 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2350581 Reviewed-by: Jae Hoon Kim Tested-by: Amin Hassani Commit-Queue: Andrew Lassalle --- metrics_reporter_omaha.cc | 73 ----------------------------- update_manager/policy_utils.h | 1 - update_manager/update_manager-inl.h | 3 -- 3 files changed, 77 deletions(-) diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc index fb4e4ce9..0cf0e59b 100644 --- a/metrics_reporter_omaha.cc +++ b/metrics_reporter_omaha.cc @@ -146,8 +146,6 @@ MetricsReporterOmaha::MetricsReporterOmaha() void MetricsReporterOmaha::ReportDailyMetrics(base::TimeDelta os_age) { string metric = metrics::kMetricDailyOSAgeDays; - LOG(INFO) << "Uploading " << utils::FormatTimeDelta(os_age) << " for metric " - << metric; metrics_lib_->SendToUMA(metric, static_cast(os_age.InDays()), 0, // min: 0 days @@ -168,20 +166,17 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( metric = metrics::kMetricCheckResult; value = static_cast(result); max_value = static_cast(metrics::CheckResult::kNumConstants) - 1; - LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)"; metrics_lib_->SendEnumToUMA(metric, value, max_value); } if (reaction != metrics::CheckReaction::kUnset) { metric = metrics::kMetricCheckReaction; value = static_cast(reaction); max_value = static_cast(metrics::CheckReaction::kNumConstants) - 1; - LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)"; metrics_lib_->SendEnumToUMA(metric, value, max_value); } if (download_error_code != metrics::DownloadErrorCode::kUnset) { metric = metrics::kMetricCheckDownloadErrorCode; value = static_cast(download_error_code); - LOG(INFO) << "Sending " << value << " for metric " << metric << " (sparse)"; metrics_lib_->SendSparseToUMA(metric, value); } @@ -191,8 +186,6 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( 
kPrefsMetricsCheckLastReportingTime, &time_since_last)) { metric = metrics::kMetricCheckTimeSinceLastCheckMinutes; - LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, time_since_last.InMinutes(), 0, // min: 0 min @@ -205,8 +198,6 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( if (metrics_utils::MonotonicDurationHelper( system_state, &uptime_since_last_storage, &uptime_since_last)) { metric = metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes; - LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, uptime_since_last.InMinutes(), 0, // min: 0 min @@ -221,13 +212,9 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( value = utils::VersionPrefix(target_version); if (value != 0) { metric = metrics::kMetricCheckTargetVersion; - LOG(INFO) << "Sending " << value << " for metric " << metric - << " (sparse)"; metrics_lib_->SendSparseToUMA(metric, value); if (system_state->request_params()->rollback_allowed()) { metric = metrics::kMetricCheckRollbackTargetVersion; - LOG(INFO) << "Sending " << value << " for metric " << metric - << " (sparse)"; metrics_lib_->SendSparseToUMA(metric, value); } } @@ -239,8 +226,6 @@ void MetricsReporterOmaha::ReportAbnormallyTerminatedUpdateAttemptMetrics() { metrics::AttemptResult attempt_result = metrics::AttemptResult::kAbnormalTermination; - LOG(INFO) << "Uploading " << static_cast(attempt_result) - << " for metric " << metric; metrics_lib_->SendEnumToUMA( metric, static_cast(attempt_result), @@ -257,7 +242,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( metrics::AttemptResult attempt_result, ErrorCode internal_error_code) { string metric = metrics::kMetricAttemptNumber; - LOG(INFO) << "Uploading " << attempt_number << " for metric " << metric; metrics_lib_->SendToUMA(metric, attempt_number, 0, // min: 0 attempts @@ -265,13 +249,9 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( 50); // num_buckets metric = metrics::kMetricAttemptPayloadType; - LOG(INFO) << "Uploading " << utils::ToString(payload_type) << " for metric " - << metric; metrics_lib_->SendEnumToUMA(metric, payload_type, kNumPayloadTypes); metric = metrics::kMetricAttemptDurationMinutes; - LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, duration.InMinutes(), 0, // min: 0 min @@ -279,8 +259,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( 50); // num_buckets metric = metrics::kMetricAttemptDurationUptimeMinutes; - LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration_uptime) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, duration_uptime.InMinutes(), 0, // min: 0 min @@ -289,7 +267,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( metric = metrics::kMetricAttemptPayloadSizeMiB; int64_t payload_size_mib = payload_size / kNumBytesInOneMiB; - LOG(INFO) << "Uploading " << payload_size_mib << " for metric " << metric; metrics_lib_->SendToUMA(metric, payload_size_mib, 0, // min: 0 MiB @@ -297,8 +274,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( 50); // num_buckets metric = metrics::kMetricAttemptResult; - LOG(INFO) << "Uploading " << static_cast(attempt_result) - << " for metric " << metric; metrics_lib_->SendEnumToUMA( metric, static_cast(attempt_result), @@ -314,8 +289,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( kPrefsMetricsAttemptLastReportingTime, &time_since_last)) { 
metric = metrics::kMetricAttemptTimeSinceLastAttemptMinutes; - LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, time_since_last.InMinutes(), 0, // min: 0 min @@ -328,8 +301,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( if (metrics_utils::MonotonicDurationHelper( system_state, &uptime_since_last_storage, &uptime_since_last)) { metric = metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes; - LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, uptime_since_last.InMinutes(), 0, // min: 0 min @@ -347,8 +318,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptDownloadMetrics( string metric = metrics::kMetricAttemptPayloadBytesDownloadedMiB; int64_t payload_bytes_downloaded_mib = payload_bytes_downloaded / kNumBytesInOneMiB; - LOG(INFO) << "Uploading " << payload_bytes_downloaded_mib << " for metric " - << metric; metrics_lib_->SendToUMA(metric, payload_bytes_downloaded_mib, 0, // min: 0 MiB @@ -357,8 +326,6 @@ void MetricsReporterOmaha::ReportUpdateAttemptDownloadMetrics( metric = metrics::kMetricAttemptPayloadDownloadSpeedKBps; int64_t payload_download_speed_kbps = payload_download_speed_bps / 1000; - LOG(INFO) << "Uploading " << payload_download_speed_kbps << " for metric " - << metric; metrics_lib_->SendToUMA(metric, payload_download_speed_kbps, 0, // min: 0 kB/s @@ -366,20 +333,15 @@ void MetricsReporterOmaha::ReportUpdateAttemptDownloadMetrics( 50); // num_buckets metric = metrics::kMetricAttemptDownloadSource; - LOG(INFO) << "Uploading " << download_source << " for metric " << metric; metrics_lib_->SendEnumToUMA(metric, download_source, kNumDownloadSources); if (payload_download_error_code != metrics::DownloadErrorCode::kUnset) { metric = metrics::kMetricAttemptDownloadErrorCode; - LOG(INFO) << "Uploading " << static_cast(payload_download_error_code) - << " for metric " << metric << " (sparse)"; metrics_lib_->SendSparseToUMA( metric, static_cast(payload_download_error_code)); } metric = metrics::kMetricAttemptConnectionType; - LOG(INFO) << "Uploading " << static_cast(connection_type) - << " for metric " << metric; metrics_lib_->SendEnumToUMA( metric, static_cast(connection_type), @@ -399,7 +361,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( int url_switch_count) { string metric = metrics::kMetricSuccessfulUpdatePayloadSizeMiB; int64_t mbs = payload_size / kNumBytesInOneMiB; - LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric; metrics_lib_->SendToUMA(metric, mbs, 0, // min: 0 MiB @@ -429,7 +390,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( } if (mbs > 0) { - LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric; metrics_lib_->SendToUMA(metric, mbs, 0, // min: 0 MiB @@ -439,8 +399,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( } metric = metrics::kMetricSuccessfulUpdateDownloadSourcesUsed; - LOG(INFO) << "Uploading 0x" << std::hex << download_sources_used - << " (bit flags) for metric " << metric; metrics_lib_->SendToUMA(metric, download_sources_used, 0, // min @@ -448,8 +406,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( 1 << kNumDownloadSources); // num_buckets metric = metrics::kMetricSuccessfulUpdateDownloadOverheadPercentage; - LOG(INFO) << "Uploading " << download_overhead_percentage << "% for metric " - << metric; metrics_lib_->SendToUMA(metric, download_overhead_percentage, 0, // min: 0% overhead @@ -457,8 +413,6 
@@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( 50); // num_buckets metric = metrics::kMetricSuccessfulUpdateUrlSwitchCount; - LOG(INFO) << "Uploading " << url_switch_count << " (count) for metric " - << metric; metrics_lib_->SendToUMA(metric, url_switch_count, 0, // min: 0 URL switches @@ -466,8 +420,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( 50); // num_buckets metric = metrics::kMetricSuccessfulUpdateTotalDurationMinutes; - LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, static_cast(total_duration.InMinutes()), 0, // min: 0 min @@ -475,8 +427,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( 50); // num_buckets metric = metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes; - LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration_uptime) - << " for metric " << metric; metrics_lib_->SendToUMA(metric, static_cast(total_duration_uptime.InMinutes()), 0, // min: 0 min @@ -484,8 +434,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( 50); // num_buckets metric = metrics::kMetricSuccessfulUpdateRebootCount; - LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric " - << metric; metrics_lib_->SendToUMA(metric, reboot_count, 0, // min: 0 reboots @@ -494,8 +442,6 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( metric = metrics::kMetricSuccessfulUpdatePayloadType; metrics_lib_->SendEnumToUMA(metric, payload_type, kNumPayloadTypes); - LOG(INFO) << "Uploading " << utils::ToString(payload_type) << " for metric " - << metric; metric = metrics::kMetricSuccessfulUpdateAttemptCount; metrics_lib_->SendToUMA(metric, @@ -503,11 +449,8 @@ void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics( 1, // min: 1 attempt 50, // max: 50 attempts 50); // num_buckets - LOG(INFO) << "Uploading " << attempt_count << " for metric " << metric; metric = metrics::kMetricSuccessfulUpdateUpdatesAbandonedCount; - LOG(INFO) << "Uploading " << updates_abandoned_count << " (count) for metric " - << metric; metrics_lib_->SendToUMA(metric, updates_abandoned_count, 0, // min: 0 counts @@ -519,7 +462,6 @@ void MetricsReporterOmaha::ReportRollbackMetrics( metrics::RollbackResult result) { string metric = metrics::kMetricRollbackResult; int value = static_cast(result); - LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)"; metrics_lib_->SendEnumToUMA( metric, value, static_cast(metrics::RollbackResult::kNumConstants)); } @@ -530,7 +472,6 @@ void MetricsReporterOmaha::ReportEnterpriseRollbackMetrics( string metric = metrics::kMetricEnterpriseRollbackSuccess; if (!success) metric = metrics::kMetricEnterpriseRollbackFailure; - LOG(INFO) << "Sending " << value << " for metric " << metric; metrics_lib_->SendSparseToUMA(metric, value); } @@ -547,8 +488,6 @@ void MetricsReporterOmaha::ReportCertificateCheckMetrics( case ServerToCheck::kNone: return; } - LOG(INFO) << "Uploading " << static_cast(result) << " for metric " - << metric; metrics_lib_->SendEnumToUMA( metric, static_cast(result), @@ -562,9 +501,6 @@ void MetricsReporterOmaha::ReportFailedUpdateCount(int target_attempt) { 1, // min value 50, // max value kNumDefaultUmaBuckets); - - LOG(INFO) << "Uploading " << target_attempt << " (count) for metric " - << metric; } void MetricsReporterOmaha::ReportTimeToReboot(int time_to_reboot_minutes) { @@ -574,9 +510,6 @@ void MetricsReporterOmaha::ReportTimeToReboot(int time_to_reboot_minutes) { 0, // min: 0 minute 30 * 24 * 60, // max: 
1 month (approx) kNumDefaultUmaBuckets); - - LOG(INFO) << "Uploading " << time_to_reboot_minutes << " for metric " - << metric; } void MetricsReporterOmaha::ReportInstallDateProvisioningSource(int source, @@ -588,7 +521,6 @@ void MetricsReporterOmaha::ReportInstallDateProvisioningSource(int source, void MetricsReporterOmaha::ReportInternalErrorCode(ErrorCode error_code) { auto metric = metrics::kMetricAttemptInternalErrorCode; - LOG(INFO) << "Uploading " << error_code << " for metric " << metric; metrics_lib_->SendEnumToUMA(metric, static_cast(error_code), static_cast(ErrorCode::kUmaReportedMax)); @@ -600,18 +532,14 @@ void MetricsReporterOmaha::ReportKeyVersionMetrics( bool kernel_max_rollforward_success) { int value = kernel_min_version; string metric = metrics::kMetricKernelMinVersion; - LOG(INFO) << "Sending " << value << " for metric " << metric; metrics_lib_->SendSparseToUMA(metric, value); value = kernel_max_rollforward_version; metric = metrics::kMetricKernelMaxRollforwardVersion; - LOG(INFO) << "Sending " << value << " for metric " << metric; metrics_lib_->SendSparseToUMA(metric, value); bool bool_value = kernel_max_rollforward_success; metric = metrics::kMetricKernelMaxRollforwardSetSuccess; - LOG(INFO) << "Sending " << bool_value << " for metric " << metric - << " (bool)"; metrics_lib_->SendBoolToUMA(metric, bool_value); } @@ -621,7 +549,6 @@ void MetricsReporterOmaha::ReportEnterpriseUpdateSeenToDownloadDays( has_time_restriction_policy ? metrics::kMetricSuccessfulUpdateDurationFromSeenTimeRestrictedDays : metrics::kMetricSuccessfulUpdateDurationFromSeenDays; - LOG(INFO) << "Sending " << time_to_update_days << " for metric " << metric; metrics_lib_->SendToUMA(metric, time_to_update_days, diff --git a/update_manager/policy_utils.h b/update_manager/policy_utils.h index 32047803..dc606f21 100644 --- a/update_manager/policy_utils.h +++ b/update_manager/policy_utils.h @@ -55,7 +55,6 @@ EvalStatus ConsultPolicies(const std::vector policies, EvalStatus status = (policy->*policy_method)(ec, state, error, result, args...); if (status != EvalStatus::kContinue) { - LOG(INFO) << "decision by " << policy->PolicyRequestName(policy_method); return status; } } diff --git a/update_manager/update_manager-inl.h b/update_manager/update_manager-inl.h index a1d172d5..550642c5 100644 --- a/update_manager/update_manager-inl.h +++ b/update_manager/update_manager-inl.h @@ -49,7 +49,6 @@ EvalStatus UpdateManager::EvaluatePolicy( ec->ResetEvaluation(); const std::string policy_name = policy_->PolicyRequestName(policy_method); - LOG(INFO) << policy_name << ": START"; // First try calling the actual policy. std::string error; @@ -71,8 +70,6 @@ EvalStatus UpdateManager::EvaluatePolicy( } } - LOG(INFO) << policy_name << ": END"; - return status; } From b753e0e9258170dce7c62733eed78e616b4a37b2 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 17 Aug 2020 13:54:46 -0400 Subject: [PATCH 358/624] Fix race condition in blob_file_writer SetTotalBlobs modifies total_blobs, so it chould grab a mutex first. 
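As a standalone illustration of the hazard (using std::thread and std::mutex purely for the sketch; the actual change uses base::AutoLock), unguarded increments from the per-chunk worker threads can be lost, while the locked version always totals correctly:

  #include <cstdio>
  #include <mutex>
  #include <thread>
  #include <vector>

  struct BlobCounter {
    std::mutex mu;
    size_t total_blobs = 0;
    void Inc(size_t n) {
      std::lock_guard<std::mutex> lock(mu);  // drop this and the sum races
      total_blobs += n;
    }
  };

  int main() {
    BlobCounter counter;
    std::vector<std::thread> workers;
    for (int i = 0; i < 8; ++i)
      workers.emplace_back([&counter] {
        for (int j = 0; j < 100000; ++j) counter.Inc(1);
      });
    for (auto& t : workers) t.join();
    std::printf("total_blobs = %zu\n", counter.total_blobs);  // always 800000
  }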
Test: run delta_generator repeatly(~500 times) Change-Id: Ic0e3ab0298dee9a30c0f8ba414d506e10e3654ca --- payload_generator/blob_file_writer.cc | 6 +++--- payload_generator/blob_file_writer.h | 6 ++---- payload_generator/full_update_generator.cc | 5 +---- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/payload_generator/blob_file_writer.cc b/payload_generator/blob_file_writer.cc index 7cdeb352..a1afe873 100644 --- a/payload_generator/blob_file_writer.cc +++ b/payload_generator/blob_file_writer.cc @@ -38,9 +38,9 @@ off_t BlobFileWriter::StoreBlob(const brillo::Blob& blob) { return result; } -void BlobFileWriter::SetTotalBlobs(size_t total_blobs) { - total_blobs_ = total_blobs; - stored_blobs_ = 0; +void BlobFileWriter::IncTotalBlobs(size_t increment) { + base::AutoLock auto_lock(blob_mutex_); + total_blobs_ += increment; } } // namespace chromeos_update_engine diff --git a/payload_generator/blob_file_writer.h b/payload_generator/blob_file_writer.h index 48553be1..bdd4c08c 100644 --- a/payload_generator/blob_file_writer.h +++ b/payload_generator/blob_file_writer.h @@ -35,10 +35,8 @@ class BlobFileWriter { // was stored, or -1 in case of failure. off_t StoreBlob(const brillo::Blob& blob); - // The number of |total_blobs| is the number of blobs that will be stored but - // is only used for logging purposes. If not set or set to 0, logging will be - // skipped. This function will also reset the number of stored blobs to 0. - void SetTotalBlobs(size_t total_blobs); + // Increase |total_blobs| by |increment|. Thread safe. + void IncTotalBlobs(size_t increment); private: size_t total_blobs_{0}; diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc index 94a43ab7..4a5f63a9 100644 --- a/payload_generator/full_update_generator.cc +++ b/payload_generator/full_update_generator.cc @@ -153,7 +153,7 @@ bool FullUpdateGenerator::GenerateOperations( aops->resize(num_chunks); vector chunk_processors; chunk_processors.reserve(num_chunks); - blob_file->SetTotalBlobs(num_chunks); + blob_file->IncTotalBlobs(num_chunks); for (size_t i = 0; i < num_chunks; ++i) { size_t start_block = i * chunk_blocks; @@ -187,9 +187,6 @@ bool FullUpdateGenerator::GenerateOperations( thread_pool.AddWork(&processor); thread_pool.JoinAll(); - // All the work done, disable logging. - blob_file->SetTotalBlobs(0); - // All the operations must have a type set at this point. Otherwise, a // ChunkProcessor failed to complete. for (const AnnotatedOperation& aop : *aops) { From 37b6723db1e4a89e307b4b665432c65af07eedb5 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 13 Aug 2020 09:29:48 -0700 Subject: [PATCH 359/624] update_engine: Implement ReleaseLtsTag LTS (Long Term Support) requires reading the policy ChromeOSReleaseLtsHint() to get the token that is used for defining the LTS channel parameters. The value is sent in "ltstag" attribute of the updatecheck. 
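For a rough picture of the resulting request, the attribute ends up on the updatecheck element, along the lines of the sketch below; everything other than the ltstag attribute is a placeholder, and the real value is passed through XmlEncodeWithDefault() first:

  <app appid="{app-id}" ...>
    <updatecheck ... ltstag="example-lts-hint">
  </app>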
BUG=chromium:1114784 TEST=sudo FEATURES=test emerge update_engine Cq-Depend: chromium:2345311 Change-Id: I480e7920e9187ebe79cbde6c655252e432842608 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2350580 Reviewed-by: Victor-Gabriel Savu Reviewed-by: Jae Hoon Kim Reviewed-by: Andrew Lassalle Tested-by: Victor-Gabriel Savu Commit-Queue: Victor-Gabriel Savu Commit-Queue: Amin Hassani --- mock_update_attempter.h | 21 ++++---- omaha_request_action_unittest.cc | 14 ++++++ omaha_request_builder_xml.cc | 5 ++ omaha_request_params.h | 7 +++ omaha_request_params_unittest.cc | 7 +++ update_attempter.cc | 7 +++ update_attempter.h | 3 ++ update_attempter_unittest.cc | 48 ++++++++++++------- update_manager/android_things_policy.cc | 1 + update_manager/chromeos_policy.cc | 1 + update_manager/chromeos_policy_unittest.cc | 3 ++ update_manager/default_policy.cc | 1 + update_manager/device_policy_provider.h | 2 + .../enterprise_device_policy_impl.cc | 6 +++ update_manager/fake_device_policy_provider.h | 6 +++ update_manager/policy.h | 2 + update_manager/real_device_policy_provider.cc | 1 + update_manager/real_device_policy_provider.h | 5 ++ .../real_device_policy_provider_unittest.cc | 1 + 19 files changed, 115 insertions(+), 26 deletions(-) diff --git a/mock_update_attempter.h b/mock_update_attempter.h index ad348028..cc056484 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -30,16 +30,17 @@ class MockUpdateAttempter : public UpdateAttempter { public: using UpdateAttempter::UpdateAttempter; - MOCK_METHOD9(Update, - void(const std::string& app_version, - const std::string& omaha_url, - const std::string& target_channel, - const std::string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool obey_proxies, - bool interactive)); + MOCK_METHOD10(Update, + void(const std::string& app_version, + const std::string& omaha_url, + const std::string& target_channel, + const std::string& lts_tag, + const std::string& target_version_prefix, + bool rollback_allowed, + bool rollback_data_save_requested, + int rollback_allowed_milestones, + bool obey_proxies, + bool interactive)); MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status)); diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index e608c077..adb95dff 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -1528,6 +1528,7 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { request_params_.set_os_board("x86 generic"); fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring"); fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\"); @@ -1547,6 +1548,8 @@ TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) { EXPECT_EQ(string::npos, post_str.find("x86 generic")); EXPECT_NE(string::npos, post_str.find("cohort=\"evil\nstring\"")); @@ -1801,6 +1804,17 @@ TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsNotSetTest) { EXPECT_EQ(string::npos, post_str.find(omaha_cohort_hint)); } +TEST_F(OmahaRequestActionTest, TargetChannelHintTest) { + tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); + tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; + tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; + request_params_.set_lts_tag("hint>"); + + ASSERT_TRUE(TestUpdateCheck()); + + EXPECT_NE(string::npos, post_str.find("ltstag=\"hint>\"")); +} + void 
OmahaRequestActionTest::PingTest(bool ping_only) { NiceMock prefs; fake_system_state_.set_prefs(&prefs); diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 2eb71bb6..8add89fe 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -154,6 +154,11 @@ string OmahaRequestBuilderXml::GetAppBody(const OmahaAppData& app_data) const { app_body += " rollback_allowed=\"true\""; } } + if (!params_->lts_tag().empty()) { + app_body += base::StringPrintf( + " ltstag=\"%s\"", + XmlEncodeWithDefault(params_->lts_tag()).c_str()); + } app_body += ">\n"; } diff --git a/omaha_request_params.h b/omaha_request_params.h index 76fc8060..aad92903 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -148,6 +148,10 @@ class OmahaRequestParams { return target_version_prefix_; } + inline std::string lts_tag() const { return lts_tag_; } + + inline void set_lts_tag(const std::string& hint) { lts_tag_ = hint; } + inline void set_rollback_allowed(bool rollback_allowed) { rollback_allowed_ = rollback_allowed; } @@ -367,6 +371,9 @@ class OmahaRequestParams { // changed and cancel the current download attempt. std::string download_channel_; + // The value defining the parameters of the LTS (Long Term Support). + std::string lts_tag_; + std::string hwid_; // Hardware Qualification ID of the client std::string fw_version_; // Chrome OS Firmware Version. std::string ec_version_; // Chrome OS EC Version. diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc index bfcbc328..110fb2bd 100644 --- a/omaha_request_params_unittest.cc +++ b/omaha_request_params_unittest.cc @@ -236,6 +236,13 @@ TEST_F(OmahaRequestParamsTest, ToMoreStableChannelFlagTest) { EXPECT_FALSE(params_.ToMoreStableChannel()); } +TEST_F(OmahaRequestParamsTest, TargetChannelHintTest) { + EXPECT_TRUE(params_.Init("", "", false)); + const string kHint("foo-hint"); + params_.set_lts_tag(kHint); + EXPECT_EQ(kHint, params_.lts_tag()); +} + TEST_F(OmahaRequestParamsTest, ShouldPowerwashTest) { params_.mutable_image_props_.is_powerwash_allowed = false; EXPECT_FALSE(params_.ShouldPowerwash()); diff --git a/update_attempter.cc b/update_attempter.cc index 60c2c36c..0f0605d5 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -247,6 +247,7 @@ void UpdateAttempter::ReportOSAge() { void UpdateAttempter::Update(const string& app_version, const string& omaha_url, const string& target_channel, + const string& lts_tag, const string& target_version_prefix, bool rollback_allowed, bool rollback_data_save_requested, @@ -284,6 +285,7 @@ void UpdateAttempter::Update(const string& app_version, if (!CalculateUpdateParams(app_version, omaha_url, target_channel, + lts_tag, target_version_prefix, rollback_allowed, rollback_data_save_requested, @@ -359,6 +361,7 @@ void UpdateAttempter::CalculateP2PParams(bool interactive) { bool UpdateAttempter::CalculateUpdateParams(const string& app_version, const string& omaha_url, const string& target_channel, + const string& lts_tag, const string& target_version_prefix, bool rollback_allowed, bool rollback_data_save_requested, @@ -378,6 +381,9 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, // Update the target version prefix. omaha_request_params_->set_target_version_prefix(target_version_prefix); + // Update the LTS support. + omaha_request_params_->set_lts_tag(lts_tag); + // Set whether rollback is allowed. 
omaha_request_params_->set_rollback_allowed(rollback_allowed); @@ -1103,6 +1109,7 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, Update(forced_app_version_, forced_omaha_url_, params.target_channel, + params.lts_tag, params.target_version_prefix, params.rollback_allowed, params.rollback_data_save_requested, diff --git a/update_attempter.h b/update_attempter.h index dd958f54..abd0bd4a 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -85,6 +85,7 @@ class UpdateAttempter : public ActionProcessorDelegate, virtual void Update(const std::string& app_version, const std::string& omaha_url, const std::string& target_channel, + const std::string& lts_tag, const std::string& target_version_prefix, bool rollback_allowed, bool rollback_data_save_requested, @@ -293,6 +294,7 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnOmahaRequestActions); FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback); FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback); + FRIEND_TEST(UpdateAttempterTest, TargetChannelHintSetAndReset); FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset); FRIEND_TEST(UpdateAttempterTest, UpdateAfterInstall); FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart); @@ -369,6 +371,7 @@ class UpdateAttempter : public ActionProcessorDelegate, bool CalculateUpdateParams(const std::string& app_version, const std::string& omaha_url, const std::string& target_channel, + const std::string& lts_tag, const std::string& target_version_prefix, bool rollback_allowed, bool rollback_data_save_requested, diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 0086dd5d..7466aba5 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -174,6 +174,7 @@ class UpdateAttempterUnderTest : public UpdateAttempter { void Update(const std::string& app_version, const std::string& omaha_url, const std::string& target_channel, + const std::string& lts_tag, const std::string& target_version_prefix, bool rollback_allowed, bool rollback_data_save_requested, @@ -185,6 +186,7 @@ class UpdateAttempterUnderTest : public UpdateAttempter { UpdateAttempter::Update(app_version, omaha_url, target_channel, + lts_tag, target_version_prefix, rollback_allowed, rollback_data_save_requested, @@ -425,7 +427,7 @@ void UpdateAttempterTest::ScheduleQuitMainLoop() { void UpdateAttempterTest::SessionIdTestChange() { EXPECT_NE(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status()); const auto old_session_id = attempter_.session_id_; - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_NE(old_session_id, attempter_.session_id_); ScheduleQuitMainLoop(); } @@ -795,7 +797,7 @@ void UpdateAttempterTest::UpdateTestStart() { EXPECT_CALL(*processor_, StartProcessing()); } - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); loop_.PostTask(FROM_HERE, base::Bind(&UpdateAttempterTest::UpdateTestVerify, base::Unretained(this))); @@ -995,7 +997,7 @@ void UpdateAttempterTest::P2PNotEnabledStart() { fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); 
EXPECT_FALSE(actual_using_p2p_for_downloading_); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1017,7 +1019,7 @@ void UpdateAttempterTest::P2PEnabledStartingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(false); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1040,7 +1042,7 @@ void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1062,7 +1064,7 @@ void UpdateAttempterTest::P2PEnabledStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_TRUE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1089,6 +1091,7 @@ void UpdateAttempterTest::P2PEnabledInteractiveStart() { "", "", "", + "", false, false, /*rollback_allowed_milestones=*/0, @@ -1123,7 +1126,7 @@ void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); ScheduleQuitMainLoop(); @@ -1161,7 +1164,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); // Make sure the file still exists. @@ -1177,7 +1180,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { // However, if the count is already 0, it's not decremented. Test that. 
initial_value = 0; EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value)); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount)); EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value)); EXPECT_EQ(initial_value, new_value); @@ -1228,6 +1231,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { "", "", "", + "", false, false, /*rollback_allowed_milestones=*/0, @@ -1284,7 +1288,7 @@ void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); // Check that prefs have the correct values. int64_t update_count; EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count)); @@ -1342,7 +1346,7 @@ void UpdateAttempterTest::StagingOffIfInteractiveStart() { SetUpStagingTest(kValidStagingSchedule, &fake_prefs); attempter_.Update( - "", "", "", "", false, false, 0, false, /* interactive = */ true); + "", "", "", "", "", false, false, 0, false, /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1363,7 +1367,7 @@ void UpdateAttempterTest::StagingOffIfOobeStart() { SetUpStagingTest(kValidStagingSchedule, &fake_prefs); attempter_.Update( - "", "", "", "", false, false, 0, false, /* interactive = */ true); + "", "", "", "", "", false, false, 0, false, /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1692,18 +1696,29 @@ TEST_F(UpdateAttempterTest, UpdateAfterInstall) { TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) { attempter_.CalculateUpdateParams( - "", "", "", "1234", false, false, 4, false, false); + "", "", "", "", "1234", false, false, 4, false, false); EXPECT_EQ("1234", fake_system_state_.request_params()->target_version_prefix()); attempter_.CalculateUpdateParams( - "", "", "", "", false, 4, false, false, false); + "", "", "", "", "", false, 4, false, false, false); EXPECT_TRUE( fake_system_state_.request_params()->target_version_prefix().empty()); } +TEST_F(UpdateAttempterTest, TargetChannelHintSetAndReset) { + attempter_.CalculateUpdateParams( + "", "", "", "hint", "", false, false, 4, false, false); + EXPECT_EQ("hint", fake_system_state_.request_params()->lts_tag()); + + attempter_.CalculateUpdateParams( + "", "", "", "", "", false, 4, false, false, false); + EXPECT_TRUE(fake_system_state_.request_params()->lts_tag().empty()); +} + TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { attempter_.CalculateUpdateParams("", + "", "", "", "1234", @@ -1717,6 +1732,7 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { fake_system_state_.request_params()->rollback_allowed_milestones()); attempter_.CalculateUpdateParams("", + "", "", "", "1234", @@ -1844,7 +1860,7 @@ void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, SetRollbackHappened(false)) .Times(expected_reset ? 
1 : 0); attempter_.policy_provider_ = std::move(mock_policy_provider); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); ScheduleQuitMainLoop(); } @@ -2185,7 +2201,7 @@ void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) { .WillOnce(Return(false)); attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false); EXPECT_EQ(token, attempter_.omaha_request_params_->autoupdate_token()); ScheduleQuitMainLoop(); diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc index a76ea482..6362a73d 100644 --- a/update_manager/android_things_policy.cc +++ b/update_manager/android_things_policy.cc @@ -58,6 +58,7 @@ EvalStatus AndroidThingsPolicy::UpdateCheckAllowed( // Set the default return values. result->updates_enabled = true; result->target_channel.clear(); + result->lts_tag.clear(); result->target_version_prefix.clear(); result->rollback_allowed = false; result->rollback_data_save_requested = false; diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index c310e421..a4926f43 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -215,6 +215,7 @@ EvalStatus ChromeOSPolicy::UpdateCheckAllowed(EvaluationContext* ec, // Set the default return values. result->updates_enabled = true; result->target_channel.clear(); + result->lts_tag.clear(); result->target_version_prefix.clear(); result->rollback_allowed = false; result->rollback_allowed_milestones = -1; diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc index 414ac0d0..f4ad165c 100644 --- a/update_manager/chromeos_policy_unittest.cc +++ b/update_manager/chromeos_policy_unittest.cc @@ -262,6 +262,8 @@ TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWithAttributes) { new bool(false)); fake_state_.device_policy_provider()->var_release_channel()->reset( new string("foo-channel")); + fake_state_.device_policy_provider()->var_release_lts_tag()->reset( + new string("foo-hint")); UpdateCheckParams result; ExpectPolicyStatus( @@ -270,6 +272,7 @@ TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWithAttributes) { EXPECT_EQ("1.2", result.target_version_prefix); EXPECT_EQ(5, result.rollback_allowed_milestones); EXPECT_EQ("foo-channel", result.target_channel); + EXPECT_EQ("foo-hint", result.lts_tag); EXPECT_FALSE(result.interactive); } diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc index 81ab795d..cc13c441 100644 --- a/update_manager/default_policy.cc +++ b/update_manager/default_policy.cc @@ -40,6 +40,7 @@ EvalStatus DefaultPolicy::UpdateCheckAllowed(EvaluationContext* ec, UpdateCheckParams* result) const { result->updates_enabled = true; result->target_channel.clear(); + result->lts_tag.clear(); result->target_version_prefix.clear(); result->rollback_allowed = false; result->rollback_allowed_milestones = -1; // No version rolls should happen. 
diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h index b68fe964..d63c416e 100644 --- a/update_manager/device_policy_provider.h +++ b/update_manager/device_policy_provider.h @@ -44,6 +44,8 @@ class DevicePolicyProvider : public Provider { virtual Variable* var_release_channel_delegated() = 0; + virtual Variable* var_release_lts_tag() = 0; + virtual Variable* var_update_disabled() = 0; virtual Variable* var_target_version_prefix() = 0; diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc index dea38bad..fed50a94 100644 --- a/update_manager/enterprise_device_policy_impl.cc +++ b/update_manager/enterprise_device_policy_impl.cc @@ -126,6 +126,12 @@ EvalStatus EnterpriseDevicePolicyImpl::UpdateCheckAllowed( if (release_channel_p) result->target_channel = *release_channel_p; } + + const string* release_lts_tag_p = + ec->GetValue(dp_provider->var_release_lts_tag()); + if (release_lts_tag_p) { + result->lts_tag = *release_lts_tag_p; + } } return EvalStatus::kContinue; } diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h index 86bdef1e..352e51eb 100644 --- a/update_manager/fake_device_policy_provider.h +++ b/update_manager/fake_device_policy_provider.h @@ -42,6 +42,10 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { return &var_release_channel_delegated_; } + FakeVariable* var_release_lts_tag() override { + return &var_release_lts_tag_; + } + FakeVariable* var_update_disabled() override { return &var_update_disabled_; } @@ -98,6 +102,8 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { kVariableModePoll}; FakeVariable var_release_channel_delegated_{"release_channel_delegated", kVariableModePoll}; + FakeVariable var_release_lts_tag_{"release_lts_tag", + kVariableModePoll}; FakeVariable var_update_disabled_{"update_disabled", kVariableModePoll}; FakeVariable var_target_version_prefix_{"target_version_prefix", kVariableModePoll}; diff --git a/update_manager/policy.h b/update_manager/policy.h index 844a4d0a..9194c38c 100644 --- a/update_manager/policy.h +++ b/update_manager/policy.h @@ -60,6 +60,8 @@ struct UpdateCheckParams { int rollback_allowed_milestones; // A target channel, if so imposed by policy; otherwise, an empty string. std::string target_channel; + // Specifies if the channel hint, e.g. LTS (Long Term Support) updates. + std::string lts_tag; // Whether the allowed update is interactive (user-initiated) or periodic. 
bool interactive; diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc index 55232f3e..bd9d415b 100644 --- a/update_manager/real_device_policy_provider.cc +++ b/update_manager/real_device_policy_provider.cc @@ -220,6 +220,7 @@ void RealDevicePolicyProvider::RefreshDevicePolicy() { UpdateVariable(&var_release_channel_, &DevicePolicy::GetReleaseChannel); UpdateVariable(&var_release_channel_delegated_, &DevicePolicy::GetReleaseChannelDelegated); + UpdateVariable(&var_release_lts_tag_, &DevicePolicy::GetReleaseLtsTag); UpdateVariable(&var_update_disabled_, &DevicePolicy::GetUpdateDisabled); UpdateVariable(&var_target_version_prefix_, &DevicePolicy::GetTargetVersionPrefix); diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h index 9da052d8..e6df18cb 100644 --- a/update_manager/real_device_policy_provider.h +++ b/update_manager/real_device_policy_provider.h @@ -64,6 +64,10 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { return &var_release_channel_delegated_; } + Variable* var_release_lts_tag() override { + return &var_release_lts_tag_; + } + Variable* var_update_disabled() override { return &var_update_disabled_; } @@ -191,6 +195,7 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { AsyncCopyVariable var_release_channel_{"release_channel"}; AsyncCopyVariable var_release_channel_delegated_{ "release_channel_delegated"}; + AsyncCopyVariable var_release_lts_tag_{"release_lts_tag"}; AsyncCopyVariable var_update_disabled_{"update_disabled"}; AsyncCopyVariable var_target_version_prefix_{ "target_version_prefix"}; diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc index 84debd11..1384e6f1 100644 --- a/update_manager/real_device_policy_provider_unittest.cc +++ b/update_manager/real_device_policy_provider_unittest.cc @@ -177,6 +177,7 @@ TEST_F(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyEmptyVariables) { UmTestUtils::ExpectVariableNotSet(provider_->var_release_channel()); UmTestUtils::ExpectVariableNotSet(provider_->var_release_channel_delegated()); + UmTestUtils::ExpectVariableNotSet(provider_->var_release_lts_tag()); UmTestUtils::ExpectVariableNotSet(provider_->var_update_disabled()); UmTestUtils::ExpectVariableNotSet(provider_->var_target_version_prefix()); UmTestUtils::ExpectVariableNotSet( From 612da762b2cee5cf942b22d63cb41598b3e8a65c Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 14 Aug 2020 13:27:33 -0700 Subject: [PATCH 360/624] update_engine: Remove check for deprecated values Build gives warning for these deprecated values in protobuf. We don't need to test for the existence of these fields anyway because if they exist, we just ignore them. 
BUG=chromium:163075733 TEST=None Change-Id: I6e39a521c39d6398ca096cc43219aaef7bd628de Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2357491 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- payload_consumer/delta_performer.cc | 8 -------- 1 file changed, 8 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 11cf0069..95dfbcc7 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1475,14 +1475,6 @@ ErrorCode DeltaPerformer::ValidateManifest() { } } - if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() || - manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() || - manifest_.install_operations_size() != 0 || - manifest_.kernel_install_operations_size() != 0) { - LOG(ERROR) << "Manifest contains deprecated fields."; - return ErrorCode::kPayloadMismatchedType; - } - if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) { LOG(ERROR) << "The current OS build timestamp (" << hardware_->GetBuildTimestamp() From 8d6df9ac7a70f4b07ebb86f50fb3548b693acad5 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 13 Aug 2020 13:59:54 -0700 Subject: [PATCH 361/624] Check allocatable space correctly when sideloading on VAB On a device with Virtual A/B, when sideloading and there's not enough space in super partition to hold CoW, update_engine falls back to overwriting all source partitions. In that case, the allocatable space should be the whole super partition, not a half of it. Also update doc comments. Test: unit test. RecoveryErrorShouldDeleteSource fails without the patch but succeeds with the patch. Bug: 163613538 Change-Id: I6bd6895a7eabeb4e8436e57b0ac6830c11d1e98f --- dynamic_partition_control_android.cc | 12 +++++++++++- dynamic_partition_control_android.h | 7 +++++-- dynamic_partition_control_android_unittest.cc | 18 ++++++++++++------ dynamic_partition_test_utils.h | 6 ++++-- 4 files changed, 32 insertions(+), 11 deletions(-) diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index aa0f393c..ccb99ba4 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -838,6 +838,11 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( MetadataBuilder* builder, uint32_t target_slot, const DeltaArchiveManifest& manifest) { + // Check preconditions. + CHECK(!GetVirtualAbFeatureFlag().IsEnabled() || IsRecovery()) + << "UpdatePartitionMetadata is called on a Virtual A/B device " + "but source partitions is not deleted. This is not allowed."; + // If applying downgrade from Virtual A/B to non-Virtual A/B, the left-over // COW group needs to be deleted to ensure there are enough space to create // target partitions. @@ -853,7 +858,12 @@ bool DynamicPartitionControlAndroid::UpdatePartitionMetadata( std::string expr; uint64_t allocatable_space = builder->AllocatableSpace(); - if (!GetDynamicPartitionsFeatureFlag().IsRetrofit()) { + // On device retrofitting dynamic partitions, allocatable_space = super. + // On device launching dynamic partitions w/o VAB, + // allocatable_space = super / 2. + // On device launching dynamic partitions with VAB, allocatable_space = super. 
+ if (!GetDynamicPartitionsFeatureFlag().IsRetrofit() && + !GetVirtualAbFeatureFlag().IsEnabled()) { allocatable_space /= 2; expr = "half of "; } diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 9ee85db6..49967f6c 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -203,8 +203,11 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { bool force_writable, std::string* path); - // Update |builder| according to |partition_metadata|, assuming the device - // does not have Virtual A/B. + // Update |builder| according to |partition_metadata|. + // - In Android mode, this is only called when the device + // does not have Virtual A/B. + // - When sideloading, this maybe called as a fallback path if CoW cannot + // be created. bool UpdatePartitionMetadata(android::fs_mgr::MetadataBuilder* builder, uint32_t target_slot, const DeltaArchiveManifest& manifest); diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 4154b36c..223e177d 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -113,21 +113,24 @@ class DynamicPartitionControlAndroidTest : public ::testing::Test { // |slot|. void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes, - uint32_t partition_attr = 0) { + uint32_t partition_attr = 0, + uint64_t super_size = kDefaultSuperSize) { EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(slot), slot)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto) { + .WillRepeatedly(Invoke([=](auto, auto) { return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes), - partition_attr); + partition_attr, + super_size); })); EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(slot), slot, _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto, auto) { + .WillRepeatedly(Invoke([=](auto, auto, auto) { return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes), - partition_attr); + partition_attr, + super_size); })); } @@ -1006,8 +1009,11 @@ TEST_P(SnapshotPartitionTestP, RecoveryErrorShouldDeleteSource) { return dynamicControl().RealPrepareDynamicPartitionsForUpdate( source_slot, target_slot, manifest, delete_source); })); + // Only one slot of space in super + uint64_t super_size = kDefaultGroupSize + 1_MiB; // Expectation on PrepareDynamicPartitionsForUpdate - SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}); + SetMetadata( + source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}, 0, super_size); ExpectUnmap({T("system"), T("vendor")}); // Expect that the source partitions aren't present in target super metadata. 
ExpectStoreMetadata({{T("system"), 3_GiB}, {T("vendor"), 1_GiB}}); diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h index 70a176b5..d701dce8 100644 --- a/dynamic_partition_test_utils.h +++ b/dynamic_partition_test_utils.h @@ -175,9 +175,11 @@ inline DeltaArchiveManifest PartitionSizesToManifest( } inline std::unique_ptr NewFakeMetadata( - const DeltaArchiveManifest& manifest, uint32_t partition_attr = 0) { + const DeltaArchiveManifest& manifest, + uint32_t partition_attr = 0, + uint64_t super_size = kDefaultSuperSize) { auto builder = - MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots); + MetadataBuilder::New(super_size, kFakeMetadataSize, kMaxNumSlots); for (const auto& group : manifest.dynamic_partition_metadata().groups()) { EXPECT_TRUE(builder->AddGroup(group.name(), group.size())); for (const auto& partition_name : group.partition_names()) { From 9e7a6db36721dc14a17da32394e8c0e5b234e159 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 13 Aug 2020 14:55:58 -0400 Subject: [PATCH 362/624] Update payload.py to work with OTA packages directly Test: python3 payload_info.py some_ota.zip Change-Id: Id688e87d4712f2f750320eb0cd86c6a2cc263313 --- scripts/update_payload/payload.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py index ea5ed308..78b8e2ca 100644 --- a/scripts/update_payload/payload.py +++ b/scripts/update_payload/payload.py @@ -20,7 +20,9 @@ from __future__ import print_function import hashlib +import io import struct +import zipfile from update_payload import applier from update_payload import checker @@ -119,6 +121,10 @@ def __init__(self, payload_file, payload_file_offset=0): payload_file: update payload file object open for reading payload_file_offset: the offset of the actual payload """ + if zipfile.is_zipfile(payload_file): + with zipfile.ZipFile(payload_file) as zfp: + with zfp.open("payload.bin") as payload_fp: + payload_file = io.BytesIO(payload_fp.read()) self.payload_file = payload_file self.payload_file_offset = payload_file_offset self.manifest_hasher = None From 1f3f3b012546bb54ae07c4a72df51d13adc9981d Mon Sep 17 00:00:00 2001 From: David Anderson Date: Mon, 17 Aug 2020 12:04:23 -0700 Subject: [PATCH 363/624] Expose extent_ranges for libsnapshot to use in resolving merge sequences. Bug: 162274240 Test: builds Change-Id: If8a1bbf99f9195d69b375021b59a7213935924b9 --- Android.bp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Android.bp b/Android.bp index b6ee4769..a2e4ba04 100644 --- a/Android.bp +++ b/Android.bp @@ -468,6 +468,20 @@ cc_defaults { ], } +cc_library_static { + name: "libpayload_extent_ranges", + defaults: [ + "ue_defaults", + ], + host_supported: true, + srcs: [ + "payload_generator/extent_ranges.cc", + ], + static_libs: [ + "update_metadata-protos", + ], +} + cc_library_static { name: "libpayload_generator", defaults: [ @@ -737,4 +751,5 @@ cc_library_headers { apex_available: [ "com.android.gki.*", ], + host_supported: true, } From d719103441dcd39bcd0fa6fd1795e3783267b7bb Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 11 Aug 2020 10:48:16 -0400 Subject: [PATCH 364/624] Support per-partition timestamps update_engine is heading toward supporting partial updates, which an OTA update can update just a subset of all partitions. In this context, a single max_timestamp in OTA manifest is insufficient for checking potential downgrades, as different partitions can have different timestamps. 
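For illustration only, a minimal sketch of why one payload-wide timestamp is not enough: a per-partition comparison catches a payload that downgrades a single partition even when its overall max_timestamp looks newer. The names below are hypothetical, not the update_engine API:

    #include <cstdint>
    #include <map>
    #include <string>

    // Sketch only: reject the payload if any partition it touches would move
    // to an older timestamp than what the device currently has.
    bool NoPartitionIsDowngraded(
        const std::map<std::string, int64_t>& device_timestamps,
        const std::map<std::string, int64_t>& payload_timestamps) {
      for (const auto& [name, new_ts] : payload_timestamps) {
        auto it = device_timestamps.find(name);
        if (it != device_timestamps.end() && new_ts < it->second)
          return false;  // this partition would be downgraded
      }
      return true;
    }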
This CL adds per-partition timestamp support on update_engine side. update_engine will accept a payload with per-partition timestamps and reject the update if any partition has an older timestamp. Changes made: 1. Add new version field to PartitionUpdate protobuf message. 2. Add new methods to HardwareInterface for fetching/checking timestamp of each partition. 3. Update delta_performer to invoke new APIs in 2 properly. 4. Add relevant testcases. Test: unittest Bug: 162553432 Change-Id: I767343e003fd35ce0d22197b15040488cf30be30 --- common/fake_hardware.h | 15 ++ common/hardware_interface.h | 13 ++ common/utils.cc | 27 ++- common/utils.h | 6 + common/utils_unittest.cc | 8 + hardware_android.cc | 24 +++ hardware_android.h | 8 +- hardware_chromeos.cc | 11 ++ hardware_chromeos.h | 4 + payload_consumer/delta_performer.cc | 57 +++++- payload_consumer/delta_performer.h | 10 ++ .../delta_performer_integration_test.cc | 167 +++++++++++++++--- payload_consumer/delta_performer_unittest.cc | 20 +++ scripts/update_payload/update_metadata_pb2.py | 77 ++++---- update_metadata.proto | 5 + 15 files changed, 384 insertions(+), 68 deletions(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 2a8e81de..30c08978 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -19,10 +19,12 @@ #include #include +#include #include #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/utils.h" namespace chromeos_update_engine { @@ -207,6 +209,18 @@ class FakeHardware : public HardwareInterface { bool GetIsRollbackPowerwashScheduled() const { return powerwash_scheduled_ && save_rollback_data_; } + std::string GetVersionForLogging( + const std::string& partition_name) const override { + return partition_timestamps_[partition_name]; + } + void SetVersion(const std::string& partition_name, std::string timestamp) { + partition_timestamps_[partition_name] = std::move(timestamp); + } + bool IsPartitionUpdateValid(const std::string& partition_name, + const std::string& new_version) const override { + const auto old_version = GetVersionForLogging(partition_name); + return utils::IsTimestampNewer(old_version, new_version); + } private: bool is_official_build_{true}; @@ -230,6 +244,7 @@ class FakeHardware : public HardwareInterface { int64_t build_timestamp_{0}; bool first_active_omaha_ping_sent_{false}; bool warm_reset_{false}; + mutable std::map partition_timestamps_; DISALLOW_COPY_AND_ASSIGN(FakeHardware); }; diff --git a/common/hardware_interface.h b/common/hardware_interface.h index 4f0305fb..0fffbfb7 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -142,6 +142,19 @@ class HardwareInterface { // If |warm_reset| is true, sets the warm reset to indicate a warm reset is // needed on the next reboot. Otherwise, clears the flag. virtual void SetWarmReset(bool warm_reset) = 0; + + // Return the version/timestamp for partition `partition_name`. + // Don't make any assumption about the formatting of returned string. + // Only used for logging/debugging purposes. + virtual std::string GetVersionForLogging( + const std::string& partition_name) const = 0; + + // Return true if and only if `new_version` is "newer" than the + // version number of partition `partition_name`. The notion of + // "newer" is defined by this function. Caller should not make + // any assumption about the underlying logic. 
+ virtual bool IsPartitionUpdateValid(const std::string& partition_name, + const std::string& new_version) const = 0; }; } // namespace chromeos_update_engine diff --git a/common/utils.cc b/common/utils.cc index 3e3d8302..bbb155f6 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -820,7 +820,7 @@ ErrorCode GetBaseErrorCode(ErrorCode code) { return base_code; } -string StringVectorToString(const vector &vec_str) { +string StringVectorToString(const vector& vec_str) { string str = "["; for (vector::const_iterator i = vec_str.begin(); i != vec_str.end(); ++i) { @@ -849,7 +849,7 @@ string CalculateP2PFileId(const brillo::Blob& payload_hash, encoded_hash.c_str()); } -bool ConvertToOmahaInstallDate(Time time, int *out_num_days) { +bool ConvertToOmahaInstallDate(Time time, int* out_num_days) { time_t unix_time = time.ToTimeT(); // Output of: date +"%s" --date="Jan 1, 2007 0:00 PST". const time_t kOmahaEpoch = 1167638400; @@ -982,6 +982,29 @@ string GetExclusionName(const string& str_to_convert) { return base::NumberToString(base::StringPieceHash()(str_to_convert)); } +static bool ParseTimestamp(const std::string& str, int64_t* out) { + if (!base::StringToInt64(str, out)) { + LOG(WARNING) << "Invalid timestamp: " << str; + return false; + } + return true; +} + +bool IsTimestampNewer(const std::string& old_version, + const std::string& new_version) { + if (old_version.empty() || new_version.empty()) { + LOG(WARNING) + << "One of old/new timestamp is empty, permit update anyway. Old: " + << old_version << " New: " << new_version; + return true; + } + int64_t old_ver = 0; + TEST_AND_RETURN_FALSE(ParseTimestamp(old_version, &old_ver)); + int64_t new_ver = 0; + TEST_AND_RETURN_FALSE(ParseTimestamp(new_version, &new_ver)); + return old_ver <= new_ver; +} + } // namespace utils } // namespace chromeos_update_engine diff --git a/common/utils.h b/common/utils.h index 23ac03d9..5dfee3bd 100644 --- a/common/utils.h +++ b/common/utils.h @@ -323,6 +323,12 @@ std::string GetTimeAsString(time_t utime); // with |Excluder| as the exclusion name. std::string GetExclusionName(const std::string& str_to_convert); +// Parse `old_version` and `new_version` as integer timestamps and +// return true if `new_version` is larger/newer. +// Returns true if either one is empty. 
Return false if +bool IsTimestampNewer(const std::string& old_version, + const std::string& new_version); + } // namespace utils // Utility class to close a file descriptor diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc index ebcc548b..37871d2d 100644 --- a/common/utils_unittest.cc +++ b/common/utils_unittest.cc @@ -481,4 +481,12 @@ TEST(UtilsTest, GetFilePathTest) { IGNORE_EINTR(close(fd)); } +TEST(UtilsTest, ValidatePerPartitionTimestamp) { + ASSERT_FALSE(utils::IsTimestampNewer("10", "5")); + ASSERT_TRUE(utils::IsTimestampNewer("10", "11")); + ASSERT_FALSE(utils::IsTimestampNewer("10", "lol")); + ASSERT_FALSE(utils::IsTimestampNewer("lol", "ZZZ")); + ASSERT_TRUE(utils::IsTimestampNewer("10", "")); +} + } // namespace chromeos_update_engine diff --git a/hardware_android.cc b/hardware_android.cc index 0bf05e46..659e67e8 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -19,13 +19,17 @@ #include #include +#include +#include +#include #include #include #include #include "update_engine/common/hardware.h" #include "update_engine/common/platform_constants.h" +#include "update_engine/common/utils.h" using android::base::GetBoolProperty; using android::base::GetIntProperty; @@ -223,4 +227,24 @@ void HardwareAndroid::SetWarmReset(bool warm_reset) { } } +std::string HardwareAndroid::GetVersionForLogging( + const std::string& partition_name) const { + return android::base::GetProperty("ro." + partition_name + ".build.date.utc", + ""); +} + +bool HardwareAndroid::IsPartitionUpdateValid( + const std::string& partition_name, const std::string& new_version) const { + const auto old_version = GetVersionForLogging(partition_name); + // TODO(zhangkelvin) for some partitions, missing a current timestamp should + // be an error, e.g. system, vendor, product etc. + auto applicable = utils::IsTimestampNewer(old_version, new_version); + if (!applicable) { + LOG(ERROR) << "Timestamp on partition " << partition_name + << " is newer than update. Partition timestamp: " << old_version + << " Update timestamp: " << new_version; + } + return applicable; +} + } // namespace chromeos_update_engine diff --git a/hardware_android.h b/hardware_android.h index e0368f9a..2e55f97f 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -18,6 +18,7 @@ #define UPDATE_ENGINE_HARDWARE_ANDROID_H_ #include +#include #include #include @@ -28,7 +29,7 @@ namespace chromeos_update_engine { // Implements the real interface with the hardware in the Android platform. 
-class HardwareAndroid final : public HardwareInterface { +class HardwareAndroid : public HardwareInterface { public: HardwareAndroid() = default; ~HardwareAndroid() override = default; @@ -58,6 +59,11 @@ class HardwareAndroid final : public HardwareInterface { bool GetFirstActiveOmahaPingSent() const override; bool SetFirstActiveOmahaPingSent() override; void SetWarmReset(bool warm_reset) override; + [[nodiscard]] std::string GetVersionForLogging( + const std::string& partition_name) const override; + [[nodiscard]] bool IsPartitionUpdateValid( + const std::string& partition_name, + const std::string& new_version) const override; private: DISALLOW_COPY_AND_ASSIGN(HardwareAndroid); diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 2a838305..58f30dba 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -384,4 +384,15 @@ bool HardwareChromeOS::SetFirstActiveOmahaPingSent() { void HardwareChromeOS::SetWarmReset(bool warm_reset) {} +std::string HardwareChromeOS::GetVersionForLogging( + const std::string& partition_name) const { + // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS. + return ""; +} +bool HardwareChromeOS::IsPartitionUpdateValid( + const std::string& partition_name, const std::string& new_version) const { + // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS. + return true; +} + } // namespace chromeos_update_engine diff --git a/hardware_chromeos.h b/hardware_chromeos.h index e14ae9a1..49fed88d 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -63,6 +63,10 @@ class HardwareChromeOS final : public HardwareInterface { bool GetFirstActiveOmahaPingSent() const override; bool SetFirstActiveOmahaPingSent() override; void SetWarmReset(bool warm_reset) override; + std::string GetVersionForLogging( + const std::string& partition_name) const override; + bool IsPartitionUpdateValid(const std::string& partition_name, + const std::string& new_version) const override; private: friend class HardwareChromeOSTest; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 19d12970..aa0b4f56 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1628,17 +1628,15 @@ ErrorCode DeltaPerformer::ValidateManifest() { LOG(ERROR) << "Manifest contains deprecated fields."; return ErrorCode::kPayloadMismatchedType; } - - if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) { - LOG(ERROR) << "The current OS build timestamp (" - << hardware_->GetBuildTimestamp() - << ") is newer than the maximum timestamp in the manifest (" - << manifest_.max_timestamp() << ")"; + TimestampCheckResult result = CheckTimestampError(); + if (result == TimestampCheckResult::DOWNGRADE) { if (!hardware_->AllowDowngrade()) { return ErrorCode::kPayloadTimestampError; } LOG(INFO) << "The current OS build allows downgrade, continuing to apply" " the payload with an older timestamp."; + } else if (result == TimestampCheckResult::FAILURE) { + return ErrorCode::kPayloadTimestampError; } // TODO(crbug.com/37661) we should be adding more and more manifest checks, @@ -1647,6 +1645,53 @@ ErrorCode DeltaPerformer::ValidateManifest() { return ErrorCode::kSuccess; } +TimestampCheckResult DeltaPerformer::CheckTimestampError() const { + bool is_partial_update = + manifest_.has_partial_update() && manifest_.partial_update(); + const auto& partitions = manifest_.partitions(); + auto&& timestamp_valid = [this](const PartitionUpdate& partition) { + return 
hardware_->IsPartitionUpdateValid(partition.partition_name(), + partition.version()); + }; + if (is_partial_update) { + // for partial updates, all partitions MUST have valid timestamps + // But max_timestamp can be empty + for (const auto& partition : partitions) { + if (!partition.has_version()) { + LOG(ERROR) + << "PartitionUpdate " << partition.partition_name() + << " does not have a version field. Not allowed in partial updates."; + return TimestampCheckResult::FAILURE; + } + if (!timestamp_valid(partition)) { + // Warning because the system might allow downgrade. + LOG(WARNING) << "PartitionUpdate " << partition.partition_name() + << " has an older version than partition on device."; + return TimestampCheckResult::DOWNGRADE; + } + } + + return TimestampCheckResult::SUCCESS; + } + if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) { + LOG(ERROR) << "The current OS build timestamp (" + << hardware_->GetBuildTimestamp() + << ") is newer than the maximum timestamp in the manifest (" + << manifest_.max_timestamp() << ")"; + return TimestampCheckResult::DOWNGRADE; + } + // Otherwise... partitions can have empty timestamps. + for (const auto& partition : partitions) { + if (partition.has_version() && !timestamp_valid(partition)) { + // Warning because the system might allow downgrade. + LOG(WARNING) << "PartitionUpdate " << partition.partition_name() + << " has an older version than partition on device."; + return TimestampCheckResult::DOWNGRADE; + } + } + return TimestampCheckResult::SUCCESS; +} + ErrorCode DeltaPerformer::ValidateOperationHash( const InstallOperation& operation) { if (!operation.data_sha256_hash().size()) { diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 2d1768dd..0718ef60 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -49,6 +49,12 @@ class PrefsInterface; // This class performs the actions in a delta update synchronously. The delta // update itself should be passed in in chunks as it is received. +enum class TimestampCheckResult { + SUCCESS, + FAILURE, + DOWNGRADE, +}; + class DeltaPerformer : public FileWriter { public: // Defines the granularity of progress logging in terms of how many "completed @@ -310,6 +316,10 @@ class DeltaPerformer : public FileWriter { // Also see comment for the static PreparePartitionsForUpdate(). bool PreparePartitionsForUpdate(uint64_t* required_size); + // Check if current manifest contains timestamp errors. (ill-formed or + // downgrade) + TimestampCheckResult CheckTimestampError() const; + // Update Engine preference store.
PrefsInterface* prefs_; diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index acbecad5..c257b284 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -36,9 +36,12 @@ #include "update_engine/common/constants.h" #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" +#include "update_engine/common/fake_prefs.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" +#include "update_engine/hardware_android.h" +#include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/mock_download_action.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/payload_metadata.h" @@ -125,7 +128,41 @@ enum OperationHashTest { } // namespace -class DeltaPerformerIntegrationTest : public ::testing::Test {}; +class DeltaPerformerIntegrationTest : public ::testing::Test { + public: + void RunManifestValidation(const DeltaArchiveManifest& manifest, + uint64_t major_version, + ErrorCode expected) { + FakePrefs prefs; + InstallPlan::Payload payload; + InstallPlan install_plan; + DeltaPerformer performer{&prefs, + nullptr, + &fake_hardware_, + nullptr, + &install_plan, + &payload, + false /* interactive*/}; + // Delta performer will treat manifest as kDelta payload + // if it's a partial update. + payload.type = manifest.partial_update() ? InstallPayloadType::kDelta + : InstallPayloadType::kFull; + + // The Manifest we are validating. + performer.manifest_.CopyFrom(manifest); + performer.major_payload_version_ = major_version; + + EXPECT_EQ(expected, performer.ValidateManifest()); + } + void AddPartition(DeltaArchiveManifest* manifest, + std::string name, + int timestamp) { + auto& partition = *manifest->add_partitions(); + partition.set_version(std::to_string(timestamp)); + partition.set_partition_name(name); + } + FakeHardware fake_hardware_; +}; static void CompareFilesByBlock(const string& a_file, const string& b_file, @@ -995,13 +1032,13 @@ void DoOperationHashMismatchTest(OperationHashTest op_hash_test, delete performer; } -TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) { +TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) { DoSmallImageTest( false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootSmallImageSignaturePlaceholderTest) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignaturePlaceholderTest) { DoSmallImageTest(false, false, -1, @@ -1010,8 +1047,8 @@ TEST(DeltaPerformerIntegrationTest, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootSmallImageSignaturePlaceholderMismatchTest) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignaturePlaceholderMismatchTest) { DeltaState state; GenerateDeltaFile(false, false, @@ -1021,7 +1058,7 @@ TEST(DeltaPerformerIntegrationTest, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) { +TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) { DoSmallImageTest(false, false, kBlockSize, @@ -1030,27 +1067,28 @@ TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) { kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) { 
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) { DoSmallImageTest( true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) { +TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) { DoSmallImageTest( true, true, -1, kSignatureGenerator, true, kFullPayloadMinorVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) { +TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) { DoSmallImageTest( false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) { +TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) { DoSmallImageTest( false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignGeneratedShellTest) { DoSmallImageTest(false, false, -1, @@ -1059,8 +1097,8 @@ TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) { kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootSmallImageSignGeneratedShellECKeyTest) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignGeneratedShellECKeyTest) { DoSmallImageTest(false, false, -1, @@ -1069,8 +1107,8 @@ TEST(DeltaPerformerIntegrationTest, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootSmallImageSignGeneratedShellBadKeyTest) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignGeneratedShellBadKeyTest) { DoSmallImageTest(false, false, -1, @@ -1079,8 +1117,8 @@ TEST(DeltaPerformerIntegrationTest, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootSmallImageSignGeneratedShellRotateCl1Test) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignGeneratedShellRotateCl1Test) { DoSmallImageTest(false, false, -1, @@ -1089,8 +1127,8 @@ TEST(DeltaPerformerIntegrationTest, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootSmallImageSignGeneratedShellRotateCl2Test) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootSmallImageSignGeneratedShellRotateCl2Test) { DoSmallImageTest(false, false, -1, @@ -1099,14 +1137,97 @@ TEST(DeltaPerformerIntegrationTest, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) { +TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) { DoSmallImageTest( false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion); } -TEST(DeltaPerformerIntegrationTest, - RunAsRootMandatoryOperationHashMismatchTest) { +TEST_F(DeltaPerformerIntegrationTest, + RunAsRootMandatoryOperationHashMismatchTest) { DoOperationHashMismatchTest(kInvalidOperationData, true); } +TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampSuccess) { + // The Manifest we are validating. 
+ DeltaArchiveManifest manifest; + + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + fake_hardware_.SetBuildTimestamp(1); + + manifest.set_minor_version(kFullPayloadMinorVersion); + manifest.set_max_timestamp(2); + AddPartition(&manifest, "system", 10); + AddPartition(&manifest, "product", 100); + + RunManifestValidation( + manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess); +} + +TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampFailure) { + // The Manifest we are validating. + DeltaArchiveManifest manifest; + + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + fake_hardware_.SetBuildTimestamp(1); + + manifest.set_minor_version(kFullPayloadMinorVersion); + manifest.set_max_timestamp(2); + AddPartition(&manifest, "system", 10); + AddPartition(&manifest, "product", 98); + + RunManifestValidation(manifest, + kMaxSupportedMajorPayloadVersion, + ErrorCode::kPayloadTimestampError); +} + +TEST_F(DeltaPerformerIntegrationTest, + ValidatePerPartitionTimestampMissingTimestamp) { + // The Manifest we are validating. + DeltaArchiveManifest manifest; + + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + fake_hardware_.SetBuildTimestamp(1); + + manifest.set_minor_version(kFullPayloadMinorVersion); + manifest.set_max_timestamp(2); + AddPartition(&manifest, "system", 10); + { + auto& partition = *manifest.add_partitions(); + // For complete updates, missing timestamp should not trigger + // timestamp error. + partition.set_partition_name("product"); + } + + RunManifestValidation( + manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess); +} + +TEST_F(DeltaPerformerIntegrationTest, + ValidatePerPartitionTimestampPartialUpdate) { + // The Manifest we are validating. + DeltaArchiveManifest manifest; + + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + fake_hardware_.SetBuildTimestamp(1); + + manifest.set_minor_version(kPartialUpdateMinorPayloadVersion); + manifest.set_max_timestamp(2); + manifest.set_partial_update(true); + AddPartition(&manifest, "system", 10); + { + auto& partition = *manifest.add_partitions(); + // For partial updates, missing timestamp should + // trigger an error + partition.set_partition_name("product"); + } + + RunManifestValidation(manifest, + kMaxSupportedMajorPayloadVersion, + ErrorCode::kPayloadTimestampError); +} + } // namespace chromeos_update_engine diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 44107cd3..fbd754f4 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -36,9 +36,11 @@ #include #include "update_engine/common/constants.h" +#include "update_engine/common/error_code.h" #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/fake_prefs.h" +#include "update_engine/common/hardware_interface.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/fake_file_descriptor.h" @@ -899,6 +901,24 @@ TEST_F(DeltaPerformerTest, ValidateManifestDowngrade) { ErrorCode::kPayloadTimestampError); } +TEST_F(DeltaPerformerTest, ValidatePerPartitionTimestampSuccess) { + // The Manifest we are validating. 
+ DeltaArchiveManifest manifest; + + manifest.set_minor_version(kFullPayloadMinorVersion); + manifest.set_max_timestamp(2); + fake_hardware_.SetBuildTimestamp(1); + auto& partition = *manifest.add_partitions(); + partition.set_version("10"); + partition.set_partition_name("system"); + fake_hardware_.SetVersion("system", "5"); + + RunManifestValidation(manifest, + kMaxSupportedMajorPayloadVersion, + InstallPayloadType::kFull, + ErrorCode::kSuccess); +} + TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) { unsigned int seed = time(nullptr); EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic))); diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py index d41c1da1..841cd22b 100644 --- a/scripts/update_payload/update_metadata_pb2.py +++ b/scripts/update_payload/update_metadata_pb2.py @@ -2,8 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: update_metadata.proto -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -19,8 +17,8 @@ name='update_metadata.proto', package='chromeos_update_engine', syntax='proto2', - serialized_options=_b('H\003'), - serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03') + serialized_options=b'H\003', + serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 
\x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xe8\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03' ) @@ -41,11 +39,11 @@ type=None), _descriptor.EnumValueDescriptor( name='MOVE', index=2, number=2, - serialized_options=_b('\010\001'), + serialized_options=b'\010\001', type=None), 
_descriptor.EnumValueDescriptor( name='BSDIFF', index=3, number=3, - serialized_options=_b('\010\001'), + serialized_options=b'\010\001', type=None), _descriptor.EnumValueDescriptor( name='SOURCE_COPY', index=4, number=4, @@ -135,11 +133,11 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -214,7 +212,7 @@ _descriptor.FieldDescriptor( name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -245,42 +243,42 @@ _descriptor.FieldDescriptor( name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4, number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5, number=6, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -360,14 +358,14 @@ _descriptor.FieldDescriptor( name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7, number=8, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8, number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -399,7 +397,7 @@ _descriptor.FieldDescriptor( name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -413,14 +411,14 @@ _descriptor.FieldDescriptor( name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -476,14 +474,14 @@ _descriptor.FieldDescriptor( name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11, number=12, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12, number=13, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -508,6 +506,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='version', 
full_name='chromeos_update_engine.PartitionUpdate.version', index=16, + number=17, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -521,7 +526,7 @@ oneofs=[ ], serialized_start=926, - serialized_end=1653, + serialized_end=1670, ) @@ -535,7 +540,7 @@ _descriptor.FieldDescriptor( name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -565,8 +570,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1655, - serialized_end=1731, + serialized_start=1672, + serialized_end=1748, ) @@ -603,8 +608,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1733, - serialized_end=1848, + serialized_start=1750, + serialized_end=1865, ) @@ -621,14 +626,14 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, number=3, type=13, cpp_type=3, label=1, @@ -656,28 +661,28 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\030\001'), 
file=DESCRIPTOR), + serialized_options=b'\030\001', file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, number=10, type=11, cpp_type=10, label=1, @@ -739,8 +744,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1851, - serialized_end=2716, + serialized_start=1868, + serialized_end=2733, ) _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES diff --git a/update_metadata.proto b/update_metadata.proto index e6a067e7..f79e38b9 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -288,6 +288,11 @@ message PartitionUpdate { // The number of FEC roots. optional uint32 fec_roots = 16 [default = 2]; + + // Per-partition version used for downgrade detection, added + // as an effort to support partial updates. For most partitions, + // this is the build timestamp. + optional string version = 17; } message DynamicPartitionGroup { From 5ef2d8ae7bc8ccd03ea8fc19e3e0b9a11f1dfeaf Mon Sep 17 00:00:00 2001 From: Ankit Goyal Date: Wed, 26 Aug 2020 07:01:26 +0000 Subject: [PATCH 365/624] Revert "Expose extent_ranges for libsnapshot to use in resolving..." Revert "libsnapshot: Off-line tool for converting OTA payloads t..." Revert submission 1405248-vab-offline-cow Reason for revert: Reason for revert: Droidcop-triggered revert due to breakage https://android-build.googleplex.com/builds/quarterdeck?branch=git_master-without-vendor&target=sdk_mac&lkgb=6790614&lkbb=6791107&fkbb=6791107, bug 166383275 Reverted Changes: If8a1bbf99:Expose extent_ranges for libsnapshot to use in res... I22c86546f:libsnapshot: Off-line tool for converting OTA payl... Change-Id: Ifa26210fc526585535d67ff9475076eb18560633 --- Android.bp | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/Android.bp b/Android.bp index a2e4ba04..b6ee4769 100644 --- a/Android.bp +++ b/Android.bp @@ -468,20 +468,6 @@ cc_defaults { ], } -cc_library_static { - name: "libpayload_extent_ranges", - defaults: [ - "ue_defaults", - ], - host_supported: true, - srcs: [ - "payload_generator/extent_ranges.cc", - ], - static_libs: [ - "update_metadata-protos", - ], -} - cc_library_static { name: "libpayload_generator", defaults: [ @@ -751,5 +737,4 @@ cc_library_headers { apex_available: [ "com.android.gki.*", ], - host_supported: true, } From 1f4964213f67ae682af8cd332cd37b3d9afdbace Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 11 Aug 2020 17:18:23 -0400 Subject: [PATCH 366/624] Set per-partition timestamps in OTA generation update_engine can also accept payload with per-partition timestamps. This CL updates OTA generation script to emit per-partition timestamps when writing an OTA package. Test: Generate && serve an ota Change-Id: I17529a004b8e0bbcb7d69dde93fb0fd7124b3b17 --- payload_generator/generate_delta_main.cc | 45 +++++++++++++++++++ payload_generator/payload_file.cc | 4 ++ payload_generator/payload_file.h | 2 + payload_generator/payload_generation_config.h | 3 ++ scripts/brillo_update_payload | 8 ++++ 5 files changed, 62 insertions(+) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 18cff4b1..dd41a29b 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -14,6 +14,7 @@ // limitations under the License. 
// +#include #include #include @@ -22,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -47,6 +49,7 @@ // and an output file as arguments and the path to an output file and // generates a delta that can be sent to Chrome OS clients. +using std::map; using std::string; using std::vector; @@ -294,6 +297,39 @@ bool ExtractProperties(const string& payload_path, return true; } +template +string ToString(const map& map) { + vector result; + result.reserve(map.size()); + for (const auto& it : map) { + result.emplace_back(it.first + ": " + it.second); + } + return "{" + base::JoinString(result, ",") + "}"; +} + +bool ParsePerPartitionTimestamps(const string& partition_timestamps, + PayloadGenerationConfig* config) { + base::StringPairs pairs; + CHECK(base::SplitStringIntoKeyValuePairs( + partition_timestamps, ':', ',', &pairs)) + << "--partition_timestamps accepts commad " + "separated pairs. e.x. system:1234,vendor:5678"; + map partition_timestamps_map{ + std::move_iterator(pairs.begin()), std::move_iterator(pairs.end())}; + for (auto&& partition : config->target.partitions) { + auto&& it = partition_timestamps_map.find(partition.name); + if (it != partition_timestamps_map.end()) { + partition.version = std::move(it->second); + partition_timestamps_map.erase(it); + } + } + if (!partition_timestamps_map.empty()) { + LOG(ERROR) << "Unused timestamps: " << ToString(partition_timestamps_map); + return false; + } + return true; +} + int Main(int argc, char** argv) { DEFINE_string(old_image, "", "Path to the old rootfs"); DEFINE_string(new_image, "", "Path to the new rootfs"); @@ -384,6 +420,11 @@ int Main(int argc, char** argv) { 0, "The maximum timestamp of the OS allowed to apply this " "payload."); + DEFINE_string( + partition_timestamps, + "", + "The per-partition maximum timestamps which the OS allowed to apply this " + "payload. Passed in comma separated pairs, e.x. system:1234,vendor:5678"); DEFINE_string(old_channel, "", @@ -709,6 +750,10 @@ int Main(int argc, char** argv) { } payload_config.max_timestamp = FLAGS_max_timestamp; + if (!FLAGS_partition_timestamps.empty()) { + CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps, + &payload_config)); + } if (payload_config.is_delta && payload_config.version.minor >= kVerityMinorPayloadVersion) diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index c1594c75..1388f2da 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -92,6 +92,7 @@ bool PayloadFile::AddPartition(const PartitionConfig& old_conf, part.aops = std::move(aops); part.postinstall = new_conf.postinstall; part.verity = new_conf.verity; + part.version = new_conf.version; // Initialize the PartitionInfo objects if present. if (!old_conf.path.empty()) TEST_AND_RETURN_FALSE( @@ -132,6 +133,9 @@ bool PayloadFile::WritePayload(const string& payload_file, for (const auto& part : part_vec_) { PartitionUpdate* partition = manifest_.add_partitions(); partition->set_partition_name(part.name); + if (!part.version.empty()) { + partition->set_version(part.version); + } if (part.postinstall.run) { partition->set_run_postinstall(true); if (!part.postinstall.path.empty()) diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h index d1f8196e..3dce00fc 100644 --- a/payload_generator/payload_file.h +++ b/payload_generator/payload_file.h @@ -96,6 +96,8 @@ class PayloadFile { PostInstallConfig postinstall; VerityConfig verity; + // Per partition timestamp. 
+  std::string version;
 };

 std::vector part_vec_;

diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 9abb97f3..ec630435 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -119,6 +119,9 @@ struct PartitionConfig {
   // Enables the on device fec data computation by default.
   bool disable_fec_computation = false;
+
+  // Per-partition version, usually a number representing timestamp.
+  std::string version;
 };

 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 9bae74ef..3bc87bd6 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -186,6 +186,10 @@ if [[ "${COMMAND}" == "generate" ]]; then
     "Optional: The maximum unix timestamp of the OS allowed to apply this \
 payload, should be set to a number higher than the build timestamp of the \
 system running on the device, 0 if not specified."
+  DEFINE_string partition_timestamps "" \
+    "Optional: Per-partition maximum unix timestamp of the OS allowed to \
+apply this payload. Should be a comma separated key value pairs. e.x.\
+system:1234,vendor:456"
   DEFINE_string disable_fec_computation "" \
     "Optional: Disables the on device fec data computation for incremental \
 update. This feature is enabled by default."
@@ -696,6 +700,10 @@ cmd_generate() {
     GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
   fi

+  if [[ -n "${FLAGS_partition_timestamps}" ]]; then
+    GENERATOR_ARGS+=( --partition_timestamps="${FLAGS_partition_timestamps}" )
+  fi
+
   if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
     GENERATOR_ARGS+=(
       --new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"

From e9156ec8de400e24602bd08a06a02b4a47c76c7f Mon Sep 17 00:00:00 2001
From: Tianjie
Date: Tue, 11 Aug 2020 11:13:54 -0700
Subject: [PATCH 367/624] Add CowMergeOperations as a hint for snapshot write

As proposed in http://go/vabc, we want to reduce the cow size for VAB.
One natural approach is to skip writing the identical blocks to the snapshot;
instead we can read from the source blocks. Similar to the non-A/B update
schema, we need to compute a sequence for snapshot merge to avoid the
read after write problem. If there is a circular dependency, we will omit
some blocks in the result sequence to break the cycles. So libsnapshot will
write the raw data of these blocks to cow.

All extents in the CowMergeOperations are subsets of a particular OTA
SOURCE_COPY InstallOperation. Also, these src & dst extents will be
contiguous to improve the libsnapshot read performance before merge
completes, as well as to simplify the sequence generation.
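To make the read-after-write constraint concrete, here is a minimal,
self-contained C++ sketch of the ordering rule described above. It is only an
illustration, not the MergeSequenceGenerator code added by this patch; the
Copy struct and ReadsFrom helper are invented names for the example. The rule:
a copy whose source blocks overlap another copy's destination blocks must
merge before that other copy, otherwise it would read already-overwritten data.

#include <cstdint>
#include <iostream>
#include <vector>

// One block-level copy: read |num_blocks| blocks starting at |src_start|,
// write them starting at |dst_start| on the same partition.
struct Copy {
  uint64_t src_start, dst_start, num_blocks;
};

// True if |reader|'s source range overlaps |writer|'s destination range.
bool ReadsFrom(const Copy& reader, const Copy& writer) {
  return reader.src_start < writer.dst_start + writer.num_blocks &&
         writer.dst_start < reader.src_start + reader.num_blocks;
}

int main() {
  // op0 copies blocks [10,20) to [0,10); op1 copies blocks [0,10) to [20,30).
  // op1 reads blocks that op0 overwrites, so op1 has to merge before op0.
  std::vector<Copy> ops = {{10, 0, 10}, {0, 20, 10}};
  for (size_t i = 0; i < ops.size(); ++i)
    for (size_t j = 0; j < ops.size(); ++j)
      if (i != j && ReadsFrom(ops[i], ops[j]))
        std::cout << "op" << i << " must merge before op" << j << "\n";
  return 0;
}

Running the sketch prints that op1 must merge before op0. The generator in this
patch builds that dependency relation across all SOURCE_COPY operations and, as
described above, drops operations that would form a cycle so their raw data is
written to the COW instead.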
Bug: 162274240 Test: unittest pass, genertes an OTA Change-Id: I12c952593d83a8e34a0a6cff5a2066c9103a0d30 --- Android.bp | 1 + payload_consumer/delta_performer_unittest.cc | 4 +- payload_generator/delta_diff_generator.cc | 23 ++++++- payload_generator/merge_sequence_generator.cc | 43 +++++++++++++ payload_generator/merge_sequence_generator.h | 63 +++++++++++++++++++ payload_generator/payload_file.cc | 8 ++- payload_generator/payload_file.h | 4 +- .../payload_properties_unittest.cc | 2 +- update_metadata.proto | 21 +++++++ 9 files changed, 163 insertions(+), 6 deletions(-) create mode 100644 payload_generator/merge_sequence_generator.cc create mode 100644 payload_generator/merge_sequence_generator.h diff --git a/Android.bp b/Android.bp index b6ee4769..751e3554 100644 --- a/Android.bp +++ b/Android.bp @@ -491,6 +491,7 @@ cc_library_static { "payload_generator/extent_utils.cc", "payload_generator/full_update_generator.cc", "payload_generator/mapfile_filesystem.cc", + "payload_generator/merge_sequence_generator.cc", "payload_generator/payload_file.cc", "payload_generator/payload_generation_config_android.cc", "payload_generator/payload_generation_config.cc", diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index fbd754f4..449201ce 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -228,13 +228,13 @@ class DeltaPerformerTest : public ::testing::Test { new_part.path = "/dev/zero"; new_part.size = 1234; - payload.AddPartition(*old_part, new_part, aops); + payload.AddPartition(*old_part, new_part, aops, {}); // We include a kernel partition without operations. old_part->name = kPartitionNameKernel; new_part.name = kPartitionNameKernel; new_part.size = 0; - payload.AddPartition(*old_part, new_part, {}); + payload.AddPartition(*old_part, new_part, {}, {}); test_utils::ScopedTempFile payload_file("Payload-XXXXXX"); string private_key = diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc index aa492524..c2b35ee8 100644 --- a/payload_generator/delta_diff_generator.cc +++ b/payload_generator/delta_diff_generator.cc @@ -39,6 +39,7 @@ #include "update_engine/payload_generator/blob_file_writer.h" #include "update_engine/payload_generator/delta_diff_utils.h" #include "update_engine/payload_generator/full_update_generator.h" +#include "update_engine/payload_generator/merge_sequence_generator.h" #include "update_engine/payload_generator/payload_file.h" using std::string; @@ -59,12 +60,14 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { const PartitionConfig& new_part, BlobFileWriter* file_writer, std::vector* aops, + std::vector* cow_merge_sequence, std::unique_ptr strategy) : config_(config), old_part_(old_part), new_part_(new_part), file_writer_(file_writer), aops_(aops), + cow_merge_sequence_(cow_merge_sequence), strategy_(std::move(strategy)) {} PartitionProcessor(PartitionProcessor&&) noexcept = default; void Run() override { @@ -78,6 +81,17 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { LOG(FATAL) << "GenerateOperations(" << old_part_.name << ", " << new_part_.name << ") failed"; } + + bool snapshot_enabled = + config_.target.dynamic_partition_metadata && + config_.target.dynamic_partition_metadata->snapshot_enabled(); + if (old_part_.path.empty() || !snapshot_enabled) { + return; + } + auto generator = MergeSequenceGenerator::Create(*aops_); + if (!generator || 
!generator->Generate(cow_merge_sequence_)) { + LOG(FATAL) << "Failed to generate merge sequence"; + } } private: @@ -86,6 +100,7 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { const PartitionConfig& new_part_; BlobFileWriter* file_writer_; std::vector* aops_; + std::vector* cow_merge_sequence_; std::unique_ptr strategy_; DISALLOW_COPY_AND_ASSIGN(PartitionProcessor); }; @@ -123,6 +138,8 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, PartitionConfig empty_part(""); std::vector> all_aops; all_aops.resize(config.target.partitions.size()); + std::vector> all_merge_sequences; + all_merge_sequences.resize(config.target.partitions.size()); std::vector partition_tasks{}; auto thread_count = std::min(diff_utils::GetMaxThreads(), config.target.partitions.size()); @@ -153,6 +170,7 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, new_part, &blob_file, &all_aops[i], + &all_merge_sequences[i], std::move(strategy))); } thread_pool.Start(); @@ -166,7 +184,10 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, config.is_delta ? config.source.partitions[i] : empty_part; const PartitionConfig& new_part = config.target.partitions[i]; TEST_AND_RETURN_FALSE( - payload.AddPartition(old_part, new_part, std::move(all_aops[i]))); + payload.AddPartition(old_part, + new_part, + std::move(all_aops[i]), + std::move(all_merge_sequences[i]))); } } diff --git a/payload_generator/merge_sequence_generator.cc b/payload_generator/merge_sequence_generator.cc new file mode 100644 index 00000000..cf73ba3d --- /dev/null +++ b/payload_generator/merge_sequence_generator.cc @@ -0,0 +1,43 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_generator/merge_sequence_generator.h" + +namespace chromeos_update_engine { + +std::unique_ptr MergeSequenceGenerator::Create( + const std::vector& aops) { + return std::unique_ptr( + new MergeSequenceGenerator({})); +} + +bool MergeSequenceGenerator::FindDependency( + std::map>* result) const { + CHECK(result); + return true; +} + +bool MergeSequenceGenerator::Generate( + std::vector* sequence) const { + return true; +} + +bool MergeSequenceGenerator::ValidateSequence( + const std::vector& sequence) { + return true; +} + +} // namespace chromeos_update_engine diff --git a/payload_generator/merge_sequence_generator.h b/payload_generator/merge_sequence_generator.h new file mode 100644 index 00000000..bfc04d9f --- /dev/null +++ b/payload_generator/merge_sequence_generator.h @@ -0,0 +1,63 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_ +#define UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_ + +#include +#include +#include +#include +#include + +#include "update_engine/payload_generator/annotated_operation.h" +#include "update_engine/payload_generator/extent_ranges.h" +#include "update_engine/payload_generator/extent_utils.h" +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { + +// This class takes a list of CowMergeOperations; and sorts them so that no +// read after write will happen by following the sequence. When there is a +// cycle, we will omit some operations in the list. Therefore, the result +// sequence may not contain all blocks in the input list. +class MergeSequenceGenerator { + public: + // Creates an object from a list of OTA InstallOperations. Returns nullptr on + // failure. + static std::unique_ptr Create( + const std::vector& aops); + // Checks that no read after write happens in the given sequence. + static bool ValidateSequence(const std::vector& sequence); + + // Generates a merge sequence from |operations_|, puts the result in + // |sequence|. Returns false on failure. + bool Generate(std::vector* sequence) const; + + private: + explicit MergeSequenceGenerator(std::vector transfers) + : operations_(std::move(transfers)) {} + + // For a given merge operation, finds all the operations that should merge + // after myself. Put the result in |merge_after|. + bool FindDependency(std::map>* + merge_after) const; + // The list of CowMergeOperations to sort. + std::vector operations_; +}; + +} // namespace chromeos_update_engine +#endif diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index 1388f2da..49dff4e7 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -86,10 +86,12 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { bool PayloadFile::AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, - vector aops) { + vector aops, + vector merge_sequence) { Partition part; part.name = new_conf.name; part.aops = std::move(aops); + part.cow_merge_sequence = std::move(merge_sequence); part.postinstall = new_conf.postinstall; part.verity = new_conf.verity; part.version = new_conf.version; @@ -163,6 +165,10 @@ bool PayloadFile::WritePayload(const string& payload_file, for (const AnnotatedOperation& aop : part.aops) { *partition->add_operations() = aop.op; } + for (const auto& merge_op : part.cow_merge_sequence) { + *partition->add_merge_operations() = merge_op; + } + if (part.old_info.has_size() || part.old_info.has_hash()) *(partition->mutable_old_partition_info()) = part.old_info; if (part.new_info.has_size() || part.new_info.has_hash()) diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h index 3dce00fc..8b179569 100644 --- a/payload_generator/payload_file.h +++ b/payload_generator/payload_file.h @@ -43,7 +43,8 @@ class PayloadFile { // reference a blob stored in the file provided to WritePayload(). 
bool AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, - std::vector aops); + std::vector aops, + std::vector merge_sequence); // Write the payload to the |payload_file| file. The operations reference // blobs in the |data_blobs_path| file and the blobs will be reordered in the @@ -90,6 +91,7 @@ class PayloadFile { // The operations to be performed to this partition. std::vector aops; + std::vector cow_merge_sequence; PartitionInfo old_info; PartitionInfo new_info; diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc index db3902ce..e0072fc6 100644 --- a/payload_generator/payload_properties_unittest.cc +++ b/payload_generator/payload_properties_unittest.cc @@ -98,7 +98,7 @@ class PayloadPropertiesTest : public ::testing::Test { EXPECT_TRUE(strategy->GenerateOperations( config, old_part, new_part, &blob_file_writer, &aops)); - payload.AddPartition(old_part, new_part, aops); + payload.AddPartition(old_part, new_part, aops, {}); uint64_t metadata_size; EXPECT_TRUE(payload.WritePayload( diff --git a/update_metadata.proto b/update_metadata.proto index f79e38b9..373ee5ef 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -225,6 +225,22 @@ message InstallOperation { optional bytes src_sha256_hash = 9; } +// Hints to VAB snapshot to skip writing some blocks if these blocks are +// identical to the ones on the source image. The src & dst extents for each +// CowMergeOperation should be contiguous, and they're a subset of an OTA +// InstallOperation. +// During merge time, we need to follow the pre-computed sequence to avoid +// read after write, similar to the inplace update schema. +message CowMergeOperation { + enum Type { + COW_COPY = 0; // identical blocks + } + optional Type type = 1; + + optional Extent src_extent = 2; + optional Extent dst_extent = 3; +} + // Describes the update to apply to a single partition. message PartitionUpdate { // A platform-specific name to identify the partition set being updated. For @@ -293,6 +309,11 @@ message PartitionUpdate { // as an effort to support partial updates. For most partitions, // this is the build timestamp. optional string version = 17; + + // A sorted list of CowMergeOperation. When writing cow, we can choose to + // skip writing the raw bytes for these extents. During snapshot merge, the + // bytes will read from the source partitions instead. 
+ repeated CowMergeOperation merge_operations = 18; } message DynamicPartitionGroup { From f2e7ee5a3dfdeee1ecce9111d02e7538a7576eb9 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 13 Aug 2020 14:58:43 -0400 Subject: [PATCH 368/624] Display per-partition timestamp in payload_info.py Test: python3 payload_info.py some_ota.zip Change-Id: I129d032948a4e962efda38c6d78991c23113f97f --- scripts/payload_info.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/payload_info.py b/scripts/payload_info.py index 965bb76f..7625ee8e 100755 --- a/scripts/payload_info.py +++ b/scripts/payload_info.py @@ -74,7 +74,9 @@ def _DisplayManifest(self): for partition in manifest.partitions: DisplayValue(' Number of "%s" ops' % partition.partition_name, len(partition.operations)) - + for partition in manifest.partitions: + DisplayValue("Timestamp for " + + partition.partition_name, partition.version) DisplayValue('Block size', manifest.block_size) DisplayValue('Minor version', manifest.minor_version) From 03277ded5a781a28b2485ba38a957e1a333ab3c6 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 28 Jul 2020 12:32:49 -0700 Subject: [PATCH 369/624] update_engine: Providing testing capability for periodic update checks Currently, we are not able to properly test periodic update checks because these update checks are disabled on test images. To solve this problem this CL introduces a new pref test-update-check-interval-timeout that contains the number of seconds between periodic update checks. The tests can put this file in /var/lib/update_engine/prefs and restart the update_engine. The update_engine should start checking for update after the number of seconds identified in the above pref and continue checking for update with that interval. The tests also need to make sure this file is deleted at the end so it doesn't interfere with future device updates. This pref internally is deleted after it has been read/used 3 times so it can't be abused. For the same reason, the maximum value that can be set in the pref is limited to 10 minutes. BUG=chromium:953471 TEST=FEATURES=test emerge-reef update_engine TEST=flashed a device with this new image, put the pref with value of 10 seconds and restarted the update_engine, the update check happened. 
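As a quick illustration of the interval override described above, here is a
hedged standalone sketch rather than the actual update_manager implementation;
the function and constant names are invented for the example. When the testing
pref is present, its value replaces the normal fuzzed initial interval and is
clamped to at most 10 minutes.

#include <algorithm>
#include <cstdint>
#include <optional>

// Cap on the testing-only interval, mirroring the 10-minute limit above.
constexpr int64_t kMaxTestIntervalSeconds = 10 * 60;

// Returns the delay in seconds until the next periodic update check.
int64_t NextCheckDelaySeconds(std::optional<int64_t> test_pref_seconds,
                              int64_t fuzzed_default_seconds) {
  if (!test_pref_seconds)
    return fuzzed_default_seconds;  // Normal path: fuzzed initial interval.
  // Testing path: use the pref value, but never more than 10 minutes.
  return std::min(*test_pref_seconds, kMaxTestIntervalSeconds);
}

For instance, a pref value of 10 yields a 10-second delay, while a value of
11 * 60 is clamped to 600 seconds, matching the unit test added below.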
Change-Id: I3ad0e300f7908f17da26b0eb0d1510348a2d2435 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2333308 Commit-Queue: Amin Hassani Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Reviewed-by: Andrew Lassalle --- common/constants.cc | 2 + common/constants.h | 1 + update_manager/chromeos_policy_unittest.cc | 20 ++++++++ update_manager/evaluation_context-inl.h | 2 +- update_manager/fake_updater_provider.h | 6 +++ .../next_update_check_policy_impl.cc | 21 ++++++-- .../official_build_check_policy_impl.cc | 12 ++++- update_manager/real_updater_provider.cc | 48 ++++++++++++++++++- update_manager/real_updater_provider.h | 5 ++ .../real_updater_provider_unittest.cc | 26 ++++++++++ update_manager/updater_provider.h | 4 ++ update_manager/variable.h | 12 ++++- 12 files changed, 149 insertions(+), 10 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index ac652ea7..ad511d55 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -64,6 +64,8 @@ const char kPrefsP2PEnabled[] = "p2p-enabled"; const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp"; const char kPrefsP2PNumAttempts[] = "p2p-num-attempts"; const char kPrefsPayloadAttemptNumber[] = "payload-attempt-number"; +const char kPrefsTestUpdateCheckIntervalTimeout[] = + "test-update-check-interval-timeout"; // Keep |kPrefsPingActive| in sync with |kDlcMetadataFilePingActive| in // dlcservice. const char kPrefsPingActive[] = "active"; diff --git a/common/constants.h b/common/constants.h index 248fd05e..446b147a 100644 --- a/common/constants.h +++ b/common/constants.h @@ -67,6 +67,7 @@ extern const char kPrefsP2PEnabled[]; extern const char kPrefsP2PFirstAttemptTimestamp[]; extern const char kPrefsP2PNumAttempts[]; extern const char kPrefsPayloadAttemptNumber[]; +extern const char kPrefsTestUpdateCheckIntervalTimeout[]; extern const char kPrefsPingActive[]; extern const char kPrefsPingLastActive[]; extern const char kPrefsPingLastRollcall[]; diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc index f4ad165c..996db2bf 100644 --- a/update_manager/chromeos_policy_unittest.cc +++ b/update_manager/chromeos_policy_unittest.cc @@ -341,6 +341,26 @@ TEST_F(UmChromeOSPolicyTest, EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result); } +TEST_F(UmChromeOSPolicyTest, TestUpdateCheckIntervalTimeout) { + fake_state_.updater_provider() + ->var_test_update_check_interval_timeout() + ->reset(new int64_t(10)); + fake_state_.system_provider()->var_is_official_build()->reset( + new bool(false)); + + // The first time, update should not be allowed. + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result); + + // After moving the time forward more than the update check interval, it + // should now allow for update. 
+ fake_clock_.SetWallclockTime(fake_clock_.GetWallclockTime() + + TimeDelta::FromSeconds(11)); + ExpectPolicyStatus( + EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result); +} + TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedUpdatesDisabledWhenNotEnoughSlotsAbUpdates) { // UpdateCheckAllowed should return false (kSucceeded) if the image booted diff --git a/update_manager/evaluation_context-inl.h b/update_manager/evaluation_context-inl.h index 59d85da0..82861fa2 100644 --- a/update_manager/evaluation_context-inl.h +++ b/update_manager/evaluation_context-inl.h @@ -39,7 +39,7 @@ const T* EvaluationContext::GetValue(Variable* var) { std::string errmsg; const T* result = var->GetValue(RemainingTime(evaluation_monotonic_deadline_), &errmsg); - if (result == nullptr) { + if (result == nullptr && !var->IsMissingOk()) { LOG(WARNING) << "Error reading Variable " << var->GetName() << ": \"" << errmsg << "\""; } diff --git a/update_manager/fake_updater_provider.h b/update_manager/fake_updater_provider.h index 7295765d..d967f420 100644 --- a/update_manager/fake_updater_provider.h +++ b/update_manager/fake_updater_provider.h @@ -83,6 +83,10 @@ class FakeUpdaterProvider : public UpdaterProvider { return &var_update_restrictions_; } + FakeVariable* var_test_update_check_interval_timeout() override { + return &var_test_update_check_interval_timeout_; + } + private: FakeVariable var_updater_started_time_{"updater_started_time", kVariableModePoll}; @@ -108,6 +112,8 @@ class FakeUpdaterProvider : public UpdaterProvider { "forced_update_requested", kVariableModeAsync}; FakeVariable var_update_restrictions_{ "update_restrictions", kVariableModePoll}; + FakeVariable var_test_update_check_interval_timeout_{ + "test_update_check_interval_timeout", kVariableModePoll}; DISALLOW_COPY_AND_ASSIGN(FakeUpdaterProvider); }; diff --git a/update_manager/next_update_check_policy_impl.cc b/update_manager/next_update_check_policy_impl.cc index 6f9748e3..0a787186 100644 --- a/update_manager/next_update_check_policy_impl.cc +++ b/update_manager/next_update_check_policy_impl.cc @@ -72,6 +72,11 @@ EvalStatus NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime( ec->GetValue(updater_provider->var_updater_started_time()); POLICY_CHECK_VALUE_AND_FAIL(updater_started_time, error); + // This value is used for testing only and it will get deleted after the first + // time it is read. + const int64_t* interval_timeout = + ec->GetValue(updater_provider->var_test_update_check_interval_timeout()); + const Time* last_checked_time = ec->GetValue(updater_provider->var_last_checked_time()); @@ -83,13 +88,21 @@ EvalStatus NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime( // If this is the first attempt, compute and return an initial value. if (last_checked_time == nullptr || *last_checked_time < *updater_started_time) { - *next_update_check = *updater_started_time + - FuzzedInterval(&prng, - constants.timeout_initial_interval, - constants.timeout_regular_fuzz); + TimeDelta time_diff = + interval_timeout == nullptr + ? FuzzedInterval(&prng, + constants.timeout_initial_interval, + constants.timeout_regular_fuzz) + : TimeDelta::FromSeconds(*interval_timeout); + *next_update_check = *updater_started_time + time_diff; return EvalStatus::kSucceeded; } + if (interval_timeout != nullptr) { + *next_update_check = + *last_checked_time + TimeDelta::FromSeconds(*interval_timeout); + return EvalStatus::kSucceeded; + } // Check whether the server is enforcing a poll interval; if not, this value // will be zero. 
const unsigned int* server_dictated_poll_interval = diff --git a/update_manager/official_build_check_policy_impl.cc b/update_manager/official_build_check_policy_impl.cc index 096f7bf5..e80c09f8 100644 --- a/update_manager/official_build_check_policy_impl.cc +++ b/update_manager/official_build_check_policy_impl.cc @@ -27,8 +27,16 @@ EvalStatus OnlyUpdateOfficialBuildsPolicyImpl::UpdateCheckAllowed( const bool* is_official_build_p = ec->GetValue(state->system_provider()->var_is_official_build()); if (is_official_build_p != nullptr && !(*is_official_build_p)) { - LOG(INFO) << "Unofficial build, blocking periodic update checks."; - return EvalStatus::kAskMeAgainLater; + const int64_t* interval_timeout_p = ec->GetValue( + state->updater_provider()->var_test_update_check_interval_timeout()); + // The |interval_timeout | is used for testing only to test periodic + // update checks on unofficial images. + if (interval_timeout_p == nullptr) { + LOG(INFO) << "Unofficial build, blocking periodic update checks."; + return EvalStatus::kAskMeAgainLater; + } + LOG(INFO) << "Unofficial build, but periodic update check interval " + << "timeout is defined, so update is not blocked."; } return EvalStatus::kContinue; } diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc index 134db691..268f3bb5 100644 --- a/update_manager/real_updater_provider.cc +++ b/update_manager/real_updater_provider.cc @@ -18,6 +18,7 @@ #include +#include #include #include @@ -439,6 +440,46 @@ class UpdateRestrictionsVariable DISALLOW_COPY_AND_ASSIGN(UpdateRestrictionsVariable); }; +// A variable class for reading timeout interval prefs value. +class TestUpdateCheckIntervalTimeoutVariable : public Variable { + public: + TestUpdateCheckIntervalTimeoutVariable( + const string& name, chromeos_update_engine::PrefsInterface* prefs) + : Variable(name, kVariableModePoll), + prefs_(prefs), + read_count_(0) { + SetMissingOk(); + } + ~TestUpdateCheckIntervalTimeoutVariable() = default; + + private: + const int64_t* GetValue(TimeDelta /* timeout */, + string* /* errmsg */) override { + auto key = chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout; + int64_t result; + if (prefs_ && prefs_->Exists(key) && prefs_->GetInt64(key, &result)) { + // This specific value is used for testing only. So it should not be kept + // around and should be deleted after a few reads. + if (++read_count_ > 2) + prefs_->Delete(key); + + // Limit the timeout interval to 10 minutes so it is not abused if it is + // seen on official images. + return new int64_t(std::min(result, static_cast(10 * 60))); + } + return nullptr; + } + + chromeos_update_engine::PrefsInterface* prefs_; + + // Counts how many times this variable is read. This is used to delete the + // underlying file defining the variable after a certain number of reads in + // order to prevent any abuse of this variable. + int read_count_; + + DISALLOW_COPY_AND_ASSIGN(TestUpdateCheckIntervalTimeoutVariable); +}; + // RealUpdaterProvider methods. 
RealUpdaterProvider::RealUpdaterProvider(SystemState* system_state) @@ -472,6 +513,9 @@ RealUpdaterProvider::RealUpdaterProvider(SystemState* system_state) "server_dictated_poll_interval", system_state_)), var_forced_update_requested_(new ForcedUpdateRequestedVariable( "forced_update_requested", system_state_)), - var_update_restrictions_(new UpdateRestrictionsVariable( - "update_restrictions", system_state_)) {} + var_update_restrictions_( + new UpdateRestrictionsVariable("update_restrictions", system_state_)), + var_test_update_check_interval_timeout_( + new TestUpdateCheckIntervalTimeoutVariable( + "test_update_check_interval_timeout", system_state_->prefs())) {} } // namespace chromeos_update_manager diff --git a/update_manager/real_updater_provider.h b/update_manager/real_updater_provider.h index 1b468956..08193577 100644 --- a/update_manager/real_updater_provider.h +++ b/update_manager/real_updater_provider.h @@ -94,6 +94,10 @@ class RealUpdaterProvider : public UpdaterProvider { return var_update_restrictions_.get(); } + Variable* var_test_update_check_interval_timeout() override { + return var_test_update_check_interval_timeout_.get(); + } + private: // A pointer to the update engine's system state aggregator. chromeos_update_engine::SystemState* system_state_; @@ -114,6 +118,7 @@ class RealUpdaterProvider : public UpdaterProvider { std::unique_ptr> var_server_dictated_poll_interval_; std::unique_ptr> var_forced_update_requested_; std::unique_ptr> var_update_restrictions_; + std::unique_ptr> var_test_update_check_interval_timeout_; DISALLOW_COPY_AND_ASSIGN(RealUpdaterProvider); }; diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc index fb7a7633..e31f6f31 100644 --- a/update_manager/real_updater_provider_unittest.cc +++ b/update_manager/real_updater_provider_unittest.cc @@ -445,4 +445,30 @@ TEST_F(UmRealUpdaterProviderTest, GetUpdateRestrictionsNone) { UmTestUtils::ExpectVariableHasValue(UpdateRestrictions::kNone, provider_->var_update_restrictions()); } + +TEST_F(UmRealUpdaterProviderTest, TestUpdateCheckIntervalTimeout) { + UmTestUtils::ExpectVariableNotSet( + provider_->var_test_update_check_interval_timeout()); + fake_prefs_.SetInt64( + chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout, 1); + UmTestUtils::ExpectVariableHasValue( + static_cast(1), + provider_->var_test_update_check_interval_timeout()); + + // Make sure the value does not exceed a threshold of 10 minutes. + fake_prefs_.SetInt64( + chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout, 11 * 60); + UmTestUtils::ExpectVariableHasValue( + static_cast(10 * 60), + provider_->var_test_update_check_interval_timeout()); + UmTestUtils::ExpectVariableHasValue( + static_cast(10 * 60), + provider_->var_test_update_check_interval_timeout()); + + // Just to make sure it is not cached anywhere and deleted. The variable is + // allowd to be read 3 times. + UmTestUtils::ExpectVariableNotSet( + provider_->var_test_update_check_interval_timeout()); +} + } // namespace chromeos_update_manager diff --git a/update_manager/updater_provider.h b/update_manager/updater_provider.h index cb626238..98fd6d14 100644 --- a/update_manager/updater_provider.h +++ b/update_manager/updater_provider.h @@ -115,6 +115,10 @@ class UpdaterProvider : public Provider { // for all updates. virtual Variable* var_update_restrictions() = 0; + // A variable that returns the number of seconds for the first update check to + // happen. 
+ virtual Variable* var_test_update_check_interval_timeout() = 0; + protected: UpdaterProvider() {} diff --git a/update_manager/variable.h b/update_manager/variable.h index 6c7d3506..9ac7dae6 100644 --- a/update_manager/variable.h +++ b/update_manager/variable.h @@ -83,6 +83,10 @@ class BaseVariable { // variable. In other case, it returns 0. base::TimeDelta GetPollInterval() const { return poll_interval_; } + // Returns true, if the value for this variable is expected to be missing + // sometimes so we can avoid printing confusing error logs. + bool IsMissingOk() const { return missing_ok_; } + // Adds and removes observers for value changes on the variable. This only // works for kVariableAsync variables since the other modes don't track value // changes. Adding the same observer twice has no effect. @@ -115,6 +119,8 @@ class BaseVariable { poll_interval_ = poll_interval; } + void SetMissingOk() { missing_ok_ = true; } + // Calls ValueChanged on all the observers. void NotifyValueChanged() { // Fire all the observer methods from the main loop as single call. In order @@ -140,7 +146,8 @@ class BaseVariable { : name_(name), mode_(mode), poll_interval_(mode == kVariableModePoll ? poll_interval - : base::TimeDelta()) {} + : base::TimeDelta()), + missing_ok_(false) {} void OnValueChangedNotification() { // A ValueChanged() method can change the list of observers, for example @@ -174,6 +181,9 @@ class BaseVariable { // The list of value changes observers. std::list observer_list_; + // Defines whether this variable is expected to have no value. + bool missing_ok_; + DISALLOW_COPY_AND_ASSIGN(BaseVariable); }; From 87af6c083dca39250c29c9ad3a0a6627891eccb7 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Tue, 11 Aug 2020 15:06:26 -0700 Subject: [PATCH 370/624] Implement the functions in CowMergeOperations Implement the function to create an object & validate sequence. Bug: 162274240 Test: unit tests pass Change-Id: Id41460a886d94a98e154b222c3401a5f95b9e047 --- Android.bp | 1 + payload_generator/extent_ranges.cc | 9 ++ payload_generator/extent_ranges.h | 3 + payload_generator/merge_sequence_generator.cc | 82 +++++++++++++++- payload_generator/merge_sequence_generator.h | 11 +++ .../merge_sequence_generator_unittest.cc | 97 +++++++++++++++++++ 6 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 payload_generator/merge_sequence_generator_unittest.cc diff --git a/Android.bp b/Android.bp index 751e3554..e5f0dd85 100644 --- a/Android.bp +++ b/Android.bp @@ -696,6 +696,7 @@ cc_test { "payload_generator/fake_filesystem.cc", "payload_generator/full_update_generator_unittest.cc", "payload_generator/mapfile_filesystem_unittest.cc", + "payload_generator/merge_sequence_generator_unittest.cc", "payload_generator/payload_file_unittest.cc", "payload_generator/payload_generation_config_android_unittest.cc", "payload_generator/payload_generation_config_unittest.cc", diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc index 4600efe7..2098639b 100644 --- a/payload_generator/extent_ranges.cc +++ b/payload_generator/extent_ranges.cc @@ -202,6 +202,15 @@ void ExtentRanges::SubtractRepeatedExtents( } } +bool ExtentRanges::OverlapsWithExtent(const Extent& extent) const { + for (const auto& entry : extent_set_) { + if (ExtentsOverlap(entry, extent)) { + return true; + } + } + return false; +} + bool ExtentRanges::ContainsBlock(uint64_t block) const { auto lower = extent_set_.lower_bound(ExtentForRange(block, 1)); // The block could be on the extent before the one in |lower|. 
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h index 62ffff40..68aa27f8 100644 --- a/payload_generator/extent_ranges.h +++ b/payload_generator/extent_ranges.h @@ -63,6 +63,9 @@ class ExtentRanges { void AddRanges(const ExtentRanges& ranges); void SubtractRanges(const ExtentRanges& ranges); + // Returns true if the input extent overlaps with the current ExtentRanges. + bool OverlapsWithExtent(const Extent& extent) const; + // Returns whether the block |block| is in this ExtentRange. bool ContainsBlock(uint64_t block) const; diff --git a/payload_generator/merge_sequence_generator.cc b/payload_generator/merge_sequence_generator.cc index cf73ba3d..dd801d6a 100644 --- a/payload_generator/merge_sequence_generator.cc +++ b/payload_generator/merge_sequence_generator.cc @@ -16,12 +16,77 @@ #include "update_engine/payload_generator/merge_sequence_generator.h" +#include "update_engine/payload_generator/extent_utils.h" + namespace chromeos_update_engine { +CowMergeOperation CreateCowMergeOperation(const Extent& src_extent, + const Extent& dst_extent) { + CowMergeOperation ret; + ret.set_type(CowMergeOperation::COW_COPY); + *ret.mutable_src_extent() = src_extent; + *ret.mutable_dst_extent() = dst_extent; + return ret; +} + +std::ostream& operator<<(std::ostream& os, + const CowMergeOperation& merge_operation) { + os << "CowMergeOperation src extent: " + << ExtentsToString({merge_operation.src_extent()}) + << ", dst extent: " << ExtentsToString({merge_operation.dst_extent()}); + return os; +} + +// The OTA generation guarantees that all blocks in the dst extent will be +// written only once. So we can use it to order the CowMergeOperation. +bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2) { + return op1.dst_extent().start_block() < op2.dst_extent().start_block(); +} + +bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2) { + return op1.type() == op2.type() && op1.src_extent() == op2.src_extent() && + op1.dst_extent() == op2.dst_extent(); +} + std::unique_ptr MergeSequenceGenerator::Create( const std::vector& aops) { + std::vector sequence; + for (const auto& aop : aops) { + // Only handle SOURCE_COPY now for the cow size optimization. + if (aop.op.type() != InstallOperation::SOURCE_COPY) { + continue; + } + if (aop.op.dst_extents().size() != 1) { + std::vector out_extents; + ExtentsToVector(aop.op.dst_extents(), &out_extents); + LOG(ERROR) << "The dst extents for source_copy expects to be contiguous," + << " dst extents: " << ExtentsToString(out_extents); + return nullptr; + } + + // Split the source extents. + size_t used_blocks = 0; + for (const auto& src_extent : aop.op.src_extents()) { + // The dst_extent in the merge sequence will be a subset of + // InstallOperation's dst_extent. This will simplify the OTA -> COW + // conversion when we install the payload. 
+ Extent dst_extent = + ExtentForRange(aop.op.dst_extents(0).start_block() + used_blocks, + src_extent.num_blocks()); + sequence.emplace_back(CreateCowMergeOperation(src_extent, dst_extent)); + used_blocks += src_extent.num_blocks(); + } + + if (used_blocks != aop.op.dst_extents(0).num_blocks()) { + LOG(ERROR) << "Number of blocks in src extents doesn't equal to the" + << " ones in the dst extents, src blocks " << used_blocks + << ", dst blocks " << aop.op.dst_extents(0).num_blocks(); + return nullptr; + } + } + return std::unique_ptr( - new MergeSequenceGenerator({})); + new MergeSequenceGenerator(sequence)); } bool MergeSequenceGenerator::FindDependency( @@ -37,6 +102,21 @@ bool MergeSequenceGenerator::Generate( bool MergeSequenceGenerator::ValidateSequence( const std::vector& sequence) { + LOG(INFO) << "Validating merge sequence"; + ExtentRanges visited; + for (const auto& op : sequence) { + if (visited.OverlapsWithExtent(op.src_extent())) { + LOG(ERROR) << "Transfer violates the merge sequence " << op + << "Visited extent ranges: "; + visited.Dump(); + return false; + } + + CHECK(!visited.OverlapsWithExtent(op.dst_extent())) + << "dst extent should write only once."; + visited.AddExtent(op.dst_extent()); + } + return true; } diff --git a/payload_generator/merge_sequence_generator.h b/payload_generator/merge_sequence_generator.h index bfc04d9f..bc0158ee 100644 --- a/payload_generator/merge_sequence_generator.h +++ b/payload_generator/merge_sequence_generator.h @@ -29,6 +29,16 @@ #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { +// Constructs CowMergeOperation from src & dst extents +CowMergeOperation CreateCowMergeOperation(const Extent& src_extent, + const Extent& dst_extent); + +// Comparator for CowMergeOperation. +bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2); +bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2); + +std::ostream& operator<<(std::ostream& os, + const CowMergeOperation& merge_operation); // This class takes a list of CowMergeOperations; and sorts them so that no // read after write will happen by following the sequence. When there is a @@ -48,6 +58,7 @@ class MergeSequenceGenerator { bool Generate(std::vector* sequence) const; private: + friend class MergeSequenceGeneratorTest; explicit MergeSequenceGenerator(std::vector transfers) : operations_(std::move(transfers)) {} diff --git a/payload_generator/merge_sequence_generator_unittest.cc b/payload_generator/merge_sequence_generator_unittest.cc new file mode 100644 index 00000000..83cf78ff --- /dev/null +++ b/payload_generator/merge_sequence_generator_unittest.cc @@ -0,0 +1,97 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include +#include + +#include + +#include "update_engine/common/test_utils.h" +#include "update_engine/payload_consumer/payload_constants.h" +#include "update_engine/payload_generator/extent_utils.h" +#include "update_engine/payload_generator/merge_sequence_generator.h" + +using chromeos_update_engine::test_utils::FillWithData; +using std::string; +using std::vector; + +namespace chromeos_update_engine { +class MergeSequenceGeneratorTest : public ::testing::Test { + protected: + void VerifyTransfers(MergeSequenceGenerator* generator, + const std::vector& expected) { + ASSERT_EQ(expected, generator->operations_); + } +}; + +TEST_F(MergeSequenceGeneratorTest, Create) { + std::vector aops{{"file1", {}}, {"file2", {}}}; + aops[0].op.set_type(InstallOperation::SOURCE_COPY); + *aops[0].op.add_src_extents() = ExtentForRange(10, 10); + *aops[0].op.add_dst_extents() = ExtentForRange(30, 10); + + aops[1].op.set_type(InstallOperation::SOURCE_COPY); + *aops[1].op.add_src_extents() = ExtentForRange(20, 10); + *aops[1].op.add_dst_extents() = ExtentForRange(40, 10); + + auto generator = MergeSequenceGenerator::Create(aops); + ASSERT_TRUE(generator); + std::vector expected = { + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(30, 10)), + CreateCowMergeOperation(ExtentForRange(20, 10), ExtentForRange(40, 10))}; + VerifyTransfers(generator.get(), expected); + + *aops[1].op.add_src_extents() = ExtentForRange(30, 5); + *aops[1].op.add_dst_extents() = ExtentForRange(50, 5); + generator = MergeSequenceGenerator::Create(aops); + ASSERT_FALSE(generator); +} + +TEST_F(MergeSequenceGeneratorTest, Create_SplitSource) { + InstallOperation op; + op.set_type(InstallOperation::SOURCE_COPY); + *(op.add_src_extents()) = ExtentForRange(2, 3); + *(op.add_src_extents()) = ExtentForRange(6, 1); + *(op.add_src_extents()) = ExtentForRange(8, 4); + *(op.add_dst_extents()) = ExtentForRange(10, 8); + + AnnotatedOperation aop{"file1", op}; + auto generator = MergeSequenceGenerator::Create({aop}); + ASSERT_TRUE(generator); + std::vector expected = { + CreateCowMergeOperation(ExtentForRange(2, 3), ExtentForRange(10, 3)), + CreateCowMergeOperation(ExtentForRange(6, 1), ExtentForRange(13, 1)), + CreateCowMergeOperation(ExtentForRange(8, 4), ExtentForRange(14, 4))}; + VerifyTransfers(generator.get(), expected); +} + +TEST_F(MergeSequenceGeneratorTest, ValidateSequence) { + std::vector transfers = { + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)), + CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)), + }; + + // Self overlapping + ASSERT_TRUE(MergeSequenceGenerator::ValidateSequence(transfers)); + + transfers = { + CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(20, 10)), + CreateCowMergeOperation(ExtentForRange(15, 10), ExtentForRange(10, 10)), + }; + ASSERT_FALSE(MergeSequenceGenerator::ValidateSequence(transfers)); +} + +} // namespace chromeos_update_engine From b9c8210c59150de6aec43ff42013980e50285901 Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Fri, 28 Aug 2020 16:06:33 +0900 Subject: [PATCH 371/624] update_engine: use libmetrics.pc not libmetrics-${libbase_ver}.pc libmetrics is now providing unversioned library. and versioned pkg-config file will be removed. 
BUG=chromium:920513 TEST=CQ Change-Id: I8562bdc7cdc5ea9a714ac411b9dbcb355155d836 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2380054 Reviewed-by: Amin Hassani Commit-Queue: Qijiang Fan Tested-by: Qijiang Fan --- BUILD.gn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD.gn b/BUILD.gn index 5d2e498b..59ad2484 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -254,7 +254,7 @@ static_library("libupdate_engine") { "expat", "libcurl", "libdebugd-client", - "libmetrics-${libbase_ver}", + "libmetrics", "libpower_manager-client", "libsession_manager-client", "libshill-client", From 31ad11922cd966772271c76a6297969b6034952e Mon Sep 17 00:00:00 2001 From: Tianjie Date: Mon, 24 Aug 2020 16:34:35 -0700 Subject: [PATCH 372/624] Implement the topology sort in CreateCowMergeOperation Generate a sequence of COW_COPY operations. The extents in these operations won't be written as raw bytes in snapshots. Instead, they will read from the source partitions. So it's important to make sure no read after write happens on the source partitions, similiar to the inplace update. If the topology sort isn't possible due to cycles, we will omit some blocks to break the cycles. And these blocks will be carried as raw bytes in cow instead. Bug: 162274240 Test: generate a payload, unittests pass Change-Id: If4866704459b919d4bc09be48286b3e321b70497 --- payload_generator/merge_sequence_generator.cc | 146 ++++++++++++++++++ .../merge_sequence_generator_unittest.cc | 111 ++++++++++++- 2 files changed, 251 insertions(+), 6 deletions(-) diff --git a/payload_generator/merge_sequence_generator.cc b/payload_generator/merge_sequence_generator.cc index dd801d6a..eaffeac2 100644 --- a/payload_generator/merge_sequence_generator.cc +++ b/payload_generator/merge_sequence_generator.cc @@ -16,6 +16,8 @@ #include "update_engine/payload_generator/merge_sequence_generator.h" +#include + #include "update_engine/payload_generator/extent_utils.h" namespace chromeos_update_engine { @@ -85,6 +87,7 @@ std::unique_ptr MergeSequenceGenerator::Create( } } + std::sort(sequence.begin(), sequence.end()); return std::unique_ptr( new MergeSequenceGenerator(sequence)); } @@ -92,11 +95,154 @@ std::unique_ptr MergeSequenceGenerator::Create( bool MergeSequenceGenerator::FindDependency( std::map>* result) const { CHECK(result); + LOG(INFO) << "Finding dependencies"; + + // Since the OTA operation may reuse some source blocks, use the binary + // search on sorted dst extents to find overlaps. + std::map> merge_after; + for (const auto& op : operations_) { + // lower bound (inclusive): dst extent's end block >= src extent's start + // block. + const auto lower_it = std::lower_bound( + operations_.begin(), + operations_.end(), + op, + [](const CowMergeOperation& it, const CowMergeOperation& op) { + auto dst_end_block = + it.dst_extent().start_block() + it.dst_extent().num_blocks() - 1; + return dst_end_block < op.src_extent().start_block(); + }); + // upper bound: dst extent's start block > src extent's end block + const auto upper_it = std::upper_bound( + lower_it, + operations_.end(), + op, + [](const CowMergeOperation& op, const CowMergeOperation& it) { + auto src_end_block = + op.src_extent().start_block() + op.src_extent().num_blocks() - 1; + return src_end_block < it.dst_extent().start_block(); + }); + + // TODO(xunchang) skip inserting the empty set to merge_after. 
+ if (lower_it == upper_it) { + merge_after.insert({op, {}}); + } else { + std::set operations(lower_it, upper_it); + auto it = operations.find(op); + if (it != operations.end()) { + LOG(INFO) << "Self overlapping " << op; + operations.erase(it); + } + auto ret = merge_after.emplace(op, std::move(operations)); + // Check the insertion indeed happens. + CHECK(ret.second); + } + } + + *result = std::move(merge_after); return true; } bool MergeSequenceGenerator::Generate( std::vector* sequence) const { + sequence->clear(); + std::map> merge_after; + if (!FindDependency(&merge_after)) { + LOG(ERROR) << "Failed to find dependencies"; + return false; + } + + LOG(INFO) << "Generating sequence"; + + // Use the non-DFS version of the topology sort. So we can control the + // operations to discard to break cycles; thus yielding a deterministic + // sequence. + std::map incoming_edges; + for (const auto& it : merge_after) { + for (const auto& blocked : it.second) { + // Value is default initialized to 0. + incoming_edges[blocked] += 1; + } + } + + std::set free_operations; + for (const auto& op : operations_) { + if (incoming_edges.find(op) == incoming_edges.end()) { + free_operations.insert(op); + } + } + + std::vector merge_sequence; + std::set convert_to_raw; + while (!incoming_edges.empty()) { + if (!free_operations.empty()) { + merge_sequence.insert( + merge_sequence.end(), free_operations.begin(), free_operations.end()); + } else { + auto to_convert = incoming_edges.begin()->first; + free_operations.insert(to_convert); + convert_to_raw.insert(to_convert); + LOG(INFO) << "Converting operation to raw " << to_convert; + } + + std::set next_free_operations; + for (const auto& op : free_operations) { + incoming_edges.erase(op); + + // Now that this particular operation is merged, other operations blocked + // by this one may be free. Decrement the count of blocking operations, + // and set up the free operations for the next iteration. + for (const auto& blocked : merge_after[op]) { + auto it = incoming_edges.find(blocked); + if (it == incoming_edges.end()) { + continue; + } + + auto blocking_transfer_count = &it->second; + if (*blocking_transfer_count <= 0) { + LOG(ERROR) << "Unexpected count in merge after map " + << blocking_transfer_count; + return false; + } + // This operation is no longer blocked by anyone. Add it to the merge + // sequence in the next iteration. 
+ *blocking_transfer_count -= 1; + if (*blocking_transfer_count == 0) { + next_free_operations.insert(blocked); + } + } + } + + LOG(INFO) << "Remaining transfers " << incoming_edges.size() + << ", free transfers " << free_operations.size() + << ", merge_sequence size " << merge_sequence.size(); + free_operations = std::move(next_free_operations); + } + + if (!free_operations.empty()) { + merge_sequence.insert( + merge_sequence.end(), free_operations.begin(), free_operations.end()); + } + + CHECK_EQ(operations_.size(), merge_sequence.size() + convert_to_raw.size()); + + size_t blocks_in_sequence = 0; + for (const CowMergeOperation& transfer : merge_sequence) { + blocks_in_sequence += transfer.dst_extent().num_blocks(); + } + + size_t blocks_in_raw = 0; + for (const CowMergeOperation& transfer : convert_to_raw) { + blocks_in_raw += transfer.dst_extent().num_blocks(); + } + + LOG(INFO) << "Blocks in merge sequence " << blocks_in_sequence + << ", blocks in raw " << blocks_in_raw; + if (!ValidateSequence(merge_sequence)) { + return false; + } + + *sequence = std::move(merge_sequence); return true; } diff --git a/payload_generator/merge_sequence_generator_unittest.cc b/payload_generator/merge_sequence_generator_unittest.cc index 83cf78ff..567ede1e 100644 --- a/payload_generator/merge_sequence_generator_unittest.cc +++ b/payload_generator/merge_sequence_generator_unittest.cc @@ -14,20 +14,15 @@ // limitations under the License. // -#include +#include #include #include -#include "update_engine/common/test_utils.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_generator/extent_utils.h" #include "update_engine/payload_generator/merge_sequence_generator.h" -using chromeos_update_engine::test_utils::FillWithData; -using std::string; -using std::vector; - namespace chromeos_update_engine { class MergeSequenceGeneratorTest : public ::testing::Test { protected: @@ -35,6 +30,23 @@ class MergeSequenceGeneratorTest : public ::testing::Test { const std::vector& expected) { ASSERT_EQ(expected, generator->operations_); } + + void FindDependency( + std::vector transfers, + std::map>* result) { + std::sort(transfers.begin(), transfers.end()); + MergeSequenceGenerator generator(std::move(transfers)); + ASSERT_TRUE(generator.FindDependency(result)); + } + + void GenerateSequence(std::vector transfers, + const std::vector& expected) { + std::sort(transfers.begin(), transfers.end()); + MergeSequenceGenerator generator(std::move(transfers)); + std::vector sequence; + ASSERT_TRUE(generator.Generate(&sequence)); + ASSERT_EQ(expected, sequence); + } }; TEST_F(MergeSequenceGeneratorTest, Create) { @@ -78,6 +90,47 @@ TEST_F(MergeSequenceGeneratorTest, Create_SplitSource) { VerifyTransfers(generator.get(), expected); } +TEST_F(MergeSequenceGeneratorTest, FindDependency) { + std::vector transfers = { + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)), + CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(50, 10)), + }; + + std::map> merge_after; + FindDependency(transfers, &merge_after); + ASSERT_EQ(std::set(), merge_after.at(transfers[0])); + ASSERT_EQ(std::set(), merge_after.at(transfers[1])); + + transfers = { + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)), + CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)), + CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)), + }; + + FindDependency(transfers, &merge_after); + ASSERT_EQ(std::set({transfers[2]}), + 
merge_after.at(transfers[0])); + ASSERT_EQ(std::set({transfers[0], transfers[2]}), + merge_after.at(transfers[1])); + ASSERT_EQ(std::set({transfers[0], transfers[1]}), + merge_after.at(transfers[2])); +} + +TEST_F(MergeSequenceGeneratorTest, FindDependency_ReusedSourceBlocks) { + std::vector transfers = { + CreateCowMergeOperation(ExtentForRange(5, 10), ExtentForRange(15, 10)), + CreateCowMergeOperation(ExtentForRange(6, 5), ExtentForRange(30, 5)), + CreateCowMergeOperation(ExtentForRange(50, 5), ExtentForRange(5, 5)), + }; + + std::map> merge_after; + FindDependency(transfers, &merge_after); + ASSERT_EQ(std::set({transfers[2]}), + merge_after.at(transfers[0])); + ASSERT_EQ(std::set({transfers[2]}), + merge_after.at(transfers[1])); +} + TEST_F(MergeSequenceGeneratorTest, ValidateSequence) { std::vector transfers = { CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)), @@ -94,4 +147,50 @@ TEST_F(MergeSequenceGeneratorTest, ValidateSequence) { ASSERT_FALSE(MergeSequenceGenerator::ValidateSequence(transfers)); } +TEST_F(MergeSequenceGeneratorTest, GenerateSequenceNoCycles) { + std::vector transfers = { + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)), + // file3 should merge before file2 + CreateCowMergeOperation(ExtentForRange(40, 5), ExtentForRange(25, 5)), + CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)), + }; + + std::vector expected{ + transfers[0], transfers[2], transfers[1]}; + GenerateSequence(transfers, expected); +} + +TEST_F(MergeSequenceGeneratorTest, GenerateSequenceWithCycles) { + std::vector transfers = { + CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)), + CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)), + CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(25, 10)), + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)), + }; + + // file 1,2,3 form a cycle. And file3, whose dst ext has smallest offset, will + // be converted to raw blocks + std::vector expected{ + transfers[3], transfers[1], transfers[0]}; + GenerateSequence(transfers, expected); +} + +TEST_F(MergeSequenceGeneratorTest, GenerateSequenceMultipleCycles) { + std::vector transfers = { + // cycle 1 + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)), + CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)), + CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)), + // cycle 2 + CreateCowMergeOperation(ExtentForRange(55, 10), ExtentForRange(60, 10)), + CreateCowMergeOperation(ExtentForRange(60, 10), ExtentForRange(70, 10)), + CreateCowMergeOperation(ExtentForRange(70, 10), ExtentForRange(55, 10)), + }; + + // file 3, 6 will be converted to raw. + std::vector expected{ + transfers[1], transfers[0], transfers[4], transfers[3]}; + GenerateSequence(transfers, expected); +} + } // namespace chromeos_update_engine From d89464430ff22190cfe73cb72eb14592cb8c8aa8 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Fri, 28 Aug 2020 12:26:35 -0700 Subject: [PATCH 373/624] update_engine: Move to C++17 The AOSP is already on C++17 and CrOS stuck on C++14 prevents them from adding C++17 features and even if they add it, we have to revert them when we merge. Let's enable C++17 on update_engine at least so to make things easier. 
BUG=b:163153182 TEST=CQ passes Change-Id: I037a56fbea2b7dd8429dc3734c0b3230c87e0e55 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2382415 Tested-by: Amin Hassani Reviewed-by: Kelvin Zhang Reviewed-by: Andrew Lassalle Reviewed-by: Jae Hoon Kim Reviewed-by: Mike Frysinger Commit-Queue: Amin Hassani --- BUILD.gn | 1 + 1 file changed, 1 insertion(+) diff --git a/BUILD.gn b/BUILD.gn index 59ad2484..43bc7878 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -60,6 +60,7 @@ group("all") { pkg_config("target_defaults") { cflags_cc = [ "-fno-strict-aliasing", + "-std=gnu++17", "-Wnon-virtual-dtor", ] cflags = [ From c1c930547968f75be0964f2c48bfe4b2d0e04a92 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 26 Aug 2020 18:22:09 +0000 Subject: [PATCH 374/624] Revert "Revert "Expose extent_ranges for libsnapshot to use in resolving..."" This reverts commit 5ef2d8ae7bc8ccd03ea8fc19e3e0b9a11f1dfeaf. Reason for revert: re-landing Change-Id: If9d055e67158f003ba4aa05f170b90844948e2b4 --- Android.bp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Android.bp b/Android.bp index e5f0dd85..95de8b2b 100644 --- a/Android.bp +++ b/Android.bp @@ -468,6 +468,20 @@ cc_defaults { ], } +cc_library_static { + name: "libpayload_extent_ranges", + defaults: [ + "ue_defaults", + ], + host_supported: true, + srcs: [ + "payload_generator/extent_ranges.cc", + ], + static_libs: [ + "update_metadata-protos", + ], +} + cc_library_static { name: "libpayload_generator", defaults: [ @@ -739,4 +753,5 @@ cc_library_headers { apex_available: [ "com.android.gki.*", ], + host_supported: true, } From 7ad016b3807dd3cfef547b3db7def7cbfb228570 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 3 Sep 2020 14:19:13 -0700 Subject: [PATCH 375/624] update_engine: Increase periodic update check read allows Delete the periodic update check interval marker file after 6 attempts at reading it instead of the previous 3 so the auto updater tests can do more interesting things with it. BUG=chromium:953471 TEST=sudo FEATURES=test emerge update_engine Change-Id: I1a67fc5dadfd1ae0fde1308e398e6eb21171df05 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2393098 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: David Haddock Commit-Queue: David Haddock --- update_manager/real_updater_provider.cc | 2 +- update_manager/real_updater_provider_unittest.cc | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc index 268f3bb5..84657180 100644 --- a/update_manager/real_updater_provider.cc +++ b/update_manager/real_updater_provider.cc @@ -460,7 +460,7 @@ class TestUpdateCheckIntervalTimeoutVariable : public Variable { if (prefs_ && prefs_->Exists(key) && prefs_->GetInt64(key, &result)) { // This specific value is used for testing only. So it should not be kept // around and should be deleted after a few reads. 
- if (++read_count_ > 2) + if (++read_count_ > 5) prefs_->Delete(key); // Limit the timeout interval to 10 minutes so it is not abused if it is diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc index e31f6f31..f0804c44 100644 --- a/update_manager/real_updater_provider_unittest.cc +++ b/update_manager/real_updater_provider_unittest.cc @@ -458,15 +458,14 @@ TEST_F(UmRealUpdaterProviderTest, TestUpdateCheckIntervalTimeout) { // Make sure the value does not exceed a threshold of 10 minutes. fake_prefs_.SetInt64( chromeos_update_engine::kPrefsTestUpdateCheckIntervalTimeout, 11 * 60); - UmTestUtils::ExpectVariableHasValue( - static_cast(10 * 60), - provider_->var_test_update_check_interval_timeout()); - UmTestUtils::ExpectVariableHasValue( - static_cast(10 * 60), - provider_->var_test_update_check_interval_timeout()); + // The next 5 reads should return valid values. + for (int i = 0; i < 5; ++i) + UmTestUtils::ExpectVariableHasValue( + static_cast(10 * 60), + provider_->var_test_update_check_interval_timeout()); // Just to make sure it is not cached anywhere and deleted. The variable is - // allowd to be read 3 times. + // allowd to be read 6 times. UmTestUtils::ExpectVariableNotSet( provider_->var_test_update_check_interval_timeout()); } From 9612503d166744cfa8c37aac1b41444d517929ff Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 2 Sep 2020 16:23:54 -0700 Subject: [PATCH 376/624] update_engine: separate update_engine.conf into its own for Chrome OS Let's have separate files that indicates the minor and major versions for Chrome OS and Android. This way each system can individually move the major and minor versions up and down without interfering with each other. BUG=b:163153182 TEST=None Change-Id: Idd3b0f692e88d8592aa8b13c2be65b21ac8efe8e Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2392896 Tested-by: Amin Hassani Reviewed-by: Tianjie Xu Reviewed-by: Jae Hoon Kim Reviewed-by: Andrew Lassalle --- update_engine.conf.chromeos | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 update_engine.conf.chromeos diff --git a/update_engine.conf.chromeos b/update_engine.conf.chromeos new file mode 100644 index 00000000..af213ad9 --- /dev/null +++ b/update_engine.conf.chromeos @@ -0,0 +1,2 @@ +PAYLOAD_MAJOR_VERSION=2 +PAYLOAD_MINOR_VERSION=6 From 87029337e3e244440e460a496a1381b533213481 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 1 Sep 2020 17:20:08 -0700 Subject: [PATCH 377/624] HardwareInterface::IsPartitionUpdateValid: fine grained error Let the function emit an error code instead of a boolean to indicate details of the error that is encountered. For every partition, if downgrade is detected, emit kPayloadTimestampError. In this case, still check other partitions for more severe errors before returning this error. In some cases, e.g. DeltaArchiveManifest carries a version field that is not a recognized format, or timestamp sysprops in Android is not an integer, report a more severe error. If only downgrade errors are encountered, AllowDowngrade() can still override the result, and proceed with the update; but, AllowDowngrade cannot override those severe errors. 
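In sketch form only (the authoritative logic is in DeltaPerformer::ValidateManifest and CheckTimestampError in the diff below), the intended caller-side handling of the new return value is roughly:

    ErrorCode code = hardware_->IsPartitionUpdateValid(partition_name, new_version);
    if (code == ErrorCode::kPayloadTimestampError && hardware_->AllowDowngrade()) {
      // A detected downgrade is the only condition AllowDowngrade() may override.
      code = ErrorCode::kSuccess;
    }
    if (code != ErrorCode::kSuccess) {
      // e.g. kDownloadManifestParseError for a malformed version field.
      return code;
    }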
Test: update_engine_unittest Bug: 162623577 Bug: 162553432 Change-Id: Ifc2a6fcd66239c755fb4f6528c3d8c6848afcb27 --- common/fake_hardware.h | 6 +- common/hardware_interface.h | 13 ++- common/utils.cc | 19 +++- common/utils.h | 13 ++- common/utils_unittest.cc | 12 +- hardware_android.cc | 14 ++- hardware_android.h | 3 +- hardware_chromeos.cc | 5 +- hardware_chromeos.h | 6 +- payload_consumer/delta_performer.cc | 106 ++++++++++++------ payload_consumer/delta_performer.h | 18 ++- .../delta_performer_integration_test.cc | 56 +++++++-- 12 files changed, 191 insertions(+), 80 deletions(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 30c08978..30b57189 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -23,6 +23,7 @@ #include +#include "update_engine/common/error_code.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/utils.h" @@ -216,8 +217,9 @@ class FakeHardware : public HardwareInterface { void SetVersion(const std::string& partition_name, std::string timestamp) { partition_timestamps_[partition_name] = std::move(timestamp); } - bool IsPartitionUpdateValid(const std::string& partition_name, - const std::string& new_version) const override { + ErrorCode IsPartitionUpdateValid( + const std::string& partition_name, + const std::string& new_version) const override { const auto old_version = GetVersionForLogging(partition_name); return utils::IsTimestampNewer(old_version, new_version); } diff --git a/common/hardware_interface.h b/common/hardware_interface.h index 0fffbfb7..b37b0074 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -25,6 +25,8 @@ #include #include +#include "update_engine/common/error_code.h" + namespace chromeos_update_engine { // The hardware interface allows access to the crossystem exposed properties, @@ -153,8 +155,15 @@ class HardwareInterface { // version number of partition `partition_name`. The notion of // "newer" is defined by this function. Caller should not make // any assumption about the underlying logic. - virtual bool IsPartitionUpdateValid(const std::string& partition_name, - const std::string& new_version) const = 0; + // Return: + // - kSuccess if update is valid. + // - kPayloadTimestampError if downgrade is detected + // - kDownloadManifestParseError if |new_version| has an incorrect format + // - Other error values if the source of error is known, or kError for + // a generic error on the device. + virtual ErrorCode IsPartitionUpdateValid( + const std::string& partition_name, + const std::string& new_version) const = 0; }; } // namespace chromeos_update_engine diff --git a/common/utils.cc b/common/utils.cc index bbb155f6..5d76f3f2 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -990,19 +990,26 @@ static bool ParseTimestamp(const std::string& str, int64_t* out) { return true; } -bool IsTimestampNewer(const std::string& old_version, - const std::string& new_version) { +ErrorCode IsTimestampNewer(const std::string& old_version, + const std::string& new_version) { if (old_version.empty() || new_version.empty()) { LOG(WARNING) << "One of old/new timestamp is empty, permit update anyway. 
Old: " << old_version << " New: " << new_version; - return true; + return ErrorCode::kSuccess; } int64_t old_ver = 0; - TEST_AND_RETURN_FALSE(ParseTimestamp(old_version, &old_ver)); + if (!ParseTimestamp(old_version, &old_ver)) { + return ErrorCode::kError; + } int64_t new_ver = 0; - TEST_AND_RETURN_FALSE(ParseTimestamp(new_version, &new_ver)); - return old_ver <= new_ver; + if (!ParseTimestamp(new_version, &new_ver)) { + return ErrorCode::kDownloadManifestParseError; + } + if (old_ver > new_ver) { + return ErrorCode::kPayloadTimestampError; + } + return ErrorCode::kSuccess; } } // namespace utils diff --git a/common/utils.h b/common/utils.h index 5dfee3bd..0a1dc0c8 100644 --- a/common/utils.h +++ b/common/utils.h @@ -324,10 +324,15 @@ std::string GetTimeAsString(time_t utime); std::string GetExclusionName(const std::string& str_to_convert); // Parse `old_version` and `new_version` as integer timestamps and -// return true if `new_version` is larger/newer. -// Returns true if either one is empty. Return false if -bool IsTimestampNewer(const std::string& old_version, - const std::string& new_version); +// Return kSuccess if `new_version` is larger/newer. +// Return kSuccess if either one is empty. +// Return kError if |old_version| is not empty and not an integer. +// Return kDownloadManifestParseError if |new_version| is not empty and not an +// integer. +// Return kPayloadTimestampError if both are integers but |new_version| < +// |old_version|. +ErrorCode IsTimestampNewer(const std::string& old_version, + const std::string& new_version); } // namespace utils diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc index 37871d2d..d73b3da7 100644 --- a/common/utils_unittest.cc +++ b/common/utils_unittest.cc @@ -482,11 +482,13 @@ TEST(UtilsTest, GetFilePathTest) { } TEST(UtilsTest, ValidatePerPartitionTimestamp) { - ASSERT_FALSE(utils::IsTimestampNewer("10", "5")); - ASSERT_TRUE(utils::IsTimestampNewer("10", "11")); - ASSERT_FALSE(utils::IsTimestampNewer("10", "lol")); - ASSERT_FALSE(utils::IsTimestampNewer("lol", "ZZZ")); - ASSERT_TRUE(utils::IsTimestampNewer("10", "")); + ASSERT_EQ(ErrorCode::kPayloadTimestampError, + utils::IsTimestampNewer("10", "5")); + ASSERT_EQ(ErrorCode::kSuccess, utils::IsTimestampNewer("10", "11")); + ASSERT_EQ(ErrorCode::kDownloadManifestParseError, + utils::IsTimestampNewer("10", "lol")); + ASSERT_EQ(ErrorCode::kError, utils::IsTimestampNewer("lol", "ZZZ")); + ASSERT_EQ(ErrorCode::kSuccess, utils::IsTimestampNewer("10", "")); } } // namespace chromeos_update_engine diff --git a/hardware_android.cc b/hardware_android.cc index 659e67e8..361b9f18 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -27,6 +27,7 @@ #include #include +#include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/utils.h" @@ -233,18 +234,19 @@ std::string HardwareAndroid::GetVersionForLogging( ""); } -bool HardwareAndroid::IsPartitionUpdateValid( +ErrorCode HardwareAndroid::IsPartitionUpdateValid( const std::string& partition_name, const std::string& new_version) const { const auto old_version = GetVersionForLogging(partition_name); // TODO(zhangkelvin) for some partitions, missing a current timestamp should // be an error, e.g. system, vendor, product etc. - auto applicable = utils::IsTimestampNewer(old_version, new_version); - if (!applicable) { - LOG(ERROR) << "Timestamp on partition " << partition_name - << " is newer than update. 
Partition timestamp: " << old_version + auto error_code = utils::IsTimestampNewer(old_version, new_version); + if (error_code != ErrorCode::kSuccess) { + LOG(ERROR) << "Timestamp check failed with " + << utils::ErrorCodeToString(error_code) + << " Partition timestamp: " << old_version << " Update timestamp: " << new_version; } - return applicable; + return error_code; } } // namespace chromeos_update_engine diff --git a/hardware_android.h b/hardware_android.h index 2e55f97f..d8fbbbe0 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -23,6 +23,7 @@ #include #include +#include "update_engine/common/error_code.h" #include "update_engine/common/hardware.h" #include "update_engine/common/hardware_interface.h" @@ -61,7 +62,7 @@ class HardwareAndroid : public HardwareInterface { void SetWarmReset(bool warm_reset) override; [[nodiscard]] std::string GetVersionForLogging( const std::string& partition_name) const override; - [[nodiscard]] bool IsPartitionUpdateValid( + [[nodiscard]] ErrorCode IsPartitionUpdateValid( const std::string& partition_name, const std::string& new_version) const override; diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index 58f30dba..807e086c 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -389,10 +389,11 @@ std::string HardwareChromeOS::GetVersionForLogging( // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS. return ""; } -bool HardwareChromeOS::IsPartitionUpdateValid( + +ErrorCode HardwareChromeOS::IsPartitionUpdateValid( const std::string& partition_name, const std::string& new_version) const { // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS. - return true; + return ErrorCode::kSuccess; } } // namespace chromeos_update_engine diff --git a/hardware_chromeos.h b/hardware_chromeos.h index 49fed88d..bbfe2739 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -25,6 +25,7 @@ #include #include +#include "update_engine/common/error_code.h" #include "update_engine/common/hardware_interface.h" namespace chromeos_update_engine { @@ -65,8 +66,9 @@ class HardwareChromeOS final : public HardwareInterface { void SetWarmReset(bool warm_reset) override; std::string GetVersionForLogging( const std::string& partition_name) const override; - bool IsPartitionUpdateValid(const std::string& partition_name, - const std::string& new_version) const override; + ErrorCode IsPartitionUpdateValid( + const std::string& partition_name, + const std::string& new_version) const override; private: friend class HardwareChromeOSTest; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index aa0b4f56..ba96047a 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -41,6 +41,8 @@ #include #include "update_engine/common/constants.h" +#include "update_engine/common/error_code.h" +#include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/subprocess.h" @@ -1628,15 +1630,19 @@ ErrorCode DeltaPerformer::ValidateManifest() { LOG(ERROR) << "Manifest contains deprecated fields."; return ErrorCode::kPayloadMismatchedType; } - TimestampCheckResult result = CheckTimestampError(); - if (result == TimestampCheckResult::DOWNGRADE) { - if (!hardware_->AllowDowngrade()) { - return ErrorCode::kPayloadTimestampError; + ErrorCode error_code = CheckTimestampError(); + if (error_code != ErrorCode::kSuccess) { + if (error_code == 
ErrorCode::kPayloadTimestampError) { + if (!hardware_->AllowDowngrade()) { + return ErrorCode::kPayloadTimestampError; + } + LOG(INFO) << "The current OS build allows downgrade, continuing to apply" + " the payload with an older timestamp."; + } else { + LOG(ERROR) << "Timestamp check returned " + << utils::ErrorCodeToString(error_code); + return error_code; } - LOG(INFO) << "The current OS build allows downgrade, continuing to apply" - " the payload with an older timestamp."; - } else if (result == TimestampCheckResult::FAILURE) { - return ErrorCode::kPayloadTimestampError; } // TODO(crbug.com/37661) we should be adding more and more manifest checks, @@ -1645,51 +1651,87 @@ ErrorCode DeltaPerformer::ValidateManifest() { return ErrorCode::kSuccess; } -TimestampCheckResult DeltaPerformer::CheckTimestampError() const { +ErrorCode DeltaPerformer::CheckTimestampError() const { bool is_partial_update = manifest_.has_partial_update() && manifest_.partial_update(); const auto& partitions = manifest_.partitions(); - auto&& timestamp_valid = [this](const PartitionUpdate& partition) { - return hardware_->IsPartitionUpdateValid(partition.partition_name(), - partition.version()); + + // Check version field for a given PartitionUpdate object. If an error + // is encountered, set |error_code| accordingly. If downgrade is detected, + // |downgrade_detected| is set. Return true if the program should continue to + // check the next partition or not, or false if it should exit early due to + // errors. + auto&& timestamp_valid = [this](const PartitionUpdate& partition, + bool allow_empty_version, + bool* downgrade_detected) -> ErrorCode { + if (!partition.has_version()) { + if (allow_empty_version) { + return ErrorCode::kSuccess; + } + LOG(ERROR) + << "PartitionUpdate " << partition.partition_name() + << " does ot have a version field. Not allowed in partial updates."; + return ErrorCode::kDownloadManifestParseError; + } + + auto error_code = hardware_->IsPartitionUpdateValid( + partition.partition_name(), partition.version()); + switch (error_code) { + case ErrorCode::kSuccess: + break; + case ErrorCode::kPayloadTimestampError: + *downgrade_detected = true; + LOG(WARNING) << "PartitionUpdate " << partition.partition_name() + << " has an older version than partition on device."; + break; + default: + LOG(ERROR) << "IsPartitionUpdateValid(" << partition.partition_name() + << ") returned" << utils::ErrorCodeToString(error_code); + break; + } + return error_code; }; + + bool downgrade_detected = false; + if (is_partial_update) { // for partial updates, all partition MUST have valid timestamps // But max_timestamp can be empty for (const auto& partition : partitions) { - if (!partition.has_version()) { - LOG(ERROR) - << "PartitionUpdate " << partition.partition_name() - << " does ot have a version field. Not allowed in partial updates."; - return TimestampCheckResult::FAILURE; - } - if (!timestamp_valid(partition)) { - // Warning because the system might allow downgrade. 
- LOG(WARNING) << "PartitionUpdate " << partition.partition_name() - << " has an older version than partition on device."; - return TimestampCheckResult::DOWNGRADE; + auto error_code = timestamp_valid( + partition, false /* allow_empty_version */, &downgrade_detected); + if (error_code != ErrorCode::kSuccess && + error_code != ErrorCode::kPayloadTimestampError) { + return error_code; } } - - return TimestampCheckResult::SUCCESS; + if (downgrade_detected) { + return ErrorCode::kPayloadTimestampError; + } + return ErrorCode::kSuccess; } + + // For non-partial updates, check max_timestamp first. if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) { LOG(ERROR) << "The current OS build timestamp (" << hardware_->GetBuildTimestamp() << ") is newer than the maximum timestamp in the manifest (" << manifest_.max_timestamp() << ")"; - return TimestampCheckResult::DOWNGRADE; + return ErrorCode::kPayloadTimestampError; } // Otherwise... partitions can have empty timestamps. for (const auto& partition : partitions) { - if (partition.has_version() && !timestamp_valid(partition)) { - // Warning because the system might allow downgrade. - LOG(WARNING) << "PartitionUpdate " << partition.partition_name() - << " has an older version than partition on device."; - return TimestampCheckResult::DOWNGRADE; + auto error_code = timestamp_valid( + partition, true /* allow_empty_version */, &downgrade_detected); + if (error_code != ErrorCode::kSuccess && + error_code != ErrorCode::kPayloadTimestampError) { + return error_code; } } - return TimestampCheckResult::SUCCESS; + if (downgrade_detected) { + return ErrorCode::kPayloadTimestampError; + } + return ErrorCode::kSuccess; } ErrorCode DeltaPerformer::ValidateOperationHash( diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 0718ef60..88076af3 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -48,13 +48,6 @@ class PrefsInterface; // This class performs the actions in a delta update synchronously. The delta // update itself should be passed in in chunks as it is received. - -enum class TimestampCheckResult { - SUCCESS, - FAILURE, - DOWNGRADE, -}; - class DeltaPerformer : public FileWriter { public: // Defines the granularity of progress logging in terms of how many "completed @@ -316,9 +309,14 @@ class DeltaPerformer : public FileWriter { // Also see comment for the static PreparePartitionsForUpdate(). bool PreparePartitionsForUpdate(uint64_t* required_size); - // Check if current manifest contains timestamp errors. (ill-formed or - // downgrade) - TimestampCheckResult CheckTimestampError() const; + // Check if current manifest contains timestamp errors. + // Return: + // - kSuccess if update is valid. + // - kPayloadTimestampError if downgrade is detected + // - kDownloadManifestParseError if |new_version| has an incorrect format + // - Other error values if the source of error is known, or kError for + // a generic error on the device. + ErrorCode CheckTimestampError() const; // Update Engine preference store. 
PrefsInterface* prefs_; diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index c257b284..d2e0f6c7 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -1206,28 +1206,68 @@ TEST_F(DeltaPerformerIntegrationTest, } TEST_F(DeltaPerformerIntegrationTest, - ValidatePerPartitionTimestampPartialUpdate) { - // The Manifest we are validating. + ValidatePerPartitionTimestampPartialUpdatePass) { + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + DeltaArchiveManifest manifest; + manifest.set_minor_version(kPartialUpdateMinorPayloadVersion); + manifest.set_partial_update(true); + AddPartition(&manifest, "product", 100); + RunManifestValidation( + manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess); +} +TEST_F(DeltaPerformerIntegrationTest, + ValidatePerPartitionTimestampPartialUpdateDowngrade) { fake_hardware_.SetVersion("system", "5"); fake_hardware_.SetVersion("product", "99"); - fake_hardware_.SetBuildTimestamp(1); + DeltaArchiveManifest manifest; + manifest.set_minor_version(kPartialUpdateMinorPayloadVersion); + manifest.set_partial_update(true); + AddPartition(&manifest, "product", 98); + RunManifestValidation(manifest, + kMaxSupportedMajorPayloadVersion, + ErrorCode::kPayloadTimestampError); +} + +TEST_F(DeltaPerformerIntegrationTest, + ValidatePerPartitionTimestampPartialUpdateMissingVersion) { + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + + DeltaArchiveManifest manifest; manifest.set_minor_version(kPartialUpdateMinorPayloadVersion); - manifest.set_max_timestamp(2); manifest.set_partial_update(true); - AddPartition(&manifest, "system", 10); { auto& partition = *manifest.add_partitions(); - // For partial updates, missing timestamp should - // trigger an error + // For partial updates, missing timestamp should trigger an error partition.set_partition_name("product"); + // has_version() == false. } + RunManifestValidation(manifest, + kMaxSupportedMajorPayloadVersion, + ErrorCode::kDownloadManifestParseError); +} +TEST_F(DeltaPerformerIntegrationTest, + ValidatePerPartitionTimestampPartialUpdateEmptyVersion) { + fake_hardware_.SetVersion("system", "5"); + fake_hardware_.SetVersion("product", "99"); + + DeltaArchiveManifest manifest; + manifest.set_minor_version(kPartialUpdateMinorPayloadVersion); + manifest.set_partial_update(true); + { + auto& partition = *manifest.add_partitions(); + // For partial updates, missing timestamp should trigger an error + partition.set_partition_name("product"); + partition.set_version("something"); + } RunManifestValidation(manifest, kMaxSupportedMajorPayloadVersion, - ErrorCode::kPayloadTimestampError); + ErrorCode::kDownloadManifestParseError); } } // namespace chromeos_update_engine From fd6640f792a54ca19cf282889d8e5c9780f04300 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 27 Aug 2020 19:13:17 -0700 Subject: [PATCH 378/624] Check boot image version before update. Rely on libkver to report boot image version and check updateability correctly before the update. 
Test: apply GKI update Bug: 162554855 Bug: 162623577 Change-Id: If7668346db5dcb03a1fdd31a738dd5952e30ca1a --- Android.bp | 3 ++ hardware_android.cc | 65 ++++++++++++++++++++++++++++++---- hardware_android.h | 7 ++++ hardware_android_unittest.cc | 67 ++++++++++++++++++++++++++++++++++++ 4 files changed, 136 insertions(+), 6 deletions(-) create mode 100644 hardware_android_unittest.cc diff --git a/Android.bp b/Android.bp index 95de8b2b..f61b2559 100644 --- a/Android.bp +++ b/Android.bp @@ -260,6 +260,7 @@ cc_defaults { ], static_libs: [ + "libkver", "libpayload_consumer", "libupdate_engine_boot_control", ], @@ -389,6 +390,7 @@ cc_binary { "libbrillo-stream", "libbrillo", "libchrome", + "libkver", ], target: { recovery: { @@ -682,6 +684,7 @@ cc_test { "common/utils_unittest.cc", "dynamic_partition_control_android_unittest.cc", "libcurl_http_fetcher_unittest.cc", + "hardware_android_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", "payload_consumer/certificate_parser_android_unittest.cc", diff --git a/hardware_android.cc b/hardware_android.cc index 361b9f18..48945224 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -17,6 +17,7 @@ #include "update_engine/hardware_android.h" #include +#include #include #include @@ -26,6 +27,8 @@ #include #include #include +#include +#include #include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware.h" @@ -35,6 +38,8 @@ using android::base::GetBoolProperty; using android::base::GetIntProperty; using android::base::GetProperty; +using android::kver::IsKernelUpdateValid; +using android::kver::KernelRelease; using std::string; namespace chromeos_update_engine { @@ -51,6 +56,11 @@ const char kPropBootHardwareSKU[] = "ro.boot.hardware.sku"; const char kPropBootRevision[] = "ro.boot.revision"; const char kPropBuildDateUTC[] = "ro.build.date.utc"; +string GetPartitionBuildDate(const string& partition_name) { + return android::base::GetProperty("ro." + partition_name + ".build.date.utc", + ""); +} + } // namespace namespace hardware { @@ -228,15 +238,33 @@ void HardwareAndroid::SetWarmReset(bool warm_reset) { } } -std::string HardwareAndroid::GetVersionForLogging( - const std::string& partition_name) const { - return android::base::GetProperty("ro." + partition_name + ".build.date.utc", - ""); +string HardwareAndroid::GetVersionForLogging( + const string& partition_name) const { + if (partition_name == "boot") { + struct utsname buf; + if (uname(&buf) != 0) { + PLOG(ERROR) << "Unable to call uname()"; + return ""; + } + auto kernel_release = + KernelRelease::Parse(buf.release, true /* allow_suffix */); + return kernel_release.has_value() ? kernel_release->string() : ""; + } + return GetPartitionBuildDate(partition_name); } ErrorCode HardwareAndroid::IsPartitionUpdateValid( - const std::string& partition_name, const std::string& new_version) const { - const auto old_version = GetVersionForLogging(partition_name); + const string& partition_name, const string& new_version) const { + if (partition_name == "boot") { + struct utsname buf; + if (uname(&buf) != 0) { + PLOG(ERROR) << "Unable to call uname()"; + return ErrorCode::kError; + } + return IsKernelUpdateValid(buf.release, new_version); + } + + const auto old_version = GetPartitionBuildDate(partition_name); // TODO(zhangkelvin) for some partitions, missing a current timestamp should // be an error, e.g. system, vendor, product etc. 
auto error_code = utils::IsTimestampNewer(old_version, new_version); @@ -249,4 +277,29 @@ ErrorCode HardwareAndroid::IsPartitionUpdateValid( return error_code; } +ErrorCode HardwareAndroid::IsKernelUpdateValid(const string& old_release, + const string& new_release) { + // Check that the package either contain an empty version (indicating that the + // new build does not use GKI), or a valid GKI kernel release. + std::optional new_kernel_release; + if (new_release.empty()) { + LOG(INFO) << "New build does not contain GKI."; + } else { + new_kernel_release = + KernelRelease::Parse(new_release, true /* allow_suffix */); + if (!new_kernel_release.has_value()) { + LOG(ERROR) << "New kernel release is not valid GKI kernel release: " + << new_release; + return ErrorCode::kDownloadManifestParseError; + } + } + + auto old_kernel_release = + KernelRelease::Parse(old_release, true /* allow_suffix */); + return android::kver::IsKernelUpdateValid(old_kernel_release, + new_kernel_release) + ? ErrorCode::kSuccess + : ErrorCode::kPayloadTimestampError; +} + } // namespace chromeos_update_engine diff --git a/hardware_android.h b/hardware_android.h index d8fbbbe0..b6704477 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -22,6 +22,7 @@ #include #include +#include #include "update_engine/common/error_code.h" #include "update_engine/common/hardware.h" @@ -67,6 +68,12 @@ class HardwareAndroid : public HardwareInterface { const std::string& new_version) const override; private: + FRIEND_TEST(HardwareAndroidTest, IsKernelUpdateValid); + + // Helper for IsPartitionUpdateValid. + static ErrorCode IsKernelUpdateValid(const std::string& old_release, + const std::string& new_release); + DISALLOW_COPY_AND_ASSIGN(HardwareAndroid); }; diff --git a/hardware_android_unittest.cc b/hardware_android_unittest.cc new file mode 100644 index 00000000..9a491f3c --- /dev/null +++ b/hardware_android_unittest.cc @@ -0,0 +1,67 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include + +#include "update_engine/common/error_code.h" +#include "update_engine/common/test_utils.h" +#include "update_engine/hardware_android.h" + +namespace chromeos_update_engine { + +TEST(HardwareAndroidTest, IsKernelUpdateValid) { + EXPECT_EQ(ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "")) + << "Legacy update should be fine"; + + EXPECT_EQ(ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", + "5.4.42-android12-0")) + << "Update to GKI should be fine"; + + EXPECT_EQ( + ErrorCode::kDownloadManifestParseError, + HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "5.4.42-not-gki")) + << "Should report parse error for invalid version field"; + + EXPECT_EQ(ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid( + "5.4.42-android12-0-something", "5.4.42-android12-0-something")) + << "Self update should be fine"; + + EXPECT_EQ(ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid( + "5.4.42-android12-0-something", "5.4.43-android12-0-something")) + << "Sub-level update should be fine"; + + EXPECT_EQ( + ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", + "5.10.10-android12-0-something")) + << "KMI version update should be fine"; + + EXPECT_EQ(ErrorCode::kPayloadTimestampError, + HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", + "5.4.5-android12-0-something")) + << "Should detect sub-level downgrade"; + + EXPECT_EQ(ErrorCode::kPayloadTimestampError, + HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", + "5.1.5-android12-0-something")) + << "Should detect KMI version downgrade"; +} + +} // namespace chromeos_update_engine From 0d01bbdfe437acd0f3fec6f78665012a84176db6 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 8 Sep 2020 15:58:44 -0700 Subject: [PATCH 379/624] Update test comment. Test: pass Change-Id: Icd0e91f2ec441e11daf609d5d1439c61c9771c69 --- payload_consumer/delta_performer_integration_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index d2e0f6c7..f2aeb03a 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -1261,7 +1261,7 @@ TEST_F(DeltaPerformerIntegrationTest, manifest.set_partial_update(true); { auto& partition = *manifest.add_partitions(); - // For partial updates, missing timestamp should trigger an error + // For partial updates, invalid timestamp should trigger an error partition.set_partition_name("product"); partition.set_version("something"); } From 8b113b21b81099df8c0bb9580729633e7f493863 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 8 Sep 2020 15:01:00 -0400 Subject: [PATCH 380/624] Add a Doxyfile for update_engine codebase With a proper Doxyfile file, developer can execute `doxygen` and automatically generate HTML files with documentation of functions, graphs for inheritance hierarchy, function call graphs, etc. Test: doxygen Change-Id: I5679d8f6b6e3e7cc20b0a76ff4abed20f57704ab --- Doxyfile | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 Doxyfile diff --git a/Doxyfile b/Doxyfile new file mode 100644 index 00000000..db31f86e --- /dev/null +++ b/Doxyfile @@ -0,0 +1,9 @@ +CLANG_DATABASE_PATH=../../ +HAVE_DOT=YES +CALL_GRAPH=YES +CALLER_GRAPH=YES +GENERATE_HTML=YES +GENERATE_LATEX=NO +INPUT=. 
+RECURSIVE=YES
+

From 59ad2731511cfb1096de52c3f1093b41101e4252 Mon Sep 17 00:00:00 2001
From: Haibo Huang
Date: Wed, 9 Sep 2020 15:23:59 -0700
Subject: [PATCH 381/624] Adds connection: close to test server

The test HTTP server does not really support connection reuse; after
each request, the connection is closed. When a URL returns a
redirection, curl will try to reuse the previous connection and fail.
This was OK before, because this kind of failure didn't count toward
the total number of connection failures. But things changed in the
latest curl: the number of redirections is effectively capped at 5,
the maximum number of allowed connection failures.

This change adds a "Connection: close" header to the test server so
that curl will not try to reuse the connection.

Test: atest update_engine_unittests
Change-Id: I443c28ff3760a1cad80cfd40e354e1d0328f7561
---
 test_http_server.cc | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/test_http_server.cc b/test_http_server.cc
index 4fc89e53..1c3a2e08 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -189,7 +189,8 @@ ssize_t WriteHeaders(int fd,
   ret = WriteString(fd,
                     string("HTTP/1.1 ") + Itoa(return_code) + " " +
                         GetHttpResponseDescription(return_code) +
-                        EOL "Content-Type: application/octet-stream" EOL);
+                        EOL "Content-Type: application/octet-stream" EOL
+                        "Connection: close" EOL);
   if (ret < 0)
     return -1;
   written += ret;
@@ -406,7 +407,9 @@ void HandleRedirect(int fd, const HttpRequest& request) {
   if ((ret = WriteString(fd, "HTTP/1.1 " + Itoa(code) + " " + status + EOL)) <
       0)
     return;
+  WriteString(fd, "Connection: close" EOL);
   WriteString(fd, "Location: " + url + EOL);
+
 }

 // Generate a page not found error response with actual text payload. Return

From f197cdb18c4c837125f8670bb4ca3ffd3e999ced Mon Sep 17 00:00:00 2001
From: Miriam Polzer
Date: Thu, 27 Aug 2020 08:18:29 +0200
Subject: [PATCH 382/624] update_engine: Add ChannelDowngradeBehavior

Add the ChannelDowngradeBehaviorPolicy to update_engine. It will be
used to decide whether to roll back and powerwash or wait until the
target channel catches up on a channel downgrade (e.g. beta to stable).
BUG=chromium:1122531 TEST=FEATURES=test emerge-amd64-generic update_engine TEST=manual test on device Change-Id: Iad075e1019084fafec8509c23f2bd55e9755b39e Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2379690 Tested-by: Miriam Polzer Commit-Queue: Amin Hassani Reviewed-by: Amin Hassani --- update_manager/boxed_value.cc | 18 +++++++ update_manager/device_policy_provider.h | 5 ++ update_manager/fake_device_policy_provider.h | 7 +++ update_manager/real_device_policy_provider.cc | 17 ++++++ update_manager/real_device_policy_provider.h | 12 +++++ .../real_device_policy_provider_unittest.cc | 53 +++++++++++++++++++ update_manager/rollback_prefs.h | 13 +++++ 7 files changed, 125 insertions(+) diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc index 4dff9efa..b499c30d 100644 --- a/update_manager/boxed_value.cc +++ b/update_manager/boxed_value.cc @@ -232,4 +232,22 @@ string BoxedValue::ValuePrinter(const void* value) { return retval; } +template <> +string BoxedValue::ValuePrinter(const void* value) { + const ChannelDowngradeBehavior* val = + reinterpret_cast(value); + switch (*val) { + case ChannelDowngradeBehavior::kUnspecified: + return "Unspecified"; + case ChannelDowngradeBehavior::kWaitForVersionToCatchUp: + return "Wait for the target channel to catch up"; + case ChannelDowngradeBehavior::kRollback: + return "Roll back and powerwash on channel downgrade"; + case ChannelDowngradeBehavior::kAllowUserToConfigure: + return "User decides on channel downgrade behavior"; + } + NOTREACHED(); + return "Unknown"; +} + } // namespace chromeos_update_manager diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h index d63c416e..f177e71e 100644 --- a/update_manager/device_policy_provider.h +++ b/update_manager/device_policy_provider.h @@ -87,6 +87,11 @@ class DevicePolicyProvider : public Provider { virtual Variable* var_disallowed_time_intervals() = 0; + // Variable that determins whether we should powerwash and rollback on channel + // downgrade for enrolled devices. 
+ virtual Variable* + var_channel_downgrade_behavior() = 0; + protected: DevicePolicyProvider() {} diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h index 352e51eb..44a94640 100644 --- a/update_manager/fake_device_policy_provider.h +++ b/update_manager/fake_device_policy_provider.h @@ -95,6 +95,11 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { return &var_disallowed_time_intervals_; } + FakeVariable* var_channel_downgrade_behavior() + override { + return &var_channel_downgrade_behavior_; + } + private: FakeVariable var_device_policy_is_loaded_{"policy_is_loaded", kVariableModePoll}; @@ -126,6 +131,8 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { "auto_launched_kiosk_app_id", kVariableModePoll}; FakeVariable var_disallowed_time_intervals_{ "disallowed_time_intervals", kVariableModePoll}; + FakeVariable var_channel_downgrade_behavior_{ + "channel_downgrade_behavior", kVariableModePoll}; DISALLOW_COPY_AND_ASSIGN(FakeDevicePolicyProvider); }; diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc index bd9d415b..51d01d5c 100644 --- a/update_manager/real_device_policy_provider.cc +++ b/update_manager/real_device_policy_provider.cc @@ -209,6 +209,21 @@ bool RealDevicePolicyProvider::ConvertHasOwner(bool* has_owner) const { return true; } +bool RealDevicePolicyProvider::ConvertChannelDowngradeBehavior( + ChannelDowngradeBehavior* channel_downgrade_behavior) const { + int behavior; + if (!policy_provider_->GetDevicePolicy().GetChannelDowngradeBehavior( + &behavior)) { + return false; + } + if (behavior < static_cast(ChannelDowngradeBehavior::kFirstValue) || + behavior > static_cast(ChannelDowngradeBehavior::kLastValue)) { + return false; + } + *channel_downgrade_behavior = static_cast(behavior); + return true; +} + void RealDevicePolicyProvider::RefreshDevicePolicy() { if (!policy_provider_->Reload()) { LOG(INFO) << "No device policies/settings present."; @@ -247,6 +262,8 @@ void RealDevicePolicyProvider::RefreshDevicePolicy() { &DevicePolicy::GetAutoLaunchedKioskAppId); UpdateVariable(&var_disallowed_time_intervals_, &RealDevicePolicyProvider::ConvertDisallowedTimeIntervals); + UpdateVariable(&var_channel_downgrade_behavior_, + &RealDevicePolicyProvider::ConvertChannelDowngradeBehavior); } } // namespace chromeos_update_manager diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h index e6df18cb..134e1189 100644 --- a/update_manager/real_device_policy_provider.h +++ b/update_manager/real_device_policy_provider.h @@ -113,6 +113,11 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { return &var_disallowed_time_intervals_; } + Variable* var_channel_downgrade_behavior() + override { + return &var_channel_downgrade_behavior_; + } + private: FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest); FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded); @@ -174,6 +179,11 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { // devices do not have an owner). bool ConvertHasOwner(bool* has_owner) const; + // Wrapper for |DevicePolicy::GetChannelDowngradeBehavior| that converts the + // result to |ChannelDowngradeBehavior|. + bool ConvertChannelDowngradeBehavior( + ChannelDowngradeBehavior* channel_downgrade_behavior) const; + // Used for fetching information about the device policy. 
policy::PolicyProvider* policy_provider_; @@ -216,6 +226,8 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { "update_time_restrictions"}; AsyncCopyVariable var_auto_launched_kiosk_app_id_{ "auto_launched_kiosk_app_id"}; + AsyncCopyVariable var_channel_downgrade_behavior_{ + "channel_downgrade_behavior"}; DISALLOW_COPY_AND_ASSIGN(RealDevicePolicyProvider); }; diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc index 1384e6f1..fd46d6eb 100644 --- a/update_manager/real_device_policy_provider_unittest.cc +++ b/update_manager/real_device_policy_provider_unittest.cc @@ -195,6 +195,8 @@ TEST_F(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyEmptyVariables) { UmTestUtils::ExpectVariableNotSet( provider_->var_auto_launched_kiosk_app_id()); UmTestUtils::ExpectVariableNotSet(provider_->var_disallowed_time_intervals()); + UmTestUtils::ExpectVariableNotSet( + provider_->var_channel_downgrade_behavior()); } TEST_F(UmRealDevicePolicyProviderTest, ValuesUpdated) { @@ -377,4 +379,55 @@ TEST_F(UmRealDevicePolicyProviderTest, DisallowedIntervalsConverted) { provider_->var_disallowed_time_intervals()); } +TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorConverted) { + SetUpExistentDevicePolicy(); + EXPECT_CALL(mock_device_policy_, GetChannelDowngradeBehavior(_)) +#if USE_DBUS + .Times(2) +#else + .Times(1) +#endif // USE_DBUS + .WillRepeatedly(DoAll(SetArgPointee<0>(static_cast( + ChannelDowngradeBehavior::kRollback)), + Return(true))); + EXPECT_TRUE(provider_->Init()); + loop_.RunOnce(false); + + UmTestUtils::ExpectVariableHasValue( + ChannelDowngradeBehavior::kRollback, + provider_->var_channel_downgrade_behavior()); +} + +TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorTooSmall) { + SetUpExistentDevicePolicy(); + EXPECT_CALL(mock_device_policy_, GetChannelDowngradeBehavior(_)) +#if USE_DBUS + .Times(2) +#else + .Times(1) +#endif // USE_DBUS + .WillRepeatedly(DoAll(SetArgPointee<0>(-1), Return(true))); + EXPECT_TRUE(provider_->Init()); + loop_.RunOnce(false); + + UmTestUtils::ExpectVariableNotSet( + provider_->var_channel_downgrade_behavior()); +} + +TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorTooLarge) { + SetUpExistentDevicePolicy(); + EXPECT_CALL(mock_device_policy_, GetChannelDowngradeBehavior(_)) +#if USE_DBUS + .Times(2) +#else + .Times(1) +#endif // USE_DBUS + .WillRepeatedly(DoAll(SetArgPointee<0>(10), Return(true))); + EXPECT_TRUE(provider_->Init()); + loop_.RunOnce(false); + + UmTestUtils::ExpectVariableNotSet( + provider_->var_channel_downgrade_behavior()); +} + } // namespace chromeos_update_manager diff --git a/update_manager/rollback_prefs.h b/update_manager/rollback_prefs.h index 95677011..6cbc447d 100644 --- a/update_manager/rollback_prefs.h +++ b/update_manager/rollback_prefs.h @@ -35,6 +35,19 @@ enum class RollbackToTargetVersion { kMaxValue = 4 }; +// Whether the device should do rollback and powerwash on channel downgrade. +// Matches chrome_device_policy.proto's +// |AutoUpdateSettingsProto::ChannelDowngradeBehavior|. +enum class ChannelDowngradeBehavior { + kUnspecified = 0, + kWaitForVersionToCatchUp = 1, + kRollback = 2, + kAllowUserToConfigure = 3, + // These values must be kept up to date. 
+ kFirstValue = kUnspecified, + kLastValue = kAllowUserToConfigure +}; + } // namespace chromeos_update_manager #endif // UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_ From a02a1f1dc837f22226499d9856a949fb180d099a Mon Sep 17 00:00:00 2001 From: Miriam Polzer Date: Thu, 27 Aug 2020 08:30:14 +0200 Subject: [PATCH 383/624] update_engine: Add minimum version policy Make the minimum version policy available in the update_engine. This policy is set to a minimum Chrome OS version the device must have to run. No-update windows will not apply if the device's version is below minimum version. If during a Kiosk update Chrome is unreachable and we can not determine the required Chrome OS version, we will fall back to minimum device version and update if the device's version is older. BUG=chromium:1117450, chromium:1084453 TEST=cros_run_unit_test --board ${BOARD} --packages update_engine Change-Id: Ie4fc868805718c9a08a562350bfb015a70a190ac Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2388067 Tested-by: Miriam Polzer Commit-Queue: Miriam Polzer Reviewed-by: Amin Hassani --- update_manager/boxed_value.cc | 9 +++++++++ update_manager/device_policy_provider.h | 5 +++++ update_manager/fake_device_policy_provider.h | 6 ++++++ update_manager/real_device_policy_provider.cc | 2 ++ update_manager/real_device_policy_provider.h | 6 ++++++ .../real_device_policy_provider_unittest.cc | 15 +++++++++++++++ 6 files changed, 43 insertions(+) diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc index b499c30d..ee7236cf 100644 --- a/update_manager/boxed_value.cc +++ b/update_manager/boxed_value.cc @@ -23,6 +23,7 @@ #include #include +#include #include "update_engine/common/utils.h" #include "update_engine/connection_utils.h" @@ -250,4 +251,12 @@ string BoxedValue::ValuePrinter(const void* value) { return "Unknown"; } +template <> +string BoxedValue::ValuePrinter(const void* value) { + const base::Version* val = reinterpret_cast(value); + if (val->IsValid()) + return val->GetString(); + return "Unknown"; +} + } // namespace chromeos_update_manager diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h index f177e71e..a59f2a39 100644 --- a/update_manager/device_policy_provider.h +++ b/update_manager/device_policy_provider.h @@ -21,6 +21,7 @@ #include #include +#include #include #include "update_engine/update_manager/provider.h" @@ -92,6 +93,10 @@ class DevicePolicyProvider : public Provider { virtual Variable* var_channel_downgrade_behavior() = 0; + // Variable that contains Chrome OS minimum required version. It contains a + // Chrome OS version number. 
+ virtual Variable* var_device_minimum_version() = 0; + protected: DevicePolicyProvider() {} diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h index 44a94640..55d66b3e 100644 --- a/update_manager/fake_device_policy_provider.h +++ b/update_manager/fake_device_policy_provider.h @@ -100,6 +100,10 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { return &var_channel_downgrade_behavior_; } + FakeVariable* var_device_minimum_version() override { + return &var_device_minimum_version_; + } + private: FakeVariable var_device_policy_is_loaded_{"policy_is_loaded", kVariableModePoll}; @@ -133,6 +137,8 @@ class FakeDevicePolicyProvider : public DevicePolicyProvider { "disallowed_time_intervals", kVariableModePoll}; FakeVariable var_channel_downgrade_behavior_{ "channel_downgrade_behavior", kVariableModePoll}; + FakeVariable var_device_minimum_version_{ + "device_minimum_version", kVariableModePoll}; DISALLOW_COPY_AND_ASSIGN(FakeDevicePolicyProvider); }; diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc index 51d01d5c..0aaf20ee 100644 --- a/update_manager/real_device_policy_provider.cc +++ b/update_manager/real_device_policy_provider.cc @@ -264,6 +264,8 @@ void RealDevicePolicyProvider::RefreshDevicePolicy() { &RealDevicePolicyProvider::ConvertDisallowedTimeIntervals); UpdateVariable(&var_channel_downgrade_behavior_, &RealDevicePolicyProvider::ConvertChannelDowngradeBehavior); + UpdateVariable(&var_device_minimum_version_, + &DevicePolicy::GetHighestDeviceMinimumVersion); } } // namespace chromeos_update_manager diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h index 134e1189..ebda8fda 100644 --- a/update_manager/real_device_policy_provider.h +++ b/update_manager/real_device_policy_provider.h @@ -118,6 +118,10 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { return &var_channel_downgrade_behavior_; } + Variable* var_device_minimum_version() override { + return &var_device_minimum_version_; + } + private: FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest); FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded); @@ -228,6 +232,8 @@ class RealDevicePolicyProvider : public DevicePolicyProvider { "auto_launched_kiosk_app_id"}; AsyncCopyVariable var_channel_downgrade_behavior_{ "channel_downgrade_behavior"}; + AsyncCopyVariable var_device_minimum_version_{ + "device_minimum_version"}; DISALLOW_COPY_AND_ASSIGN(RealDevicePolicyProvider); }; diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc index fd46d6eb..4699ad18 100644 --- a/update_manager/real_device_policy_provider_unittest.cc +++ b/update_manager/real_device_policy_provider_unittest.cc @@ -430,4 +430,19 @@ TEST_F(UmRealDevicePolicyProviderTest, ChannelDowngradeBehaviorTooLarge) { provider_->var_channel_downgrade_behavior()); } +TEST_F(UmRealDevicePolicyProviderTest, DeviceMinimumVersionPolicySet) { + SetUpExistentDevicePolicy(); + + base::Version device_minimum_version("13315.60.12"); + + EXPECT_CALL(mock_device_policy_, GetHighestDeviceMinimumVersion(_)) + .WillRepeatedly( + DoAll(SetArgPointee<0>(device_minimum_version), Return(true))); + EXPECT_TRUE(provider_->Init()); + loop_.RunOnce(false); + + UmTestUtils::ExpectVariableHasValue(device_minimum_version, + provider_->var_device_minimum_version()); +} + } // namespace chromeos_update_manager From 
18e9f3ca8d7c155da4f66aaf00e131f892fba420 Mon Sep 17 00:00:00 2001
From: Martin Stjernholm
Date: Tue, 8 Sep 2020 13:56:59 +0100
Subject: [PATCH 384/624] Use sh_binary for a host shell script.

It's more apt than cc_prebuilt_binary, and the latter breaks when it
starts to symlink to the source tree, because the script looks for
shflags relative to `readlink -f $0`.

Test: m dist
Bug: 145934348
Change-Id: I8bc80a43046e4553aa399e2ab77d268e11be325b
---
 Android.bp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Android.bp b/Android.bp
index f61b2559..a8b5fc28 100644
--- a/Android.bp
+++ b/Android.bp
@@ -730,12 +730,12 @@ cc_test {

 // Brillo update payload generation script
 // ========================================================
-cc_prebuilt_binary {
+sh_binary {
     name: "brillo_update_payload",
     device_supported: false,
     host_supported: true,
-    srcs: ["scripts/brillo_update_payload"],
+    src: "scripts/brillo_update_payload",
     required: [
         "delta_generator",
         "shflags",

From f624e1167dcfba1e7ca4ff34e10f12b2bfe6578c Mon Sep 17 00:00:00 2001
From: Amin Hassani
Date: Tue, 15 Sep 2020 15:30:08 -0700
Subject: [PATCH 385/624] Check for public key before checking for signatures

The current code breaks Chrome OS, because on Chrome OS we can install
an unsigned payload onto an image that doesn't have the update
payload's public key. The current code checks for the existence of a
signature first, but there is no point in checking for a signature if
there is no public key available to verify it. So the order needs to
be switched. This restores the older behavior.

Bug: 163153182
Test: cros flash
Change-Id: Ifa7026d2f288acdd4450017ce0d120272021267f
---
 payload_consumer/delta_performer.cc | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index ba96047a..d9efc30e 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -1818,6 +1818,16 @@ ErrorCode DeltaPerformer::VerifyPayload(
     return ErrorCode::kPayloadSizeMismatchError;
   }

+  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+  if (!perform_verification) {
+    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
+    return ErrorCode::kSuccess;
+  }
+  if (!payload_verifier) {
+    LOG(ERROR) << "Failed to create the payload verifier.";
+    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
+  }
+
   // Verifies the payload hash.
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                       !payload_hash_calculator_.raw_hash().empty());
@@ -1831,15 +1841,6 @@ ErrorCode DeltaPerformer::VerifyPayload(
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                       hash_data.size() == kSHA256Size);

-  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
-  if (!perform_verification) {
-    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
-    return ErrorCode::kSuccess;
-  }
-  if (!payload_verifier) {
-    LOG(ERROR) << "Failed to create the payload verifier.";
-    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
-  }
   if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
     // The autoupdate_CatchBadSignatures test checks for this string
     // in log-files. Keep in sync.
From d0ec651ac4ea7b6c807c3877e9534b44297d646b Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 15 Sep 2020 15:30:08 -0700 Subject: [PATCH 386/624] Add missing files to BUILD.gn Add remaining files that were missing in previous CLs in Android to Chrome OS's BUILD.gn Bug: 163153182 Test: FEATURES=test emerge-reef update_engine Change-Id: Iadccef0433ff8a0af32c2499a0835064f8b8ad51 --- BUILD.gn | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index e438af46..b7de9fc9 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -133,6 +133,7 @@ static_library("libpayload_consumer") { "common/clock.cc", "common/constants.cc", "common/cpu_limiter.cc", + "common/dynamic_partition_control_stub.cc", "common/error_code_utils.cc", "common/hash_calculator.cc", "common/http_common.cc", @@ -147,6 +148,7 @@ static_library("libpayload_consumer") { "common/utils.cc", "payload_consumer/bzip_extent_writer.cc", "payload_consumer/cached_file_descriptor.cc", + "payload_consumer/certificate_parser_stub.cc", "payload_consumer/delta_performer.cc", "payload_consumer/download_action.cc", "payload_consumer/extent_reader.cc", @@ -157,6 +159,7 @@ static_library("libpayload_consumer") { "payload_consumer/filesystem_verifier_action.cc", "payload_consumer/install_plan.cc", "payload_consumer/mount_history.cc", + "payload_consumer/partition_update_generator_stub.cc", "payload_consumer/payload_constants.cc", "payload_consumer/payload_metadata.cc", "payload_consumer/payload_verifier.cc", @@ -200,6 +203,7 @@ static_library("libupdate_engine") { "hardware_chromeos.cc", "image_properties_chromeos.cc", "libcurl_http_fetcher.cc", + "logging.cc", "metrics_reporter_omaha.cc", "metrics_utils.cc", "omaha_request_action.cc", @@ -332,7 +336,7 @@ static_library("libpayload_generator") { "payload_generator/annotated_operation.cc", "payload_generator/blob_file_writer.cc", "payload_generator/block_mapping.cc", - "payload_generator/boot_img_filesystem.cc", + "payload_generator/boot_img_filesystem_stub.cc", "payload_generator/bzip.cc", "payload_generator/deflate_utils.cc", "payload_generator/delta_diff_generator.cc", @@ -342,6 +346,7 @@ static_library("libpayload_generator") { "payload_generator/extent_utils.cc", "payload_generator/full_update_generator.cc", "payload_generator/mapfile_filesystem.cc", + "payload_generator/merge_sequence_generator.cc", "payload_generator/payload_file.cc", "payload_generator/payload_generation_config.cc", "payload_generator/payload_generation_config_chromeos.cc", @@ -493,7 +498,6 @@ if (use.test) { "payload_generator/ab_generator_unittest.cc", "payload_generator/blob_file_writer_unittest.cc", "payload_generator/block_mapping_unittest.cc", - "payload_generator/boot_img_filesystem_unittest.cc", "payload_generator/deflate_utils_unittest.cc", "payload_generator/delta_diff_utils_unittest.cc", "payload_generator/ext2_filesystem_unittest.cc", @@ -501,6 +505,7 @@ if (use.test) { "payload_generator/extent_utils_unittest.cc", "payload_generator/full_update_generator_unittest.cc", "payload_generator/mapfile_filesystem_unittest.cc", + "payload_generator/merge_sequence_generator_unittest.cc", "payload_generator/payload_file_unittest.cc", "payload_generator/payload_generation_config_unittest.cc", "payload_generator/payload_properties_unittest.cc", From 70a90f5cd22b0ccf4b4de4e1da966fa9ddaae3e9 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 15 Sep 2020 15:30:09 -0700 Subject: [PATCH 387/624] Add CleanupPreviousUpdate in a few missing places Bug: 163153182 Test: FEATURES=test emerge-reef 
update_engine
Change-Id: I99ac444eda38974838ff5867b9f86bea01d61500
---
 update_attempter.cc                     | 1 +
 update_manager/boxed_value.cc           | 2 ++
 update_manager/real_updater_provider.cc | 2 ++
 update_manager/updater_provider.h       | 1 +
 4 files changed, 6 insertions(+)

diff --git a/update_attempter.cc b/update_attempter.cc
index f37973ef..c4fe348d 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -1376,6 +1376,7 @@ void UpdateAttempter::ActionCompleted(ActionProcessor* processor,
     case UpdateStatus::REPORTING_ERROR_EVENT:
     case UpdateStatus::ATTEMPTING_ROLLBACK:
     case UpdateStatus::DISABLED:
+    case UpdateStatus::CLEANUP_PREVIOUS_UPDATE:
       MarkDeltaUpdateFailure();
       break;
   }
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index 4dff9efa..b031dfcc 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -177,6 +177,8 @@ string BoxedValue::ValuePrinter(const void* value) {
       return "Reporting Error Event";
     case Stage::kAttemptingRollback:
       return "Attempting Rollback";
+    case Stage::kCleanupPreviousUpdate:
+      return "Cleanup Previous Update";
   }
   NOTREACHED();
   return "Unknown";
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 134db691..1f9af0d8 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -169,6 +169,8 @@ const StageVariable::CurrOpStrToStage StageVariable::curr_op_str_to_stage[] = {
      Stage::kReportingErrorEvent},
     {update_engine::kUpdateStatusAttemptingRollback,
      Stage::kAttemptingRollback},
+    {update_engine::kUpdateStatusCleanupPreviousUpdate,
+     Stage::kCleanupPreviousUpdate},
 };

 const Stage* StageVariable::GetValue(TimeDelta /* timeout */, string* errmsg) {
diff --git a/update_manager/updater_provider.h b/update_manager/updater_provider.h
index cb626238..81ffb418 100644
--- a/update_manager/updater_provider.h
+++ b/update_manager/updater_provider.h
@@ -36,6 +36,7 @@ enum class Stage {
   kUpdatedNeedReboot,
   kReportingErrorEvent,
   kAttemptingRollback,
+  kCleanupPreviousUpdate,
 };

 enum class UpdateRequestStatus {

From 4bd46b3a582dfa52f25bf5066f984c49bee84799 Mon Sep 17 00:00:00 2001
From: Amin Hassani
Date: Tue, 15 Sep 2020 15:30:09 -0700
Subject: [PATCH 388/624] Fix header includes for brillo/subprocess

Chrome OS moved these files into a new directory a while ago.
Unfortunately, in the last CrOS-to-AOSP merge these includes were
reverted instead of being guarded by __CHROMEOS__. This CL adds that
guard.
Bug: 163153182 Test: FEATURES=test emerge-reef update_engine Change-Id: If56cf95b9cecbb2f9dee74e41636bd90f9ace60b --- common/http_fetcher_unittest.cc | 4 ++++ common/subprocess.cc | 1 - common/subprocess.h | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc index 589579e3..9338087d 100644 --- a/common/http_fetcher_unittest.cc +++ b/common/http_fetcher_unittest.cc @@ -37,7 +37,11 @@ #include #include #include +#ifdef __CHROMEOS__ +#include +#else #include +#endif // __CHROMEOS__ #include #include #include diff --git a/common/subprocess.cc b/common/subprocess.cc index 3e197fb2..023017b9 100644 --- a/common/subprocess.cc +++ b/common/subprocess.cc @@ -32,7 +32,6 @@ #include #include #include -#include #include #include "update_engine/common/utils.h" diff --git a/common/subprocess.h b/common/subprocess.h index 432d4cb8..179a5c51 100644 --- a/common/subprocess.h +++ b/common/subprocess.h @@ -30,8 +30,13 @@ #include #include #include +#ifdef __CHROMEOS__ +#include +#include +#else #include #include +#endif // __CHROMEOS__ #include // for FRIEND_TEST // The Subprocess class is a singleton. It's used to spawn off a subprocess From 9956320ffa4edb340d20bd7f3c852a9e87437bd3 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 15 Sep 2020 15:30:09 -0700 Subject: [PATCH 389/624] Fix remaining styling and compiling issues Bug: 163153182 Test: FEATURES=test emerge-reef update_engine Change-Id: I12d95920946fee1866f721783f16795c1ad4c2d9 --- common/fake_hardware.h | 2 +- common/subprocess_unittest.cc | 1 - libcurl_http_fetcher.cc | 13 ++++++------- payload_consumer/certificate_parser_stub.cc | 2 +- payload_consumer/certificate_parser_stub.h | 2 +- payload_consumer/partition_update_generator_stub.cc | 2 +- payload_consumer/postinstall_runner_action.cc | 1 - 7 files changed, 10 insertions(+), 13 deletions(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 30b57189..82382ffd 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -202,7 +202,7 @@ class FakeHardware : public HardwareInterface { build_timestamp_ = build_timestamp; } - void SetWarmReset(bool warm_reset) { warm_reset_ = warm_reset; } + void SetWarmReset(bool warm_reset) override { warm_reset_ = warm_reset; } // Getters to verify state. int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; } diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc index 74fee612..b4d068f0 100644 --- a/common/subprocess_unittest.cc +++ b/common/subprocess_unittest.cc @@ -75,7 +75,6 @@ class SubprocessTest : public ::testing::Test { brillo::AsynchronousSignalHandler async_signal_handler_; Subprocess subprocess_; unique_ptr watcher_; - }; namespace { diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index f8aed7c9..bce09209 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -33,7 +33,6 @@ #include #include - #ifdef __ANDROID__ #include #include @@ -464,12 +463,12 @@ void LibcurlHttpFetcher::CurlPerformOnce() { // possible to watch file descriptors. Just poll it later. This usually // happens if brillo::FakeMessageLoop is used. 
if (!base::ThreadTaskRunnerHandle::IsSet()) { - MessageLoop::current()->PostDelayedTask( - FROM_HERE, - base::Bind(&LibcurlHttpFetcher::CurlPerformOnce, - base::Unretained(this)), - TimeDelta::FromSeconds(1)); - return; + MessageLoop::current()->PostDelayedTask( + FROM_HERE, + base::Bind(&LibcurlHttpFetcher::CurlPerformOnce, + base::Unretained(this)), + TimeDelta::FromSeconds(1)); + return; } #endif SetupMessageLoopSources(); diff --git a/payload_consumer/certificate_parser_stub.cc b/payload_consumer/certificate_parser_stub.cc index 95fd6e89..a365ab84 100644 --- a/payload_consumer/certificate_parser_stub.cc +++ b/payload_consumer/certificate_parser_stub.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include +#include "update_engine/payload_consumer/certificate_parser_stub.h" namespace chromeos_update_engine { bool CertificateParserStub::ReadPublicKeysFromCertificates( diff --git a/payload_consumer/certificate_parser_stub.h b/payload_consumer/certificate_parser_stub.h index f4f8825f..a51c2c67 100644 --- a/payload_consumer/certificate_parser_stub.h +++ b/payload_consumer/certificate_parser_stub.h @@ -23,7 +23,7 @@ #include -#include "payload_consumer/certificate_parser_interface.h" +#include "update_engine/payload_consumer/certificate_parser_interface.h" namespace chromeos_update_engine { class CertificateParserStub : public CertificateParserInterface { diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc index e2b64ec5..8f73fbbd 100644 --- a/payload_consumer/partition_update_generator_stub.cc +++ b/payload_consumer/partition_update_generator_stub.cc @@ -30,7 +30,7 @@ bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload( namespace partition_update_generator { std::unique_ptr Create( - BootControlInterface* boot_control) { + BootControlInterface* boot_control, size_t block_size)) { return std::make_unique(); } } // namespace partition_update_generator diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 94d03920..e8fa81bc 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -224,7 +224,6 @@ void PostinstallRunnerAction::PerformPartitionPostinstall() { progress_fd_, base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady, base::Unretained(this))); - } void PostinstallRunnerAction::OnProgressFdReady() { From 8055010b2c0379c7f5a34c1c96f6b4988d3878a2 Mon Sep 17 00:00:00 2001 From: Jie Jiang Date: Wed, 2 Sep 2020 13:00:36 +0900 Subject: [PATCH 390/624] init: put UE into its own net_cls cgroup This patch puts update_engine into its own net_cls cgroup, and assigns handle 1:1 to this cgroup, so that we can match the packets from update_engine using iptables. 
BUG=b:167479541 TEST=able to match egress packets from update_engine using: `iptables -t mangle -A OUTPUT -m cgroup --cgroup 0x10001` Cq-Depend: chromium:2388542 Change-Id: Id9c3ced473430a27f9719f0bd3fd727e9b1d0ea2 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2387886 Tested-by: Jie Jiang Commit-Queue: Jie Jiang Reviewed-by: Amin Hassani --- init/update-engine.conf | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/init/update-engine.conf b/init/update-engine.conf index ca54c4a5..36c89d79 100644 --- a/init/update-engine.conf +++ b/init/update-engine.conf @@ -37,7 +37,17 @@ exec ionice -c3 update_engine # Put update_engine process in its own cgroup. # Default cpu.shares is 1024. post-start script - cgroup_dir="/sys/fs/cgroup/cpu/${UPSTART_JOB}" - mkdir -p "${cgroup_dir}" - echo $(status | cut -f 4 -d ' ') > "${cgroup_dir}/tasks" + pid=$(status | cut -f 4 -d ' ') + + cgroup_cpu_dir="/sys/fs/cgroup/cpu/${UPSTART_JOB}" + mkdir -p "${cgroup_cpu_dir}" + echo ${pid} > "${cgroup_cpu_dir}/tasks" + + # Assigns net_cls handle 1:1 to packets generated from update_engine. For + # routing and tagging purposes, that value will be redefined in + # patchpanel/routing_service.h . + cgroup_net_cls_dir="/sys/fs/cgroup/net_cls/${UPSTART_JOB}" + mkdir -p "${cgroup_net_cls_dir}" + echo ${pid} > "${cgroup_net_cls_dir}/tasks" + echo "0x10001" > "${cgroup_net_cls_dir}/net_cls.classid" end script From fbc57355f863b5476ae1739375bb8ce026c8a629 Mon Sep 17 00:00:00 2001 From: Vyshu Date: Wed, 9 Sep 2020 20:50:02 +0000 Subject: [PATCH 391/624] update payload : Add unittest for paycheck.py Use generated sample payloads to verify and apply each payload. BUG=chromium:1028646 TEST=sudo FEATURES=test emerge update_payload TEST=./generate_payloads Cq-Depend: chromium:2401388 Change-Id: I2b817c4b71edf4cc6bd36d9ee021366818a42ebb Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2401389 Tested-by: Vyshu Khota Commit-Queue: Vyshu Khota Reviewed-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- sample_images/generate_payloads.sh | 51 ++++++++ sample_images/sample_payloads.tar.xz | Bin 0 -> 4948 bytes scripts/paycheck_unittest.py | 105 +++++++++++++++++ scripts/run_unittests | 1 + scripts/test_paycheck.sh | 169 --------------------------- 5 files changed, 157 insertions(+), 169 deletions(-) create mode 100755 sample_images/generate_payloads.sh create mode 100644 sample_images/sample_payloads.tar.xz create mode 100755 scripts/paycheck_unittest.py delete mode 100755 scripts/test_paycheck.sh diff --git a/sample_images/generate_payloads.sh b/sample_images/generate_payloads.sh new file mode 100755 index 00000000..ee64229c --- /dev/null +++ b/sample_images/generate_payloads.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Copyright (C) 2020 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This script generates some sample payloads from the images in +# sample_images.tar.bz2. 
and packages them in the sample_payloads.tar.xz file. +# The payloads are then used in paycheck_unittests.py. The file names +# must match the ones used in update_payload ebuild and paycheck_unittests.py. + +set -e + +TEMP_IMG_DIR=./sample_images +OLD_KERNEL="${TEMP_IMG_DIR}/disk_ext2_4k_empty.img" +OLD_ROOT="${TEMP_IMG_DIR}/disk_sqfs_empty.img" +NEW_KERNEL="${TEMP_IMG_DIR}/disk_ext2_4k.img" +NEW_ROOT="${TEMP_IMG_DIR}/disk_sqfs_default.img" + + +mkdir -p "${TEMP_IMG_DIR}" +tar -xvf sample_images.tar.bz2 -C "${TEMP_IMG_DIR}" + +echo "Generating full payload" +delta_generator --out_file=full_payload.bin \ + --partition_names=kernel:root \ + --new_partitions="${NEW_KERNEL}":"${NEW_ROOT}" + +echo "Generating delta payload" +delta_generator --out_file=delta_payload.bin \ + --partition_names=kernel:root \ + --new_partitions="${NEW_KERNEL}":"${NEW_ROOT}" \ + --old_partitions="${OLD_KERNEL}":"${OLD_ROOT}" --minor_version=6 + +echo "Creating sample_payloads.tar" +tar -cJf sample_payloads.tar.xz {delta,full}_payload.bin + +rm -rf "${TEMP_IMG_DIR}" {delta,full}_payload.bin + +echo "Done" diff --git a/sample_images/sample_payloads.tar.xz b/sample_images/sample_payloads.tar.xz new file mode 100644 index 0000000000000000000000000000000000000000..d0bf6d9a1ff0bd29de2b05828f7dcd39b6589bb9 GIT binary patch literal 4948 zcmV-a6RYg~H+ooF000E$*0e?f03iVu0001VFXf})C;t-^T>vr}N!%_Fwfh~f=rmw5 z_8qFxBEJzTOFFEbsC8Qyzyx9~Qf-V<=3_+_?g$b?%_ls? zx!a)KZs0UFW{9w=SyeF@;YRU;O|XqL5MTK`bBbXahf0z&+rtp)k*j6%4g*i3$yNE2 z^m1^5%!QsJ#4B%spg+L$kx3ZGg+uSzk=3bb?Xn7Xd(IdmfwXR_U6{mvQ>&gi$+TZS zpPWs%eS-6*bEIjW-SJn@Vz0pt$W6T4>ILxsv*MFpC$PIp#GK1G5P=4d`s}1}2iP=% zGp3Iny5hpED+Aepc%0)OwjbM`PG*@JDhkhc3P%|z;T=!52(g<&CyFR<`5Xq{X)C+q z1u>$GYkltCWd&9#qw_WpA<;E&MD@vRE8^Epm8f`Z3@cp1H~=<@N8PQn0;)~xWQcZ_ zjV2IhGUH}wvEb)Uj}fa95uDGtgA$k^9%4(~kGgw)CE~ac5%~Xi5@>G3vnj2*$KprG z<#;c09yrN;E0?>qShF6CCETYfi_PFqqrZL*&Zel98q%o)a=jxR=w&&B@F&N`g)+(a z^vS;A3`lLue1DbwS>I+cv+sC)>s;#EKgKFKdKb31Zp^(OxSj!m2s~28m0G8Ce5GGa z19Qn>#NJm{m(N=j5d+o5*fH{+;)beI15@`O1K|Zjf8gV~R)(Ek9k%><_TU{5%N;u* z(Au(3v9qXiVS^PS-- z3aesA3B5OyO!6=`_yZ!Red55~M>$srE)LI9xf*Hldr@v@_BKxsU?Qc*7JyK8Jgy?C24+B>GNMYc~{nm8wfHwM($HVU<=OAthUI2jC-6Q4wXU`b+3duH3kHBbq*xAZFoMQe~0o9N#N0R3)H>jDQ zn!zEaZ+ZZGd-vj3f!OQLDU7)Z)s~?T=4H?tisye~PWD6g*H%)*s@KnE*2ZT~!`3_9 z)A^V0N>#XO09gG>i{reIf8%rL0IrQoPvUhG5G_8d5yF0x@-?~u50PO$+LO>K!VvAj z6vEGvU{3G*b3#x>+M}3edYQ3awhwCS1fZEMm zp;^}c>3>F6E_+nZYbuk_u#c_xO5h|CYvvgG_5(|Kb_>R^jNK^&DG`GJoBnOVWF=Vq z1O-VaCl2daRHkvk1TgoLw@3R%q74zVACDla_x zR(M{bwhpO5)*hO1lEwjjgY0(PcM=?Fe! 
z^D5xCJ2+p%ICFf)4Hqdxs(D~n7{D8I8`xnZNdhLy#fCva>@xd<)#LEb$~?cQ8(c-_ zOY_^*+e>iR<$VFC1Ah0R&Tt}6#4GJwV15qhzS<5@yBh?QFu>c$^Og6t1Y^ihKw-J3cpcv!?2UergtwK3|gLjvx@t$pi2+q}N%>))g&dZ_h*JOU(-FSM-uTUP16Ju;k0 z_!1KDCdE}6zPFECI2H0wILUK*9tq@kHqWtL6c?y7lva&C&hVyQX}P~_ z!S8tGw?h6;mw8^)$&sZ;l+9O&*w((4Mfp=2_ObW|6l{$4sxm;blH5n$0GqHWZ6@{3 z`v_3U6n|rINWL|ng>EPes?A7wTE3#Ycjo_nh87ClS|`W|a<=!Y{@%l3`Ts)+tL1r5 z^-w`tp|faU-C7PI94LNl4WJ-g3^#o(X-BS29 zN5IvIR>P`;n~DG1mI2UGgm>q!9Q@{(r%|Ztrh6ifp=72UPIV?nD`~@lMUsVX*F|N6 zG{9|h6UCI$3J@-%Z2G@Y6#W;=XC?y(Q-8Kn1*Zd1h0v+4;{K$33kr6(f(!?fvR6O^ zNW;(yT<6oE3Tb6*^yyp2b-^eztJUDPY&LL29S3CJmYAR7S@L|RE^dzI1RfhnO-C)Q zy&NHb{xcUKxLWvH?io10si33deF=#bZV1B5m=}B3?HUban5|piSrcl=(Qb>PI1Z$OD6YzM{!C20O z!wR4<$JM7l4RxfqM~B>alUSz>_!8}-&~s(asnn=uZ{AO~#t>(81a|oIO|)w1kQw)4 zqk!nI&D&N!F1n?lw*-j?Tl;~9RFoZgcL0~d%vWfyO%IJ@6Xta- zLs#t%P4MAMU^xvyqO-TG+DSm^r>+aMua?`jvRaMBAx)u?#5A1N(-7CgdJ z^bTQwU^xQWkO!W(Qqm!KSWpkCd+f&fE*1FK3*V46o&+-e%p*@EkZM!|_RnYj#YOaC zSwQsfu_~uh5c@pE774{6B>X0Az95YMHbk8EFbz#`(t+Cb^}Ka*hi*?1^JR*6NC>zZ zn}?QY(n~ti$lh~Z2}844*Pg1L!#=s7ZendAtUEE4hPbk@zU~v@gc0tytO5Kdo$p!E zJ zz*P-%4t5pM{e>j)+A<(|s*zeEiI3<1xq_>nOxJ)cW7B1Wkm8OZv}bSq>10YxAf68E z6xDzW7e0Ii2M8tRD~yJtcDcnbLto-&GJLElb21=O8YWrn961q!S~hmqYdU0(EgASJ zfAVmGl72b9M!R9GseI$3M$fv^G^f$1TOU~mA&U3 zNj9@!fiNtS@+NNznmd8rgcU@DU+8z!x|G?}`WPe>-XTxySwvE&DSJ+_n*w`?;CjQ9 z&~KL~fQg6ZU7tcw?>+F;mQ>pVnZK&(^g}cIbpV)xJJO!z;=xQ|H;K_~^!+*O(E~ zyb(^rQOG&9wMHkg*-mUDLcs>@>&}BemtpA)RD@q02N`P7w+ao7 z6`N7z2)h=R|fgl5~{bB{Ih9=_;S2o+@rxV&R6Gs8%yc2MzvS*ZDy1c@lVpMweAa)xs3~7A5OToufb2 zSko~RC69Qx!TW==mm^J(L)l3JTVOV_2RQs(&9u4?I?fTqyJwjPR#8K}8l!vYF0GQD z$lNkJ(Sot3F6y*c1!pzlJ62Lfp}NRh0?5>55TD?QNR!#8w0rmSDB_co z>V$#sb=SrfFsA`*z%o{#pn>iK{(?OFEV)aix0dgPF0Pvaw5EX9VDq9ny3in4`mb#jriAua^)Hg5RiH~rk^oB3%2DPLkvOI{x^8{ zoX+lblwQ#n9SDn*Y_^1xC=h^vyE}nOOI2Mu_bZ6XS0qJX1B=>~<_o||>;@`KZ)J60$GS0c!9_1Qa|B8O8;3rCz2}xf4KBIU9xGv_w z3R+QU{p6=#TVSfSS3lA`QxaYA5oB#16-taT^pQZ4#B1{&(Oz>~Pd)YULQ6xu3xNUd z_bA#?k^GVGV|&C(mdIx| zr#u(}tfy=4MF@r{48oSmR^R25!Z2-c-e7?UeI-XJXNrs8)PJ<68&Kkq*MV30GMo90 ztDI8aprcF2usj6Wm@Ojl0Z(%KZyljP#ZZnPlB-B~x^R|URjlnth{T24vg4p8`Z#J* z+1I#-j|ynce|F|F-Ev+*dj#DYE4eiYBMK5r^wG9w`1D5%(ZIelVbE6$)uF zQR%?8{>M=kt{@Zh0hU?Ynv!zKbad%XiJU9^mL*@w?Tf(oh6*{HUBUT6!z<6d@Pddy z{@qSB;b`75WceiL-ns@CH_85p*V-*aZ;LJKTj^(WAWA&#SrdcgO#wzQMplF(d*=a+ zb1OR;P4>q|Lcxm+b)wyPOB&rM=g}fas2ZMH<88vxN`&J$pPDtDG< z-?%0b?6>K&IbGvm#6R=Bq?8s!Wnqv{N5#A~9Z|lTyA(2e+1OH#>$nv6d(t`$y`<4i z|8ymBkoAI~eKq8NX9v&ZvjOl3Ddj5c>5uLkcqp=Ch6>DG_4QkKi4qd~k#_ zNGRx-r)skhBL#k`TQSHx_|drDdrodX8$cv>KOV7SyZ``jN>FqYZ)({90k9^3Pyhgj Sh5Ht<#Ao{g000001X)_32%k*= literal 0 HcmV?d00001 diff --git a/scripts/paycheck_unittest.py b/scripts/paycheck_unittest.py new file mode 100755 index 00000000..e54a3c03 --- /dev/null +++ b/scripts/paycheck_unittest.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# +# Copyright (C) 2020 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""Unit testing paycheck.py.""" + +# This test requires new (Y) and old (X) images, as well as a full payload +# from image Y and a delta payload from Y to X for each partition. +# Payloads are from sample_images/generate_payloads. +# +# The test performs the following: +# +# - It statically applies the full and delta payloads. +# +# - It applies full_payload to yield a new kernel (kern.part) and rootfs +# (root.part) and compares them to the new image partitions. +# +# - It applies delta_payload to the old image to yield a new kernel and rootfs +# and compares them to the new image partitions. +# +# Previously test_paycheck.sh. Run with update_payload ebuild. + +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +import filecmp +import os +import subprocess +import unittest + + +class PaycheckTest(unittest.TestCase): + """Test paycheck functions.""" + + def setUp(self): + self.tmpdir = os.getenv('T') + + self._full_payload = os.path.join(self.tmpdir, 'full_payload.bin') + self._delta_payload = os.path.join(self.tmpdir, 'delta_payload.bin') + + self._new_kernel = os.path.join(self.tmpdir, 'disk_ext2_4k.img') + self._new_root = os.path.join(self.tmpdir, 'disk_sqfs_default.img') + self._old_kernel = os.path.join(self.tmpdir, + 'disk_ext2_4k_empty.img') + self._old_root = os.path.join(self.tmpdir, 'disk_sqfs_empty.img') + + # Temp output files. + self._kernel_part = os.path.join(self.tmpdir, 'kern.part') + self._root_part = os.path.join(self.tmpdir, 'root.part') + + def checkPayload(self, type_arg, payload): + """Checks Payload.""" + self.assertEqual(0, subprocess.check_call(['./paycheck.py', '-t', + type_arg, payload])) + + def testFullPayload(self): + """Checks the full payload statically.""" + self.checkPayload('full', self._full_payload) + + def testDeltaPayload(self): + """Checks the delta payload statically.""" + self.checkPayload('delta', self._delta_payload) + + def testApplyFullPayload(self): + """Applies full payloads and compares results to new sample images.""" + self.assertEqual(0, subprocess.check_call(['./paycheck.py', + self._full_payload, + '--part_names', 'kernel', 'root', + '--out_dst_part_paths', + self._kernel_part, + self._root_part])) + + # Check if generated full image is equal to sample image. 
+ self.assertTrue(filecmp.cmp(self._kernel_part, self._new_kernel)) + self.assertTrue(filecmp.cmp(self._root_part, self._new_root)) + + def testApplyDeltaPayload(self): + """Applies delta to old image and checks against new sample images.""" + self.assertEqual(0, subprocess.check_call(['./paycheck.py', + self._delta_payload, + '--part_names', 'kernel', 'root', + '--src_part_paths', + self._old_kernel, self._old_root, + '--out_dst_part_paths', + self._kernel_part, + self._root_part])) + + self.assertTrue(filecmp.cmp(self._kernel_part, self._new_kernel)) + self.assertTrue(filecmp.cmp(self._root_part, self._new_root)) + +if __name__ == '__main__': + unittest.main() diff --git a/scripts/run_unittests b/scripts/run_unittests index 0d301bad..db5ed73d 100755 --- a/scripts/run_unittests +++ b/scripts/run_unittests @@ -26,5 +26,6 @@ for unittest_script in update_payload/*_unittest.py; do done ./payload_info_unittest.py +./paycheck_unittest.py exit 0 diff --git a/scripts/test_paycheck.sh b/scripts/test_paycheck.sh deleted file mode 100755 index 239b9845..00000000 --- a/scripts/test_paycheck.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/bin/bash -# -# Copyright (C) 2013 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# A test script for paycheck.py and the update_payload.py library. -# -# This script requires three payload files, along with a metadata signature for -# each, and a public key for verifying signatures. Payload include: -# -# - A full payload for release X (old_full_payload) -# -# - A full payload for release Y (new_full_payload), where Y > X -# -# - A delta payload from X to Y (delta_payload) -# -# The test performs the following: -# -# - It verifies each payload against its metadata signature, also asserting the -# payload type. Another artifact is a human-readable payload report, which -# is output to stdout to be inspected by the user. -# -# - It applies old_full_payload to yield old kernel (old_kern.part) and rootfs -# (old_root.part) partitions. -# -# - It applies delta_payload to old_{kern,root}.part to yield new kernel -# (new_delta_kern.part) and rootfs (new_delta_root.part) partitions. -# -# - It applies new_full_payload to yield reference new kernel -# (new_full_kern.part) and rootfs (new_full_root.part) partitions. -# -# - It compares new_{delta,full}_kern.part and new_{delta,full}_root.part to -# ensure that they are binary identical. -# -# If all steps have completed successfully we know with high certainty that -# paycheck.py (and hence update_payload.py) correctly parses both full and delta -# payloads, and applies them to yield the expected result. Finally, each -# paycheck.py execution is timed. - - -# Stop on errors, unset variables. -set -e -set -u - -# Temporary image files. 
-OLD_KERN_PART=old_kern.part -OLD_ROOT_PART=old_root.part -NEW_DELTA_KERN_PART=new_delta_kern.part -NEW_DELTA_ROOT_PART=new_delta_root.part -NEW_FULL_KERN_PART=new_full_kern.part -NEW_FULL_ROOT_PART=new_full_root.part -CROS_PARTS="kernel root" - - -log() { - echo "$@" >&2 -} - -die() { - log "$@" - exit 1 -} - -usage_and_exit() { - cat >&2 < Date: Thu, 17 Sep 2020 07:09:05 -0700 Subject: [PATCH 392/624] Remove syntax error Test: TreeHugger Change-Id: I17e14c42a58748a4d4d40ce3b3d82b714f793f7d --- payload_consumer/partition_update_generator_stub.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc index 8f73fbbd..cfbd5e14 100644 --- a/payload_consumer/partition_update_generator_stub.cc +++ b/payload_consumer/partition_update_generator_stub.cc @@ -30,7 +30,7 @@ bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload( namespace partition_update_generator { std::unique_ptr Create( - BootControlInterface* boot_control, size_t block_size)) { + BootControlInterface* boot_control, size_t block_size) { return std::make_unique(); } } // namespace partition_update_generator From 5e6dfe3933a7ea6e934adfa0319d52a8919a29e8 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 17 Sep 2020 15:50:33 -0700 Subject: [PATCH 393/624] Allow including file_descriptor.h in fs_mgr. Note that file_descriptor.h includes base/logging.h, which conflicts with android-base/logging.h. Since it doesn't actually need this header, change it to base/macros.h which is safe. Bug: 168554689 Test: builds Change-Id: I1e903cec857c50f988b537d87c1f254338da686c --- Android.bp | 9 +++++++++ payload_consumer/fec_file_descriptor.cc | 2 ++ payload_consumer/file_descriptor.h | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/Android.bp b/Android.bp index a8b5fc28..6585ea2e 100644 --- a/Android.bp +++ b/Android.bp @@ -755,6 +755,15 @@ cc_library_headers { export_include_dirs: ["."], apex_available: [ "com.android.gki.*", + "//apex_available:platform", ], host_supported: true, + recovery_available: true, + ramdisk_available: true, + + target: { + darwin: { + enabled: false, + }, + } } diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc index de22cf3f..3fee1963 100644 --- a/payload_consumer/fec_file_descriptor.cc +++ b/payload_consumer/fec_file_descriptor.cc @@ -16,6 +16,8 @@ #include "update_engine/payload_consumer/fec_file_descriptor.h" +#include + namespace chromeos_update_engine { bool FecFileDescriptor::Open(const char* path, int flags) { diff --git a/payload_consumer/file_descriptor.h b/payload_consumer/file_descriptor.h index 55f76c66..fb07ff0a 100644 --- a/payload_consumer/file_descriptor.h +++ b/payload_consumer/file_descriptor.h @@ -21,7 +21,7 @@ #include #include -#include +#include // Abstraction for managing opening, reading, writing and closing of file // descriptors. This includes an abstract class and one standard implementation From 126d13ee33880555bc55dd3b49fa6af53b3b0ed4 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 21 Sep 2020 19:50:06 -0700 Subject: [PATCH 394/624] Only prevent boot image downgrade if sysprop set. Only return timestamp error if ro.build.ab_update.gki.prevent_downgrade_version is set. Otherwise, log warning message and continue the update. 
This allows devices that does not support GKI updates to install OTA packages where the boot image has a lower version than the existing, which is supported before. Bug: 162623577 Test: apply OTA on devices with and without sysprop set Change-Id: Ie98fb49ffaae1aa60fc94766f53a6fbbae519a5b --- Android.bp | 2 ++ hardware_android.cc | 24 ++++++++++++++----- hardware_android.h | 10 ++++++-- hardware_android_unittest.cc | 46 ++++++++++++++++++++++++------------ 4 files changed, 59 insertions(+), 23 deletions(-) diff --git a/Android.bp b/Android.bp index 6585ea2e..193928bc 100644 --- a/Android.bp +++ b/Android.bp @@ -260,6 +260,7 @@ cc_defaults { ], static_libs: [ + "gkiprops", "libkver", "libpayload_consumer", "libupdate_engine_boot_control", @@ -383,6 +384,7 @@ cc_binary { // We add the static versions of the shared libraries that are not installed to // recovery image due to size concerns. Need to include all the static library // dependencies of these static libraries. + "gkiprops", "libevent", "libmodpb64", "libgtest_prod", diff --git a/hardware_android.cc b/hardware_android.cc index 48945224..fc6e1dc3 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -261,7 +262,10 @@ ErrorCode HardwareAndroid::IsPartitionUpdateValid( PLOG(ERROR) << "Unable to call uname()"; return ErrorCode::kError; } - return IsKernelUpdateValid(buf.release, new_version); + bool prevent_downgrade = + android::sysprop::GkiProperties::prevent_downgrade_version().value_or( + false); + return IsKernelUpdateValid(buf.release, new_version, prevent_downgrade); } const auto old_version = GetPartitionBuildDate(partition_name); @@ -278,7 +282,8 @@ ErrorCode HardwareAndroid::IsPartitionUpdateValid( } ErrorCode HardwareAndroid::IsKernelUpdateValid(const string& old_release, - const string& new_release) { + const string& new_release, + bool prevent_downgrade) { // Check that the package either contain an empty version (indicating that the // new build does not use GKI), or a valid GKI kernel release. std::optional new_kernel_release; @@ -296,10 +301,17 @@ ErrorCode HardwareAndroid::IsKernelUpdateValid(const string& old_release, auto old_kernel_release = KernelRelease::Parse(old_release, true /* allow_suffix */); - return android::kver::IsKernelUpdateValid(old_kernel_release, - new_kernel_release) - ? ErrorCode::kSuccess - : ErrorCode::kPayloadTimestampError; + bool is_update_valid = android::kver::IsKernelUpdateValid(old_kernel_release, + new_kernel_release); + + if (!is_update_valid) { + if (prevent_downgrade) { + return ErrorCode::kPayloadTimestampError; + } + LOG(WARNING) << "Boot version downgrade detected, allowing update because " + << "prevent_downgrade_version sysprop is not set."; + } + return ErrorCode::kSuccess; } } // namespace chromeos_update_engine diff --git a/hardware_android.h b/hardware_android.h index b6704477..552cb534 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -70,9 +70,15 @@ class HardwareAndroid : public HardwareInterface { private: FRIEND_TEST(HardwareAndroidTest, IsKernelUpdateValid); - // Helper for IsPartitionUpdateValid. + // Helper for IsPartitionUpdateValid. Check an update from |old_release| + // to |new_release| is valid or not. + // - If |new_release| is invalid, return kDownloadManifestParseError + // - If downgrade detected, kPayloadTimestampError if |prevent_downgrade| is + // set to true, or kSuccess if |prevent_downgrade| is set to false + // - If update is valid, kSuccess. 
static ErrorCode IsKernelUpdateValid(const std::string& old_release, - const std::string& new_release); + const std::string& new_release, + bool prevent_downgrade); DISALLOW_COPY_AND_ASSIGN(HardwareAndroid); }; diff --git a/hardware_android_unittest.cc b/hardware_android_unittest.cc index 9a491f3c..679356c9 100644 --- a/hardware_android_unittest.cc +++ b/hardware_android_unittest.cc @@ -14,54 +14,70 @@ // limitations under the License. // +#include #include #include "update_engine/common/error_code.h" -#include "update_engine/common/test_utils.h" #include "update_engine/hardware_android.h" +using ::testing::NiceMock; +using ::testing::Return; + namespace chromeos_update_engine { TEST(HardwareAndroidTest, IsKernelUpdateValid) { EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "")) + HardwareAndroid::IsKernelUpdateValid( + "5.4.42-not-gki", "", true /*prevent_downgrade*/)) << "Legacy update should be fine"; - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", - "5.4.42-android12-0")) + EXPECT_EQ( + ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid( + "5.4.42-not-gki", "5.4.42-android12-0", true /*prevent_downgrade*/)) << "Update to GKI should be fine"; - EXPECT_EQ( - ErrorCode::kDownloadManifestParseError, - HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "5.4.42-not-gki")) + EXPECT_EQ(ErrorCode::kDownloadManifestParseError, + HardwareAndroid::IsKernelUpdateValid( + "5.4.42-not-gki", "5.4.42-not-gki", true /*prevent_downgrade*/)) << "Should report parse error for invalid version field"; EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-android12-0-something", "5.4.42-android12-0-something")) + HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", + "5.4.42-android12-0-something", + true /*prevent_downgrade*/)) << "Self update should be fine"; EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-android12-0-something", "5.4.43-android12-0-something")) + HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", + "5.4.43-android12-0-something", + true /*prevent_downgrade*/)) << "Sub-level update should be fine"; EXPECT_EQ( ErrorCode::kSuccess, HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.10.10-android12-0-something")) + "5.10.10-android12-0-something", + true /*prevent_downgrade*/)) << "KMI version update should be fine"; EXPECT_EQ(ErrorCode::kPayloadTimestampError, HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.4.5-android12-0-something")) + "5.4.5-android12-0-something", + true /*prevent_downgrade*/)) << "Should detect sub-level downgrade"; EXPECT_EQ(ErrorCode::kPayloadTimestampError, HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.1.5-android12-0-something")) + "5.1.5-android12-0-something", + true /*prevent_downgrade*/)) << "Should detect KMI version downgrade"; + + EXPECT_EQ(ErrorCode::kSuccess, + HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", + "5.4.5-android12-0-something", + false /*prevent_downgrade*/)) + << "Should suppress sub-level downgrade"; } } // namespace chromeos_update_engine From a81598b5c2d7b74ba46c2daac1558cdcb959cc1b Mon Sep 17 00:00:00 2001 From: Vyshu Date: Thu, 17 Sep 2020 21:37:21 +0000 Subject: [PATCH 395/624] payload_generator: Remove unused attributes Some attributes are not used by the client. Removed all instances of of "ignored" and option ttributes from delta generation. 
Removed all instances of ImageInfo and moved target and source version to paygen_payload. BUG=b:163048638 TEST=FEATURES=test emerge-hatch update_engine TEST=FEATURES=test emerge update_payload TEST=./run_pytest lib/paygen/paygen_payload_lib_unittest.py Change-Id: I9102d37fcf054f2cbeb79e54113925a684de1cfb Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2388163 Reviewed-by: Amin Hassani Tested-by: Vyshu Khota Commit-Queue: Vyshu Khota --- .../delta_performer_integration_test.cc | 38 -------- payload_generator/generate_delta_main.cc | 94 ------------------- payload_generator/payload_file.cc | 7 -- .../payload_generation_config.cc | 11 --- payload_generator/payload_generation_config.h | 7 -- payload_generator/payload_properties.cc | 20 +--- payload_generator/payload_properties.h | 3 - .../payload_properties_unittest.cc | 18 ++-- scripts/paycheck.py | 6 -- scripts/update_payload/payload.py | 25 ----- update_metadata.proto | 16 ++-- 11 files changed, 16 insertions(+), 229 deletions(-) diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index af6682a4..1f7a119f 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -340,26 +340,6 @@ static void GenerateDeltaFile(bool full_kernel, state->image_size = utils::FileSize(state->a_img); - // Create ImageInfo A & B - ImageInfo old_image_info; - ImageInfo new_image_info; - - if (!full_rootfs) { - old_image_info.set_channel("src-channel"); - old_image_info.set_board("src-board"); - old_image_info.set_version("src-version"); - old_image_info.set_key("src-key"); - old_image_info.set_build_channel("src-build-channel"); - old_image_info.set_build_version("src-build-version"); - } - - new_image_info.set_channel("test-channel"); - new_image_info.set_board("test-board"); - new_image_info.set_version("test-version"); - new_image_info.set_key("test-key"); - new_image_info.set_build_channel("test-build-channel"); - new_image_info.set_build_version("test-build-version"); - // Make some changes to the A image. 
{ string a_mnt; @@ -513,7 +493,6 @@ static void GenerateDeltaFile(bool full_kernel, payload_config.source.partitions.front().path = state->a_img; if (!full_kernel) payload_config.source.partitions.back().path = state->old_kernel; - payload_config.source.image_info = old_image_info; EXPECT_TRUE(payload_config.source.LoadImageSize()); for (PartitionConfig& part : payload_config.source.partitions) EXPECT_TRUE(part.OpenFilesystem()); @@ -526,7 +505,6 @@ static void GenerateDeltaFile(bool full_kernel, payload_config.target.partitions.back().path = state->b_img; payload_config.target.partitions.emplace_back(kPartitionNameKernel); payload_config.target.partitions.back().path = state->new_kernel; - payload_config.target.image_info = new_image_info; EXPECT_TRUE(payload_config.target.LoadImageSize()); for (PartitionConfig& part : payload_config.target.partitions) EXPECT_TRUE(part.OpenFilesystem()); @@ -664,22 +642,6 @@ static void ApplyDeltaFile(bool full_kernel, EXPECT_FALSE(rootfs_part.old_partition_info().hash().empty()); } EXPECT_FALSE(rootfs_part.new_partition_info().hash().empty()); - - EXPECT_EQ(manifest.new_image_info().channel(), "test-channel"); - EXPECT_EQ(manifest.new_image_info().board(), "test-board"); - EXPECT_EQ(manifest.new_image_info().version(), "test-version"); - EXPECT_EQ(manifest.new_image_info().key(), "test-key"); - EXPECT_EQ(manifest.new_image_info().build_channel(), "test-build-channel"); - EXPECT_EQ(manifest.new_image_info().build_version(), "test-build-version"); - - if (!full_rootfs) { - EXPECT_EQ(manifest.old_image_info().channel(), "src-channel"); - EXPECT_EQ(manifest.old_image_info().board(), "src-board"); - EXPECT_EQ(manifest.old_image_info().version(), "src-version"); - EXPECT_EQ(manifest.old_image_info().key(), "src-key"); - EXPECT_EQ(manifest.old_image_info().build_channel(), "src-build-channel"); - EXPECT_EQ(manifest.old_image_info().build_version(), "src-build-version"); - } } MockPrefs prefs; diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index fe0a10be..8809e430 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -74,37 +74,6 @@ void ParseSignatureSizes(const string& signature_sizes_flag, } } -bool ParseImageInfo(const string& channel, - const string& board, - const string& version, - const string& key, - const string& build_channel, - const string& build_version, - ImageInfo* image_info) { - // All of these arguments should be present or missing. - bool empty = channel.empty(); - - CHECK_EQ(channel.empty(), empty); - CHECK_EQ(board.empty(), empty); - CHECK_EQ(version.empty(), empty); - CHECK_EQ(key.empty(), empty); - - if (empty) - return false; - - image_info->set_channel(channel); - image_info->set_board(board); - image_info->set_version(version); - image_info->set_key(key); - - image_info->set_build_channel(build_channel.empty() ? channel - : build_channel); - - image_info->set_build_version(build_version.empty() ? version - : build_version); - - return true; -} void CalculateHashForSigning(const vector& sizes, const string& out_hash_file, @@ -386,51 +355,6 @@ int Main(int argc, char** argv) { "The maximum timestamp of the OS allowed to apply this " "payload."); - DEFINE_string(old_channel, - "", - "The channel for the old image. 'dev-channel', 'npo-channel', " - "etc. Ignored, except during delta generation."); - DEFINE_string(old_board, - "", - "The board for the old image. 'x86-mario', 'lumpy', " - "etc. 
Ignored, except during delta generation."); - DEFINE_string( - old_version, "", "The build version of the old image. 1.2.3, etc."); - DEFINE_string(old_key, - "", - "The key used to sign the old image. 'premp', 'mp', 'mp-v3'," - " etc"); - DEFINE_string(old_build_channel, - "", - "The channel for the build of the old image. 'dev-channel', " - "etc, but will never contain special channels such as " - "'npo-channel'. Ignored, except during delta generation."); - DEFINE_string(old_build_version, - "", - "The version of the build containing the old image."); - - DEFINE_string(new_channel, - "", - "The channel for the new image. 'dev-channel', 'npo-channel', " - "etc. Ignored, except during delta generation."); - DEFINE_string(new_board, - "", - "The board for the new image. 'x86-mario', 'lumpy', " - "etc. Ignored, except during delta generation."); - DEFINE_string( - new_version, "", "The build version of the new image. 1.2.3, etc."); - DEFINE_string(new_key, - "", - "The key used to sign the new image. 'premp', 'mp', 'mp-v3'," - " etc"); - DEFINE_string(new_build_channel, - "", - "The channel for the build of the new image. 'dev-channel', " - "etc, but will never contain special channels such as " - "'npo-channel'. Ignored, except during delta generation."); - DEFINE_string(new_build_version, - "", - "The version of the build containing the new image."); DEFINE_string(new_postinstall_config_file, "", "A config file specifying postinstall related metadata. " @@ -600,24 +524,6 @@ int Main(int argc, char** argv) { CHECK(!FLAGS_out_file.empty()); - // Ignore failures. These are optional arguments. - ParseImageInfo(FLAGS_new_channel, - FLAGS_new_board, - FLAGS_new_version, - FLAGS_new_key, - FLAGS_new_build_channel, - FLAGS_new_build_version, - &payload_config.target.image_info); - - // Ignore failures. These are optional arguments. 
- ParseImageInfo(FLAGS_old_channel, - FLAGS_old_board, - FLAGS_old_version, - FLAGS_old_key, - FLAGS_old_build_channel, - FLAGS_old_build_version, - &payload_config.source.image_info); - payload_config.rootfs_partition_size = FLAGS_rootfs_partition_size; if (payload_config.is_delta) { diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index b55d03c0..aa9b4d36 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -64,13 +64,6 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { TEST_AND_RETURN_FALSE(config.version.Validate()); major_version_ = config.version.major; manifest_.set_minor_version(config.version.minor); - - if (!config.source.ImageInfoIsEmpty()) - *(manifest_.mutable_old_image_info()) = config.source.image_info; - - if (!config.target.ImageInfoIsEmpty()) - *(manifest_.mutable_new_image_info()) = config.target.image_info; - manifest_.set_block_size(config.block_size); manifest_.set_max_timestamp(config.max_timestamp); diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index 71587963..90d9f1b1 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -103,7 +103,6 @@ bool PartitionConfig::OpenFilesystem() { } bool ImageConfig::ValidateIsEmpty() const { - TEST_AND_RETURN_FALSE(ImageInfoIsEmpty()); return partitions.empty(); } @@ -206,13 +205,6 @@ bool ImageConfig::ValidateDynamicPartitionMetadata() const { return true; } -bool ImageConfig::ImageInfoIsEmpty() const { - return image_info.board().empty() && image_info.key().empty() && - image_info.channel().empty() && image_info.version().empty() && - image_info.build_channel().empty() && - image_info.build_version().empty(); -} - PayloadVersion::PayloadVersion(uint64_t major_version, uint32_t minor_version) { major = major_version; minor = minor_version; @@ -278,9 +270,6 @@ bool PayloadGenerationConfig::Validate() const { TEST_AND_RETURN_FALSE(part.verity.IsEmpty()); } - // If new_image_info is present, old_image_info must be present. - TEST_AND_RETURN_FALSE(source.ImageInfoIsEmpty() == - target.ImageInfoIsEmpty()); } else { // All the "source" image fields must be empty for full payloads. TEST_AND_RETURN_FALSE(source.ValidateIsEmpty()); diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h index 32f12292..b7b2eb0f 100644 --- a/payload_generator/payload_generation_config.h +++ b/payload_generator/payload_generation_config.h @@ -143,13 +143,6 @@ struct ImageConfig { // Validate |dynamic_partition_metadata| against |partitions|. bool ValidateDynamicPartitionMetadata() const; - // Returns whether the |image_info| field is empty. - bool ImageInfoIsEmpty() const; - - // The ImageInfo message defined in the update_metadata.proto file describes - // the metadata of the image. - ImageInfo image_info; - // The updated partitions. std::vector partitions; diff --git a/payload_generator/payload_properties.cc b/payload_generator/payload_properties.cc index bc82eb7a..bcf4fbda 100644 --- a/payload_generator/payload_properties.cc +++ b/payload_generator/payload_properties.cc @@ -47,8 +47,6 @@ const char kPayloadPropertyJsonMetadataSignature[] = "metadata_signature"; // These are needed by the Nebraska and devserver. 
const char kPayloadPropertyJsonPayloadSize[] = "size"; const char kPayloadPropertyJsonIsDelta[] = "is_delta"; -const char kPayloadPropertyJsonTargetVersion[] = "target_version"; -const char kPayloadPropertyJsonSourceVersion[] = "source_version"; } // namespace PayloadProperties::PayloadProperties(const string& payload_path) @@ -65,10 +63,6 @@ bool PayloadProperties::GetPropertiesAsJson(string* json_str) { properties.SetInteger(kPayloadPropertyJsonPayloadSize, payload_size_); properties.SetString(kPayloadPropertyJsonPayloadHash, payload_hash_); properties.SetBoolean(kPayloadPropertyJsonIsDelta, is_delta_); - properties.SetString(kPayloadPropertyJsonTargetVersion, target_version_); - if (is_delta_) { - properties.SetString(kPayloadPropertyJsonSourceVersion, source_version_); - } return base::JSONWriter::Write(properties, json_str); } @@ -119,23 +113,11 @@ bool PayloadProperties::LoadFromPayload() { metadata_signatures_ = base::JoinString(base64_signatures, ":"); } - is_delta_ = manifest.has_old_image_info() || - std::any_of(manifest.partitions().begin(), + is_delta_ = std::any_of(manifest.partitions().begin(), manifest.partitions().end(), [](const PartitionUpdate& part) { return part.has_old_partition_info(); }); - - if (manifest.has_new_image_info()) { - target_version_ = manifest.new_image_info().version(); - } else { - target_version_ = "99999.0.0"; - } - - // No need to set the source version if it was not a delta payload. - if (is_delta_ && manifest.has_old_image_info()) { - source_version_ = manifest.old_image_info().version(); - } return true; } diff --git a/payload_generator/payload_properties.h b/payload_generator/payload_properties.h index 3b34511c..846b181d 100644 --- a/payload_generator/payload_properties.h +++ b/payload_generator/payload_properties.h @@ -62,9 +62,6 @@ class PayloadProperties { // Whether the payload is a delta (true) or full (false). bool is_delta_; - std::string target_version_; - std::string source_version_; - DISALLOW_COPY_AND_ASSIGN(PayloadProperties); }; diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc index db3902ce..a923379e 100644 --- a/payload_generator/payload_properties_unittest.cc +++ b/payload_generator/payload_properties_unittest.cc @@ -57,8 +57,6 @@ class PayloadPropertiesTest : public ::testing::Test { PayloadGenerationConfig config; config.version.major = kBrilloMajorPayloadVersion; config.version.minor = kSourceMinorPayloadVersion; - config.source.image_info.set_version("123.0.0"); - config.target.image_info.set_version("456.7.8"); PayloadFile payload; EXPECT_TRUE(payload.Init(config)); @@ -114,11 +112,9 @@ TEST_F(PayloadPropertiesTest, GetPropertiesAsJsonTestHash) { "{" R"("is_delta":true,)" R"("metadata_signature":"",)" - R"("metadata_size":187,)" - R"("sha256_hex":"Rtrj9v3xXhrAi1741HAojtGxAQEOZ7mDyhzskIF4PJc=",)" - R"("size":233,)" - R"("source_version":"123.0.0",)" - R"("target_version":"456.7.8",)" + R"("metadata_size":165,)" + R"("sha256_hex":"cV7kfZBH3K0B6QJHxxykDh6b6x0WgVOmc63whPLOy7U=",)" + R"("size":211,)" R"("version":2)" "}"; string json; @@ -130,10 +126,10 @@ TEST_F(PayloadPropertiesTest, GetPropertiesAsJsonTestHash) { // Validate the hash of file and metadata are within the output. 
TEST_F(PayloadPropertiesTest, GetPropertiesAsKeyValueTestHash) { constexpr char kKeyValueProperties[] = - "FILE_HASH=Rtrj9v3xXhrAi1741HAojtGxAQEOZ7mDyhzskIF4PJc=\n" - "FILE_SIZE=233\n" - "METADATA_HASH=kiXTexy/s2aPttf4+r8KRZWYZ6FYvwhU6rJGcnnI+U0=\n" - "METADATA_SIZE=187\n"; + "FILE_HASH=cV7kfZBH3K0B6QJHxxykDh6b6x0WgVOmc63whPLOy7U=\n" + "FILE_SIZE=211\n" + "METADATA_HASH=aEKYyzJt2E8Gz8fzB+gmekN5mriotZCSq6R+kDfdeV4=\n" + "METADATA_SIZE=165\n"; string key_value; EXPECT_TRUE(PayloadProperties{payload_file.path()}.GetPropertiesAsKeyValue( &key_value)); diff --git a/scripts/paycheck.py b/scripts/paycheck.py index f4ccca2c..8eb0033d 100755 --- a/scripts/paycheck.py +++ b/scripts/paycheck.py @@ -92,9 +92,6 @@ def ParseArguments(argv): check_args.add_argument('-c', '--check', action='store_true', default=False, help=('force payload integrity check (e.g. before ' 'applying)')) - check_args.add_argument('-D', '--describe', action='store_true', - default=False, - help='Print a friendly description of the payload.') check_args.add_argument('-r', '--report', metavar='FILE', help="dump payload report (`-' for stdout)") check_args.add_argument('-t', '--type', dest='assert_type', @@ -209,9 +206,6 @@ def main(argv): # Initialize payload. payload.Init() - if args.describe: - payload.Describe() - # Perform payload integrity checks. if args.check: report_file = None diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py index ea5ed308..61423a9f 100644 --- a/scripts/update_payload/payload.py +++ b/scripts/update_payload/payload.py @@ -226,31 +226,6 @@ def Init(self): self.is_init = True - def Describe(self): - """Emits the payload embedded description data to standard output.""" - def _DescribeImageInfo(description, image_info): - """Display info about the image.""" - def _DisplayIndentedValue(name, value): - print(' {:<14} {}'.format(name+':', value)) - - print('%s:' % description) - _DisplayIndentedValue('Channel', image_info.channel) - _DisplayIndentedValue('Board', image_info.board) - _DisplayIndentedValue('Version', image_info.version) - _DisplayIndentedValue('Key', image_info.key) - - if image_info.build_channel != image_info.channel: - _DisplayIndentedValue('Build channel', image_info.build_channel) - - if image_info.build_version != image_info.version: - _DisplayIndentedValue('Build version', image_info.build_version) - - if self.manifest.HasField('old_image_info'): - _DescribeImageInfo('Old Image', self.manifest.old_image_info) - - if self.manifest.HasField('new_image_info'): - _DescribeImageInfo('New Image', self.manifest.new_image_info) - def _AssertInit(self): """Raises an exception if the object was not initialized.""" if not self.is_init: diff --git a/update_metadata.proto b/update_metadata.proto index 3d136cad..22108179 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -139,16 +139,16 @@ message PartitionInfo { // // All fields will be set, if this message is present. message ImageInfo { - optional string board = 1; - optional string key = 2; - optional string channel = 3; - optional string version = 4; + optional string board = 1 [deprecated = true]; + optional string key = 2 [deprecated = true]; + optional string channel = 3 [deprecated = true]; + optional string version = 4 [deprecated = true]; // If these values aren't present, they should be assumed to match // the equivalent value above. They are normally only different for // special image types such as nplusone images. 
- optional string build_channel = 5; - optional string build_version = 6; + optional string build_channel = 5 [deprecated = true]; + optional string build_version = 6 [deprecated = true]; } message InstallOperation { @@ -325,9 +325,9 @@ message DeltaArchiveManifest { optional PartitionInfo new_rootfs_info = 9 [deprecated = true]; // old_image_info will only be present for delta images. - optional ImageInfo old_image_info = 10; + optional ImageInfo old_image_info = 10 [deprecated = true]; - optional ImageInfo new_image_info = 11; + optional ImageInfo new_image_info = 11 [deprecated = true]; // The minor version, also referred as "delta version", of the payload. optional uint32 minor_version = 12 [default = 0]; From da1b3145df344b2bf597bfbb5636e1bb74642389 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 24 Sep 2020 17:09:02 -0400 Subject: [PATCH 396/624] Add FeatureFlag for Virtual AB Compression Test: treehugger Bug: 168554689 Change-Id: I732cd1ef55c5c4362ef88a640c2ca228f472af89 --- common/dynamic_partition_control_interface.h | 2 ++ common/dynamic_partition_control_stub.cc | 4 ++++ common/dynamic_partition_control_stub.h | 1 + dynamic_partition_control_android.cc | 21 ++++++++++++++++++-- dynamic_partition_control_android.h | 2 ++ 5 files changed, 28 insertions(+), 2 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 7c2d0b0c..22f6db87 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -56,6 +56,8 @@ class DynamicPartitionControlInterface { // Return the feature flags of Virtual A/B on this device. virtual FeatureFlag GetVirtualAbFeatureFlag() = 0; + // Return the feature flags of Virtual A/B Compression on this device. + virtual FeatureFlag GetVirtualAbCompressionFeatureFlag() = 0; // Attempt to optimize |operation|. 
// If successful, |optimized| contains an operation with extents that diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 5a8ca434..c63a8ff6 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -33,6 +33,10 @@ FeatureFlag DynamicPartitionControlStub::GetVirtualAbFeatureFlag() { return FeatureFlag(FeatureFlag::Value::NONE); } +FeatureFlag DynamicPartitionControlStub::GetVirtualAbCompressionFeatureFlag() { + return FeatureFlag(FeatureFlag::Value::NONE); +} + bool DynamicPartitionControlStub::OptimizeOperation( const std::string& partition_name, const InstallOperation& operation, diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 94dba1bc..8bff4743 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -31,6 +31,7 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { public: FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; + FeatureFlag GetVirtualAbCompressionFeatureFlag() override; bool OptimizeOperation(const std::string& partition_name, const InstallOperation& operation, InstallOperation* optimized) override; diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index ccb99ba4..c9888abd 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -71,6 +71,14 @@ constexpr char kRetrfoitDynamicPartitions[] = "ro.boot.dynamic_partitions_retrofit"; constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled"; constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit"; +constexpr char kVirtualAbCompressionEnabled[] = + "ro.virtual_ab.compression.enabled"; + +// Currently, android doesn't have a retrofit prop for VAB Compression. However, +// struct FeatureFlag forces us to determine if a feature is 'retrofit'. So this +// is here just to simplify code. Replace it with real retrofit prop name once +// there is one. +constexpr char kVirtualAbCompressionRetrofit[] = ""; constexpr char kPostinstallFstabPrefix[] = "ro.postinstall.fstab.prefix"; // Map timeout for dynamic partitions. constexpr std::chrono::milliseconds kMapTimeout{1000}; @@ -90,7 +98,9 @@ DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { static FeatureFlag GetFeatureFlag(const char* enable_prop, const char* retrofit_prop) { - bool retrofit = GetBoolProperty(retrofit_prop, false); + // Default retrofit to false if retrofit_prop is empty. 
+ bool retrofit = retrofit_prop && retrofit_prop[0] != '\0' && + GetBoolProperty(retrofit_prop, false); bool enabled = GetBoolProperty(enable_prop, false); if (retrofit && !enabled) { LOG(ERROR) << retrofit_prop << " is true but " << enable_prop @@ -109,7 +119,9 @@ static FeatureFlag GetFeatureFlag(const char* enable_prop, DynamicPartitionControlAndroid::DynamicPartitionControlAndroid() : dynamic_partitions_( GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions)), - virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) { + virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)), + virtual_ab_compression_(GetFeatureFlag(kVirtualAbCompressionEnabled, + kVirtualAbCompressionRetrofit)) { if (GetVirtualAbFeatureFlag().IsEnabled()) { snapshot_ = SnapshotManager::New(); } else { @@ -126,6 +138,11 @@ FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() { return virtual_ab_; } +FeatureFlag +DynamicPartitionControlAndroid::GetVirtualAbCompressionFeatureFlag() { + return virtual_ab_compression_; +} + bool DynamicPartitionControlAndroid::OptimizeOperation( const std::string& partition_name, const InstallOperation& operation, diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 49967f6c..f3805f0e 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -36,6 +36,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { ~DynamicPartitionControlAndroid(); FeatureFlag GetDynamicPartitionsFeatureFlag() override; FeatureFlag GetVirtualAbFeatureFlag() override; + FeatureFlag GetVirtualAbCompressionFeatureFlag() override; bool OptimizeOperation(const std::string& partition_name, const InstallOperation& operation, InstallOperation* optimized) override; @@ -277,6 +278,7 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { std::set mapped_devices_; const FeatureFlag dynamic_partitions_; const FeatureFlag virtual_ab_; + const FeatureFlag virtual_ab_compression_; std::unique_ptr snapshot_; std::unique_ptr metadata_device_; bool target_supports_snapshot_ = false; From 213e2be8e290c757bc25647b2963ddc9b8f087b1 Mon Sep 17 00:00:00 2001 From: Miriam Polzer Date: Fri, 29 May 2020 10:25:09 +0200 Subject: [PATCH 397/624] update_engine: Kiosk version test and comment The required Chrome OS version (target version prefix) variable for kiosks is set to an empty string instead of a null pointer after 5 failed retrieval attempts. Omaha treats an empty target version prefix the same as no target version prefix. This resulted in kiosks updating to versions greater than their set version prefix would allow. Document that an empty string as result means the version could not be read and add a test for this behavior. The behavior for empty kiosk version is changed in crrev.com/c/2416628. 
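As a rough, self-contained illustration of the retry-then-give-up pattern described above; the class name and simplified interface are assumptions, not the real RetryPollVariable API.

#include <functional>
#include <memory>

// Illustrative stand-in for RetryPollVariable: after max_retry consecutive
// fetch failures it stops returning nullptr and hands back a default value
// (an empty string in the kiosk platform-version case).
template <typename T>
class RetryingValue {
 public:
  RetryingValue(std::function<bool(T*)> fetch, int max_retry)
      : fetch_(std::move(fetch)), max_retry_(max_retry) {}

  // Returns nullptr while retries remain, a default-constructed T once the
  // retry budget is exhausted, and the fetched value on success.
  std::unique_ptr<T> GetValue() {
    auto result = std::make_unique<T>();
    if (!fetch_(result.get())) {
      if (failed_attempts_ >= max_retry_)
        return result;  // Give up: empty value signals "could not be read".
      ++failed_attempts_;
      return nullptr;
    }
    failed_attempts_ = 0;
    return result;
  }

 private:
  std::function<bool(T*)> fetch_;
  const int max_retry_;
  int failed_attempts_ = 0;
};

A caller polling the kiosk required platform version would therefore see an unset variable for the first max_retry failures and an empty string afterwards, which Omaha treats the same as no target version prefix.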
BUG=chromium:1084453 TEST=FEATURES=test emerge-amd64-generic update_engine Change-Id: Ia121882a9d1099e63ae87705b082cb25eaacc14a Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2218314 Tested-by: Miriam Polzer Commit-Queue: Miriam Polzer Reviewed-by: Amin Hassani --- update_manager/real_system_provider.cc | 5 +++-- update_manager/real_system_provider_unittest.cc | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc index a900071a..39eba221 100644 --- a/update_manager/real_system_provider.cc +++ b/update_manager/real_system_provider.cc @@ -64,9 +64,10 @@ class RetryPollVariable : public Variable { std::unique_ptr result(new T()); if (!func_.Run(result.get())) { if (failed_attempts_ >= kRetryPollVariableMaxRetry) { - // Give up on the retries, set back the desired polling interval and - // return the default. + // Give up on the retries and set back the desired polling interval. this->SetPollInterval(base_interval_); + // Release the result instead of returning a |nullptr| to indicate that + // the result could not be fetched. return result.release(); } this->SetPollInterval( diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc index f654f7ac..3996b654 100644 --- a/update_manager/real_system_provider_unittest.cc +++ b/update_manager/real_system_provider_unittest.cc @@ -119,6 +119,22 @@ TEST_F(UmRealSystemProviderTest, std::string(kRequiredPlatformVersion), provider_->var_kiosk_required_platform_version()); } + +TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersionRepeatedFailure) { + // Simulate unreadable platform version. The variable should return a + // null pointer |kRetryPollVariableMaxRetry| times and then return an empty + // string to indicate that it gave up. + constexpr int kNumMethodCalls = 5; + EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion) + .Times(kNumMethodCalls + 1) + .WillRepeatedly(Return(false)); + for (int i = 0; i < kNumMethodCalls; ++i) { + UmTestUtils::ExpectVariableNotSet( + provider_->var_kiosk_required_platform_version()); + } + UmTestUtils::ExpectVariableHasValue( + std::string(""), provider_->var_kiosk_required_platform_version()); +} #else TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) { UmTestUtils::ExpectVariableHasValue( From c0840c469b96456405d1ac2b44f5113e28b58f1d Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Sun, 27 Sep 2020 18:17:33 -0700 Subject: [PATCH 398/624] update_payload: Remove signature version checking aosp/1130669 deprecated the 'version' field from the Signatures message. But the update_payload code wasn't updated, so this CL removes that check. 
BUG=b:169526824 TEST=cros_generate_update_payload --debug --tgt-image dlc.img --src-image dlc.img --output delta.bin --private-key ~/.ssh/testing_rsa --check Change-Id: I20aca13eeb33c5c1d2aa72513649198868ab00be Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2434552 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim --- scripts/update_payload/checker.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py index 4c655160..58ad85cb 100644 --- a/scripts/update_payload/checker.py +++ b/scripts/update_payload/checker.py @@ -1148,17 +1148,13 @@ def _CheckSignatures(self, report, pubkey_file_name): sig_report = report.AddSubReport(sig_name) # Check: Signature contains mandatory fields. - self._CheckMandatoryField(sig, 'version', sig_report, sig_name) self._CheckMandatoryField(sig, 'data', None, sig_name) sig_report.AddField('data len', len(sig.data)) # Check: Signatures pertains to actual payload hash. - if sig.version == 1: + if sig.data: self._CheckSha256Signature(sig.data, pubkey_file_name, payload_hasher.digest(), sig_name) - else: - raise error.PayloadError('Unknown signature version (%d).' % - sig.version) def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, part_sizes=None, report_out_file=None): From 70ba92e41473fc30f9276c6eef897a7ad0f9d758 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 22 Sep 2020 23:56:00 +0000 Subject: [PATCH 399/624] Read boot image version from ro.bootimage.build.date.utc This change re-defines boot image downgrade check logic. The sysprop ro.bootimage.build.date.utc is checked instead. If the sysprop ro.build.ab_update.gki.prevent_downgrade_version is not set, any detected downgrades are suppressed. 
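A minimal sketch of the described check, under the assumption that both values are decimal UTC build timestamps; the helper name and the direct use of android::base property getters are illustrative rather than the code added here.

#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/properties.h>

#include <cstdint>
#include <string>

// Hypothetical helper: returns true if installing |new_version| over the
// current boot image should be allowed.
bool IsBootImageUpdateAllowed(const std::string& new_version) {
  const std::string old_version =
      android::base::GetProperty("ro.bootimage.build.date.utc", "");
  int64_t old_ts = 0;
  int64_t new_ts = 0;
  if (!android::base::ParseInt(old_version, &old_ts) ||
      !android::base::ParseInt(new_version, &new_ts)) {
    return true;  // Missing or unparsable timestamps are not treated as errors here.
  }
  if (new_ts >= old_ts)
    return true;
  LOG(WARNING) << "Boot image timestamp moved backwards: " << old_ts << " -> "
               << new_ts;
  // Suppress the downgrade error unless the device opts in to prevention.
  return !android::base::GetBoolProperty(
      "ro.build.ab_update.gki.prevent_downgrade_version", false);
}

The actual change keeps utils::IsTimestampNewer() for the comparison and only overrides a kPayloadTimestampError result when the prevention sysprop is unset, as the diff below shows.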
Bug: 162554855 Bug: 162623577 Bug: 169169031 Test: pass Change-Id: Ib86cb40576a852e0654a4c22d81c876d0315d0d2 --- Android.bp | 3 -- hardware_android.cc | 96 +++++++++++++----------------------- hardware_android.h | 13 ----- hardware_android_unittest.cc | 83 ------------------------------- 4 files changed, 35 insertions(+), 160 deletions(-) delete mode 100644 hardware_android_unittest.cc diff --git a/Android.bp b/Android.bp index 193928bc..28e1bab5 100644 --- a/Android.bp +++ b/Android.bp @@ -261,7 +261,6 @@ cc_defaults { static_libs: [ "gkiprops", - "libkver", "libpayload_consumer", "libupdate_engine_boot_control", ], @@ -392,7 +391,6 @@ cc_binary { "libbrillo-stream", "libbrillo", "libchrome", - "libkver", ], target: { recovery: { @@ -686,7 +684,6 @@ cc_test { "common/utils_unittest.cc", "dynamic_partition_control_android_unittest.cc", "libcurl_http_fetcher_unittest.cc", - "hardware_android_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", "payload_consumer/certificate_parser_android_unittest.cc", diff --git a/hardware_android.cc b/hardware_android.cc index fc6e1dc3..a659bf67 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -17,7 +17,6 @@ #include "update_engine/hardware_android.h" #include -#include #include #include @@ -28,8 +27,6 @@ #include #include #include -#include -#include #include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware.h" @@ -39,8 +36,6 @@ using android::base::GetBoolProperty; using android::base::GetIntProperty; using android::base::GetProperty; -using android::kver::IsKernelUpdateValid; -using android::kver::KernelRelease; using std::string; namespace chromeos_update_engine { @@ -62,6 +57,19 @@ string GetPartitionBuildDate(const string& partition_name) { ""); } +ErrorCode IsTimestampNewerLogged(const std::string& partition_name, + const std::string& old_version, + const std::string& new_version) { + auto error_code = utils::IsTimestampNewer(old_version, new_version); + if (error_code != ErrorCode::kSuccess) { + LOG(WARNING) << "Timestamp check failed with " + << utils::ErrorCodeToString(error_code) << ": " + << partition_name << " Partition timestamp: " << old_version + << " Update timestamp: " << new_version; + } + return error_code; +} + } // namespace namespace hardware { @@ -242,14 +250,8 @@ void HardwareAndroid::SetWarmReset(bool warm_reset) { string HardwareAndroid::GetVersionForLogging( const string& partition_name) const { if (partition_name == "boot") { - struct utsname buf; - if (uname(&buf) != 0) { - PLOG(ERROR) << "Unable to call uname()"; - return ""; - } - auto kernel_release = - KernelRelease::Parse(buf.release, true /* allow_suffix */); - return kernel_release.has_value() ? 
kernel_release->string() : ""; + // ro.bootimage.build.date.utc + return GetPartitionBuildDate("bootimage"); } return GetPartitionBuildDate(partition_name); } @@ -257,61 +259,33 @@ string HardwareAndroid::GetVersionForLogging( ErrorCode HardwareAndroid::IsPartitionUpdateValid( const string& partition_name, const string& new_version) const { if (partition_name == "boot") { - struct utsname buf; - if (uname(&buf) != 0) { - PLOG(ERROR) << "Unable to call uname()"; - return ErrorCode::kError; + const auto old_version = GetPartitionBuildDate("bootimage"); + auto error_code = + IsTimestampNewerLogged(partition_name, old_version, new_version); + if (error_code == ErrorCode::kPayloadTimestampError) { + bool prevent_downgrade = + android::sysprop::GkiProperties::prevent_downgrade_version().value_or( + false); + if (!prevent_downgrade) { + LOG(WARNING) << "Downgrade of boot image is detected, but permitting " + "update because device does not prevent boot image " + "downgrade"; + // If prevent_downgrade_version sysprop is not explicitly set, permit + // downgrade in boot image version. + // Even though error_code is overridden here, always call + // IsTimestampNewerLogged to produce log messages. + error_code = ErrorCode::kSuccess; + } } - bool prevent_downgrade = - android::sysprop::GkiProperties::prevent_downgrade_version().value_or( - false); - return IsKernelUpdateValid(buf.release, new_version, prevent_downgrade); + return error_code; } const auto old_version = GetPartitionBuildDate(partition_name); // TODO(zhangkelvin) for some partitions, missing a current timestamp should // be an error, e.g. system, vendor, product etc. - auto error_code = utils::IsTimestampNewer(old_version, new_version); - if (error_code != ErrorCode::kSuccess) { - LOG(ERROR) << "Timestamp check failed with " - << utils::ErrorCodeToString(error_code) - << " Partition timestamp: " << old_version - << " Update timestamp: " << new_version; - } + auto error_code = + IsTimestampNewerLogged(partition_name, old_version, new_version); return error_code; } -ErrorCode HardwareAndroid::IsKernelUpdateValid(const string& old_release, - const string& new_release, - bool prevent_downgrade) { - // Check that the package either contain an empty version (indicating that the - // new build does not use GKI), or a valid GKI kernel release. 
- std::optional new_kernel_release; - if (new_release.empty()) { - LOG(INFO) << "New build does not contain GKI."; - } else { - new_kernel_release = - KernelRelease::Parse(new_release, true /* allow_suffix */); - if (!new_kernel_release.has_value()) { - LOG(ERROR) << "New kernel release is not valid GKI kernel release: " - << new_release; - return ErrorCode::kDownloadManifestParseError; - } - } - - auto old_kernel_release = - KernelRelease::Parse(old_release, true /* allow_suffix */); - bool is_update_valid = android::kver::IsKernelUpdateValid(old_kernel_release, - new_kernel_release); - - if (!is_update_valid) { - if (prevent_downgrade) { - return ErrorCode::kPayloadTimestampError; - } - LOG(WARNING) << "Boot version downgrade detected, allowing update because " - << "prevent_downgrade_version sysprop is not set."; - } - return ErrorCode::kSuccess; -} - } // namespace chromeos_update_engine diff --git a/hardware_android.h b/hardware_android.h index 552cb534..d8fbbbe0 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -22,7 +22,6 @@ #include #include -#include #include "update_engine/common/error_code.h" #include "update_engine/common/hardware.h" @@ -68,18 +67,6 @@ class HardwareAndroid : public HardwareInterface { const std::string& new_version) const override; private: - FRIEND_TEST(HardwareAndroidTest, IsKernelUpdateValid); - - // Helper for IsPartitionUpdateValid. Check an update from |old_release| - // to |new_release| is valid or not. - // - If |new_release| is invalid, return kDownloadManifestParseError - // - If downgrade detected, kPayloadTimestampError if |prevent_downgrade| is - // set to true, or kSuccess if |prevent_downgrade| is set to false - // - If update is valid, kSuccess. - static ErrorCode IsKernelUpdateValid(const std::string& old_release, - const std::string& new_release, - bool prevent_downgrade); - DISALLOW_COPY_AND_ASSIGN(HardwareAndroid); }; diff --git a/hardware_android_unittest.cc b/hardware_android_unittest.cc deleted file mode 100644 index 679356c9..00000000 --- a/hardware_android_unittest.cc +++ /dev/null @@ -1,83 +0,0 @@ -// -// Copyright (C) 2020 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include -#include - -#include "update_engine/common/error_code.h" -#include "update_engine/hardware_android.h" - -using ::testing::NiceMock; -using ::testing::Return; - -namespace chromeos_update_engine { - -TEST(HardwareAndroidTest, IsKernelUpdateValid) { - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-not-gki", "", true /*prevent_downgrade*/)) - << "Legacy update should be fine"; - - EXPECT_EQ( - ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-not-gki", "5.4.42-android12-0", true /*prevent_downgrade*/)) - << "Update to GKI should be fine"; - - EXPECT_EQ(ErrorCode::kDownloadManifestParseError, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-not-gki", "5.4.42-not-gki", true /*prevent_downgrade*/)) - << "Should report parse error for invalid version field"; - - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.4.42-android12-0-something", - true /*prevent_downgrade*/)) - << "Self update should be fine"; - - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.4.43-android12-0-something", - true /*prevent_downgrade*/)) - << "Sub-level update should be fine"; - - EXPECT_EQ( - ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.10.10-android12-0-something", - true /*prevent_downgrade*/)) - << "KMI version update should be fine"; - - EXPECT_EQ(ErrorCode::kPayloadTimestampError, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.4.5-android12-0-something", - true /*prevent_downgrade*/)) - << "Should detect sub-level downgrade"; - - EXPECT_EQ(ErrorCode::kPayloadTimestampError, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.1.5-android12-0-something", - true /*prevent_downgrade*/)) - << "Should detect KMI version downgrade"; - - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.4.5-android12-0-something", - false /*prevent_downgrade*/)) - << "Should suppress sub-level downgrade"; -} - -} // namespace chromeos_update_engine From d55ec44bfcb77ff204c3274b53394d451e0f7a03 Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Mon, 28 Sep 2020 15:11:40 +0900 Subject: [PATCH 400/624] update_engine: use log_file_path for newer libchrome. log_file will take a FILE * instead for opened file. To pass a file path in const char *, log_file_path should be used in newer libchrome. 
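For context, a minimal self-contained sketch of version-gated libchrome file logging; the InitFileLogging helper is hypothetical, while BASE_VER, the 780000 cutoff, and the field names are taken from the change below.

#include <string>

#include <base/logging.h>

// Hypothetical helper showing the compile-time switch between the old
// log_file field and the newer log_file_path field.
void InitFileLogging(const std::string& path) {
  logging::LoggingSettings settings;
  settings.logging_dest = logging::LOG_TO_FILE;
  settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
#if BASE_VER < 780000
  settings.log_file = path.c_str();
#else
  settings.log_file_path = path.c_str();
#endif
  logging::InitLogging(settings);
}

update_engine's SetupLogging() below applies the same switch to its existing LoggingSettings.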
BUG=chromium:1094927 TEST=emerge update_engine Change-Id: Ic003f3111b3cfeb2217741c46bf86cdcd543c2a5 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2434423 Reviewed-by: Amin Hassani Commit-Queue: Qijiang Fan Tested-by: Qijiang Fan --- logging.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/logging.cc b/logging.cc index 6320e368..012feee0 100644 --- a/logging.cc +++ b/logging.cc @@ -79,7 +79,11 @@ void SetupLogging(bool log_to_system, bool log_to_file) { if (log_to_file) { log_file = SetupLogFile(kSystemLogsRoot); log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE; +#if BASE_VER < 780000 log_settings.log_file = log_file.c_str(); +#else + log_settings.log_file_path = log_file.c_str(); +#endif } logging::InitLogging(log_settings); } From aff72001751f046d17209a1679d496923716b71d Mon Sep 17 00:00:00 2001 From: Miriam Polzer Date: Thu, 27 Aug 2020 08:20:39 +0200 Subject: [PATCH 401/624] update_engine: Enterprise channel downgrade Powerwash and roll back when an enrolled user downgrades the channel: - If the admin downgrades channel, check for ChannelDowngradebehavior policy. - If the user downgrades the channel, powerwash based on given boolean. Add the "rollback" flag to the powerwash file to try to preserve some data. Note that this change is not affecting users yet: The ChannelDowngradeBehavior policy is not available in DPanel and the UI does not support chosing powerwash on channel downgrade for enrolled users. BUG=chromium:1122531 TEST=FEATURES=test emerge-amd64-generic update_engine TEST=Set policy with YAPS and test on device Change-Id: I2f02a6e752eed083b57484766f8e7ecc2eed7aca Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2397890 Tested-by: Miriam Polzer Commit-Queue: Miriam Polzer Reviewed-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- BUILD.gn | 1 + mock_update_attempter.h | 25 ++-- omaha_request_params.h | 4 + omaha_response_handler_action.cc | 10 +- omaha_response_handler_action_unittest.cc | 126 ++++++++++++++++++ payload_consumer/postinstall_runner_action.cc | 5 +- update_attempter.cc | 8 +- update_attempter.h | 4 + update_attempter_unittest.cc | 111 ++++++++++++--- update_manager/android_things_policy.cc | 1 + update_manager/chromeos_policy.cc | 1 + update_manager/default_policy.cc | 1 + .../enterprise_device_policy_impl.cc | 13 +- .../enterprise_device_policy_impl_unittest.cc | 65 +++++++++ update_manager/policy.h | 3 + 15 files changed, 337 insertions(+), 41 deletions(-) create mode 100644 update_manager/enterprise_device_policy_impl_unittest.cc diff --git a/BUILD.gn b/BUILD.gn index 59aa0047..b1719e20 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -533,6 +533,7 @@ if (use.test) { "update_boot_flags_action_unittest.cc", "update_manager/boxed_value_unittest.cc", "update_manager/chromeos_policy_unittest.cc", + "update_manager/enterprise_device_policy_impl_unittest.cc", "update_manager/evaluation_context_unittest.cc", "update_manager/generic_variables_unittest.cc", "update_manager/prng_unittest.cc", diff --git a/mock_update_attempter.h b/mock_update_attempter.h index cc056484..d502222a 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -30,17 +30,20 @@ class MockUpdateAttempter : public UpdateAttempter { public: using UpdateAttempter::UpdateAttempter; - MOCK_METHOD10(Update, - void(const std::string& app_version, - const std::string& omaha_url, - const std::string& target_channel, - const std::string& lts_tag, - const std::string& target_version_prefix, - bool 
rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool obey_proxies, - bool interactive)); + MOCK_METHOD(void, + Update, + (const std::string& app_version, + const std::string& omaha_url, + const std::string& target_channel, + const std::string& lts_tag, + const std::string& target_version_prefix, + bool rollback_allowed, + bool rollback_data_save_requested, + int rollback_allowed_milestones, + bool rollback_on_channel_downgrade, + bool obey_proxies, + bool interactive), + (override)); MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status)); diff --git a/omaha_request_params.h b/omaha_request_params.h index aad92903..5d30d583 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -302,6 +302,10 @@ class OmahaRequestParams { void set_is_powerwash_allowed(bool powerwash_allowed) { mutable_image_props_.is_powerwash_allowed = powerwash_allowed; } + bool is_powerwash_allowed() { + return mutable_image_props_.is_powerwash_allowed; + } + void set_device_requisition(const std::string& requisition) { device_requisition_ = requisition; } diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index 040f8e79..92e0a72c 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -188,10 +188,12 @@ void OmahaResponseHandlerAction::PerformAction() { } // Powerwash if either the response requires it or the parameters indicated - // powerwash and we are downgrading the version. + // powerwash (usually because there was a channel downgrade) and we are + // downgrading the version. Enterprise rollback, indicated by + // |response.is_rollback| is dealt with separately above. if (response.powerwash_required) { install_plan_.powerwash_required = true; - } else if (params->ShouldPowerwash()) { + } else if (params->ShouldPowerwash() && !response.is_rollback) { base::Version new_version(response.version); base::Version current_version(params->app_version()); @@ -205,6 +207,10 @@ void OmahaResponseHandlerAction::PerformAction() { << " Current version number: " << params->app_version(); } else if (new_version < current_version) { install_plan_.powerwash_required = true; + // Always try to preserve enrollment and wifi data for enrolled devices. + install_plan_.rollback_data_save_requested = + system_state_ && system_state_->device_policy() && + system_state_->device_policy()->IsEnterpriseEnrolled(); } } diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc index 04cfa73e..4e421b03 100644 --- a/omaha_response_handler_action_unittest.cc +++ b/omaha_response_handler_action_unittest.cc @@ -531,6 +531,132 @@ TEST_F(OmahaResponseHandlerActionTest, EXPECT_FALSE(install_plan.powerwash_required); } +TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableChannelButSameVersionTest) { + OmahaResponse in; + in.update_exists = true; + in.version = "12345.0.0.0"; + in.packages.push_back({.payload_urls = {"https://ChannelDownVersionUp"}, + .size = 1, + .hash = kPayloadHashHex}); + in.more_info_url = "http://more/info"; + + // Create a uniquely named test directory. 
+ base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + + OmahaRequestParams params(&fake_system_state_); + fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + params.set_root(tempdir.GetPath().value()); + params.set_current_channel("beta-channel"); + EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); + params.UpdateDownloadChannel(); + params.set_app_version("12345.0.0.0"); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + EXPECT_FALSE(install_plan.powerwash_required); + EXPECT_FALSE(install_plan.rollback_data_save_requested); +} + +// On an enrolled device, the rollback data restore should be attempted when +// doing a powerwash and channel downgrade. +TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableChannelEnrolledDataRestore) { + OmahaResponse in; + in.update_exists = true; + in.version = "12345.96.0.0"; + in.packages.push_back({.payload_urls = {"https://ChannelDownEnrolled"}, + .size = 1, + .hash = kPayloadHashHex}); + in.more_info_url = "http://more/info"; + + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + + OmahaRequestParams params(&fake_system_state_); + fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); + params.set_root(tempdir.GetPath().value()); + params.set_current_channel("beta-channel"); + EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); + params.UpdateDownloadChannel(); + params.set_app_version("12347.48.0.0"); + + testing::NiceMock mock_device_policy; + EXPECT_CALL(mock_device_policy, IsEnterpriseEnrolled()) + .WillOnce(Return(true)); + fake_system_state_.set_device_policy(&mock_device_policy); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + EXPECT_TRUE(install_plan.rollback_data_save_requested); +} + +// Never attempt rollback data restore if the device is not enrolled. +TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableChannelUnenrolledNoDataRestore) { + OmahaResponse in; + in.update_exists = true; + in.version = "12345.96.0.0"; + in.packages.push_back({.payload_urls = {"https://ChannelDownEnrolled"}, + .size = 1, + .hash = kPayloadHashHex}); + in.more_info_url = "http://more/info"; + + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + + OmahaRequestParams params(&fake_system_state_); + fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); + params.set_root(tempdir.GetPath().value()); + params.set_current_channel("beta-channel"); + EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); + params.UpdateDownloadChannel(); + params.set_app_version("12347.48.0.0"); + + testing::NiceMock mock_device_policy; + EXPECT_CALL(mock_device_policy, IsEnterpriseEnrolled()) + .WillOnce(Return(false)); + fake_system_state_.set_device_policy(&mock_device_policy); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + EXPECT_FALSE(install_plan.rollback_data_save_requested); +} + +// Never attempt rollback data restore if powerwash is not allowed. 
+TEST_F(OmahaResponseHandlerActionTest, + ChangeToMoreStableChannelNoPowerwashNoDataRestore) { + OmahaResponse in; + in.update_exists = true; + in.version = "12345.96.0.0"; + in.packages.push_back( + {.payload_urls = {"https://URL"}, .size = 1, .hash = kPayloadHashHex}); + in.more_info_url = "http://more/info"; + + // Create a uniquely named test directory. + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + + OmahaRequestParams params(&fake_system_state_); + fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); + params.set_root(tempdir.GetPath().value()); + params.set_current_channel("beta-channel"); + EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr)); + params.UpdateDownloadChannel(); + params.set_app_version("12347.48.0.0"); + + fake_system_state_.set_request_params(¶ms); + InstallPlan install_plan; + EXPECT_TRUE(DoTest(in, "", &install_plan)); + EXPECT_FALSE(install_plan.rollback_data_save_requested); +} + TEST_F(OmahaResponseHandlerActionTest, ChangeToLessStableVersionAndChannelTest) { OmahaResponse in; diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index e8fa81bc..91c3a640 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -63,9 +63,8 @@ void PostinstallRunnerAction::PerformAction() { // that retains a small amount of system state such as enrollment and // network configuration. In both cases all user accounts are deleted. if (install_plan_.powerwash_required || install_plan_.is_rollback) { - bool save_rollback_data = - install_plan_.is_rollback && install_plan_.rollback_data_save_requested; - if (hardware_->SchedulePowerwash(save_rollback_data)) { + if (hardware_->SchedulePowerwash( + install_plan_.rollback_data_save_requested)) { powerwash_scheduled_ = true; } else { return CompletePostinstall(ErrorCode::kPostinstallPowerwashError); diff --git a/update_attempter.cc b/update_attempter.cc index 24562e2a..14d5837d 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -252,6 +252,7 @@ void UpdateAttempter::Update(const string& app_version, bool rollback_allowed, bool rollback_data_save_requested, int rollback_allowed_milestones, + bool rollback_on_channel_downgrade, bool obey_proxies, bool interactive) { // This is normally called frequently enough so it's appropriate to use as a @@ -290,6 +291,7 @@ void UpdateAttempter::Update(const string& app_version, rollback_allowed, rollback_data_save_requested, rollback_allowed_milestones, + rollback_on_channel_downgrade, obey_proxies, interactive)) { return; @@ -366,6 +368,7 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, bool rollback_allowed, bool rollback_data_save_requested, int rollback_allowed_milestones, + bool rollback_on_channel_downgrade, bool obey_proxies, bool interactive) { http_response_code_ = 0; @@ -424,11 +427,9 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, LOG(INFO) << "No target channel mandated by policy."; } else { LOG(INFO) << "Setting target channel as mandated: " << target_channel; - // Pass in false for powerwash_allowed until we add it to the policy - // protobuf. 
string error_message; if (!omaha_request_params_->SetTargetChannel( - target_channel, false, &error_message)) { + target_channel, rollback_on_channel_downgrade, &error_message)) { LOG(ERROR) << "Setting the channel failed: " << error_message; } @@ -1114,6 +1115,7 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, params.rollback_allowed, params.rollback_data_save_requested, params.rollback_allowed_milestones, + params.rollback_on_channel_downgrade, /*obey_proxies=*/false, params.interactive); // Always clear the forced app_version and omaha_url after an update attempt diff --git a/update_attempter.h b/update_attempter.h index abd0bd4a..6c931509 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -90,6 +90,7 @@ class UpdateAttempter : public ActionProcessorDelegate, bool rollback_allowed, bool rollback_data_save_requested, int rollback_allowed_milestones, + bool rollback_on_channel_downgrade, bool obey_proxies, bool interactive); @@ -284,6 +285,8 @@ class UpdateAttempter : public ActionProcessorDelegate, FRIEND_TEST(UpdateAttempterTest, RollbackAfterInstall); FRIEND_TEST(UpdateAttempterTest, RollbackAllowed); FRIEND_TEST(UpdateAttempterTest, RollbackAllowedSetAndReset); + FRIEND_TEST(UpdateAttempterTest, ChannelDowngradeNoRollback); + FRIEND_TEST(UpdateAttempterTest, ChannelDowngradeRollback); FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackFailure); FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess); FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackFailure); @@ -376,6 +379,7 @@ class UpdateAttempter : public ActionProcessorDelegate, bool rollback_allowed, bool rollback_data_save_requested, int rollback_allowed_milestones, + bool rollback_on_channel_downgrade, bool obey_proxies, bool interactive); diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 354416eb..edcb67bc 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -179,6 +180,7 @@ class UpdateAttempterUnderTest : public UpdateAttempter { bool rollback_allowed, bool rollback_data_save_requested, int rollback_allowed_milestones, + bool rollback_on_channel_downgrade, bool obey_proxies, bool interactive) override { update_called_ = true; @@ -191,6 +193,7 @@ class UpdateAttempterUnderTest : public UpdateAttempter { rollback_allowed, rollback_data_save_requested, rollback_allowed_milestones, + rollback_on_channel_downgrade, obey_proxies, interactive); return; @@ -427,7 +430,7 @@ void UpdateAttempterTest::ScheduleQuitMainLoop() { void UpdateAttempterTest::SessionIdTestChange() { EXPECT_NE(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status()); const auto old_session_id = attempter_.session_id_; - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_NE(old_session_id, attempter_.session_id_); ScheduleQuitMainLoop(); } @@ -798,7 +801,7 @@ void UpdateAttempterTest::UpdateTestStart() { EXPECT_CALL(*processor_, StartProcessing()); } - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); loop_.PostTask(FROM_HERE, base::Bind(&UpdateAttempterTest::UpdateTestVerify, base::Unretained(this))); @@ -998,7 +1001,7 @@ void UpdateAttempterTest::P2PNotEnabledStart() { fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); 
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading_); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1020,7 +1023,7 @@ void UpdateAttempterTest::P2PEnabledStartingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(false); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1043,7 +1046,7 @@ void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1065,7 +1068,7 @@ void UpdateAttempterTest::P2PEnabledStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_TRUE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1097,6 +1100,7 @@ void UpdateAttempterTest::P2PEnabledInteractiveStart() { false, /*rollback_allowed_milestones=*/0, false, + false, /*interactive=*/true); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); @@ -1127,7 +1131,7 @@ void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); ScheduleQuitMainLoop(); @@ -1165,7 +1169,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); // Make sure the file still exists. @@ -1181,7 +1185,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { // However, if the count is already 0, it's not decremented. Test that. 
initial_value = 0; EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value)); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount)); EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value)); EXPECT_EQ(initial_value, new_value); @@ -1237,6 +1241,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { false, /*rollback_allowed_milestones=*/0, false, + false, /*interactive=*/true); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); @@ -1289,7 +1294,7 @@ void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); // Check that prefs have the correct values. int64_t update_count; EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count)); @@ -1346,8 +1351,17 @@ void UpdateAttempterTest::StagingOffIfInteractiveStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update( - "", "", "", "", "", false, false, 0, false, /* interactive = */ true); + attempter_.Update("", + "", + "", + "", + "", + false, + false, + 0, + false, + false, + /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1367,8 +1381,17 @@ void UpdateAttempterTest::StagingOffIfOobeStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update( - "", "", "", "", "", false, false, 0, false, /* interactive = */ true); + attempter_.Update("", + "", + "", + "", + "", + false, + false, + 0, + false, + false, + /* interactive = */ true); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1697,23 +1720,33 @@ TEST_F(UpdateAttempterTest, UpdateAfterInstall) { TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) { attempter_.CalculateUpdateParams( - "", "", "", "", "1234", false, false, 4, false, false); + /*app_version=*/"", + /*omaha_url=*/"", + /*target_channel=*/"", + /*lts_tag=*/"", + /*target_version_prefix=*/"1234", + /*rollback_allowed=*/false, + /*rollback_data_save_requested=*/false, + /*rollback_allowed_milestones=*/4, + /*rollback_on_channel_downgrade=*/false, + /*obey_proxies=*/false, + /*interactive=*/false); EXPECT_EQ("1234", fake_system_state_.request_params()->target_version_prefix()); attempter_.CalculateUpdateParams( - "", "", "", "", "", false, 4, false, false, false); + "", "", "", "", "", false, false, 4, false, false, false); EXPECT_TRUE( fake_system_state_.request_params()->target_version_prefix().empty()); } TEST_F(UpdateAttempterTest, TargetChannelHintSetAndReset) { attempter_.CalculateUpdateParams( - "", "", "", "hint", "", false, false, 4, false, false); + "", "", "", "hint", "", false, false, 4, false, false, false); EXPECT_EQ("hint", fake_system_state_.request_params()->lts_tag()); attempter_.CalculateUpdateParams( - "", "", "", "", "", false, 4, false, false, false); + "", "", "", "", "", false, false, 4, false, false, false); EXPECT_TRUE(fake_system_state_.request_params()->lts_tag().empty()); } @@ -1726,6 +1759,7 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { /*rollback_allowed=*/true, /*rollback_data_save_requested=*/false, /*rollback_allowed_milestones=*/4, + /*rollback_on_channel_downgrade=*/false, false, false); 
EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed()); @@ -1740,6 +1774,7 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { /*rollback_allowed=*/false, /*rollback_data_save_requested=*/false, /*rollback_allowed_milestones=*/4, + /*rollback_on_channel_downgrade=*/false, false, false); EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed()); @@ -1747,6 +1782,42 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { fake_system_state_.request_params()->rollback_allowed_milestones()); } +TEST_F(UpdateAttempterTest, ChannelDowngradeNoRollback) { + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + attempter_.CalculateUpdateParams(/*app_version=*/"", + /*omaha_url=*/"", + /*target_channel=*/"stable-channel", + /*lts_tag=*/"", + /*target_version_prefix=*/"", + /*rollback_allowed=*/false, + /*rollback_data_save_requested=*/false, + /*rollback_allowed_milestones=*/4, + /*rollback_on_channel_downgrade=*/false, + /*obey_proxies=*/false, + /*interactive=*/false); + EXPECT_FALSE(fake_system_state_.request_params()->is_powerwash_allowed()); +} + +TEST_F(UpdateAttempterTest, ChannelDowngradeRollback) { + base::ScopedTempDir tempdir; + ASSERT_TRUE(tempdir.CreateUniqueTempDir()); + fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + attempter_.CalculateUpdateParams(/*app_version=*/"", + /*omaha_url=*/"", + /*target_channel=*/"stable-channel", + /*lts_tag=*/"", + /*target_version_prefix=*/"", + /*rollback_allowed=*/false, + /*rollback_data_save_requested=*/false, + /*rollback_allowed_milestones=*/4, + /*rollback_on_channel_downgrade=*/true, + /*obey_proxies=*/false, + /*interactive=*/false); + EXPECT_TRUE(fake_system_state_.request_params()->is_powerwash_allowed()); +} + TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { // Construct an OmahaResponseHandlerAction that has processed an InstallPlan, // but the update is being deferred by the Policy. @@ -1861,7 +1932,7 @@ void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, SetRollbackHappened(false)) .Times(expected_reset ? 1 : 0); attempter_.policy_provider_ = std::move(mock_policy_provider); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); ScheduleQuitMainLoop(); } @@ -2202,7 +2273,7 @@ void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) { .WillOnce(Return(false)); attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", "", false, false, 0, false, false); + attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); EXPECT_EQ(token, attempter_.omaha_request_params_->autoupdate_token()); ScheduleQuitMainLoop(); diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc index 6362a73d..c4fa75a4 100644 --- a/update_manager/android_things_policy.cc +++ b/update_manager/android_things_policy.cc @@ -63,6 +63,7 @@ EvalStatus AndroidThingsPolicy::UpdateCheckAllowed( result->rollback_allowed = false; result->rollback_data_save_requested = false; result->rollback_allowed_milestones = -1; + result->rollback_on_channel_downgrade = false; result->interactive = false; // Build a list of policies to consult. 
Note that each policy may modify the diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 85cc3ae9..4c651b7d 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -221,6 +221,7 @@ EvalStatus ChromeOSPolicy::UpdateCheckAllowed(EvaluationContext* ec, result->target_version_prefix.clear(); result->rollback_allowed = false; result->rollback_allowed_milestones = -1; + result->rollback_on_channel_downgrade = false; result->interactive = false; EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy; diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc index cc13c441..7ca414b1 100644 --- a/update_manager/default_policy.cc +++ b/update_manager/default_policy.cc @@ -44,6 +44,7 @@ EvalStatus DefaultPolicy::UpdateCheckAllowed(EvaluationContext* ec, result->target_version_prefix.clear(); result->rollback_allowed = false; result->rollback_allowed_milestones = -1; // No version rolls should happen. + result->rollback_on_channel_downgrade = false; result->interactive = false; // Ensure that the minimum interval is set. If there's no clock, this defaults diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc index fed50a94..8fc79ca6 100644 --- a/update_manager/enterprise_device_policy_impl.cc +++ b/update_manager/enterprise_device_policy_impl.cc @@ -117,14 +117,23 @@ EvalStatus EnterpriseDevicePolicyImpl::UpdateCheckAllowed( if (rollback_allowed_milestones_p) result->rollback_allowed_milestones = *rollback_allowed_milestones_p; - // Determine whether a target channel is dictated by policy. + // Determine whether a target channel is dictated by policy and whether we + // should rollback in case that channel is more stable. const bool* release_channel_delegated_p = ec->GetValue(dp_provider->var_release_channel_delegated()); if (release_channel_delegated_p && !(*release_channel_delegated_p)) { const string* release_channel_p = ec->GetValue(dp_provider->var_release_channel()); - if (release_channel_p) + if (release_channel_p) { result->target_channel = *release_channel_p; + const ChannelDowngradeBehavior* channel_downgrade_behavior_p = + ec->GetValue(dp_provider->var_channel_downgrade_behavior()); + if (channel_downgrade_behavior_p && + *channel_downgrade_behavior_p == + ChannelDowngradeBehavior::kRollback) { + result->rollback_on_channel_downgrade = true; + } + } } const string* release_lts_tag_p = diff --git a/update_manager/enterprise_device_policy_impl_unittest.cc b/update_manager/enterprise_device_policy_impl_unittest.cc new file mode 100644 index 00000000..5b25602e --- /dev/null +++ b/update_manager/enterprise_device_policy_impl_unittest.cc @@ -0,0 +1,65 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/update_manager/enterprise_device_policy_impl.h" + +#include + +#include "update_engine/update_manager/policy_test_utils.h" + +namespace chromeos_update_manager { + +class UmEnterpriseDevicePolicyImplTest : public UmPolicyTestBase { + protected: + UmEnterpriseDevicePolicyImplTest() : UmPolicyTestBase() { + policy_ = std::make_unique(); + } + + void SetUpDefaultState() override { + UmPolicyTestBase::SetUpDefaultState(); + + fake_state_.device_policy_provider()->var_device_policy_is_loaded()->reset( + new bool(true)); + } +}; + +TEST_F(UmEnterpriseDevicePolicyImplTest, ChannelDowngradeBehaviorNoRollback) { + fake_state_.device_policy_provider()->var_release_channel_delegated()->reset( + new bool(false)); + fake_state_.device_policy_provider()->var_release_channel()->reset( + new std::string("stable-channel")); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result); + EXPECT_FALSE(result.rollback_on_channel_downgrade); +} + +TEST_F(UmEnterpriseDevicePolicyImplTest, ChannelDowngradeBehaviorRollback) { + fake_state_.device_policy_provider()->var_release_channel_delegated()->reset( + new bool(false)); + fake_state_.device_policy_provider()->var_release_channel()->reset( + new std::string("stable-channel")); + fake_state_.device_policy_provider()->var_channel_downgrade_behavior()->reset( + new ChannelDowngradeBehavior(ChannelDowngradeBehavior::kRollback)); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result); + EXPECT_TRUE(result.rollback_on_channel_downgrade); +} + +} // namespace chromeos_update_manager diff --git a/update_manager/policy.h b/update_manager/policy.h index 9194c38c..4b3bfc72 100644 --- a/update_manager/policy.h +++ b/update_manager/policy.h @@ -58,6 +58,9 @@ struct UpdateCheckParams { // (e.g. no device policy is available yet), in this case no version // roll-forward should happen. int rollback_allowed_milestones; + // Whether a rollback with data save should be initiated on channel + // downgrade (e.g. beta to stable). + bool rollback_on_channel_downgrade{false}; // A target channel, if so imposed by policy; otherwise, an empty string. std::string target_channel; // Specifies if the channel hint, e.g. LTS (Long Term Support) updates. From 582d8fec6c5fbb8b65c1f37579f5f71b88694a7e Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 28 Sep 2020 22:15:18 -0700 Subject: [PATCH 402/624] update_payload: Add support for minor version 7 Although, we have not upreved to minor version 7, some of the current canary images have been moved to minor version 7. So temporarily add support for minor version 7. 
BUG=b:169526824
TEST=sudo FEATURES=test emerge update_payload
TEST=cros_generate_update_payload --debug --tgt-image chromiumos_test_image.bin --src-image chromiumos_test_image.bin --output delta.bin --check --work-dir workdir

Change-Id: Ifb32307ecee7814190028f5ee7033f67fc5c40db
Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2437004
Reviewed-by: Amin Hassani
Tested-by: Amin Hassani
---
 scripts/update_payload/checker.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 58ad85cb..99a5c629 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -71,6 +71,7 @@
     4: (_TYPE_DELTA,),
     5: (_TYPE_DELTA,),
     6: (_TYPE_DELTA,),
+    7: (_TYPE_DELTA,),
 }

From 8db52491584d597c2c13710d8ab0c308d49ac365 Mon Sep 17 00:00:00 2001
From: Miriam Polzer
Date: Thu, 17 Sep 2020 14:06:46 +0200
Subject: [PATCH 403/624] update_engine: Fall back to DeviceMinimumVersion

If the kiosk's required Chrome OS version cannot be read several times
and DeviceMinimumVersion is set, update only if the current version is
below the DeviceMinimumVersion. This is a very conservative approach to
preventing kiosks from updating randomly:
- It only affects kiosk devices.
- It only affects devices that have DeviceMinimumVersion set; others
  will still simply update once the kiosk version could not be fetched
  several times.

BUG=chromium:1084453
TEST=FEATURES=test emerge-amd64-generic update_engine
TEST=Set policies on DUT and check behavior

Change-Id: I82caf4bf4969959e461a218a916f057ea73946ad
Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2416628
Tested-by: Miriam Polzer
Commit-Queue: Miriam Polzer
Reviewed-by: Amin Hassani
---
 .../enterprise_device_policy_impl.cc          | 34 ++++++-
 .../enterprise_device_policy_impl_unittest.cc | 96 +++++++++++++++++++
 update_manager/fake_system_provider.h         |  6 ++
 update_manager/real_system_provider.cc        | 15 ++-
 update_manager/real_system_provider.h         | 22 +++--
 .../real_system_provider_unittest.cc          | 26 +++--
 update_manager/state_factory.cc               |  4 +-
 update_manager/system_provider.h              |  7 ++
 8 files changed, 181 insertions(+), 29 deletions(-)

diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index 8fc79ca6..ce8150e7 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -62,13 +62,37 @@ EvalStatus EnterpriseDevicePolicyImpl::UpdateCheckAllowed(
       ec->GetValue(system_provider->var_kiosk_required_platform_version());
   if (!kiosk_required_platform_version_p) {
     LOG(INFO) << "Kiosk app required platform version is not fetched, "
-                 "blocking update checks";
+                 "blocking update checks.";
     return EvalStatus::kAskMeAgainLater;
+  } else if (kiosk_required_platform_version_p->empty()) {
+    // The platform version could not be fetched several times. Update
+    // based on |DeviceMinimumVersion| instead (crbug.com/1048931).
+    const base::Version* device_minimum_version_p =
+        ec->GetValue(dp_provider->var_device_minimum_version());
+    const base::Version* current_version_p(
+        ec->GetValue(system_provider->var_chromeos_version()));
+    if (device_minimum_version_p && device_minimum_version_p->IsValid() &&
+        current_version_p && current_version_p->IsValid() &&
+        *current_version_p > *device_minimum_version_p) {
+      // Do not update if the current version is newer than the minimum
+      // version.
+ LOG(INFO) << "Reading kiosk app required platform version failed " + "repeatedly but current version is newer than " + "DeviceMinimumVersion. Blocking update checks. " + "Current version: " + << *current_version_p + << " DeviceMinimumVersion: " << *device_minimum_version_p; + return EvalStatus::kAskMeAgainLater; + } + LOG(WARNING) << "Reading kiosk app required platform version failed " + "repeatedly. Attempting an update without it now."; + // An empty string for |target_version_prefix| allows arbitrary updates. + result->target_version_prefix = ""; + } else { + result->target_version_prefix = *kiosk_required_platform_version_p; + LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, " + << "target version is " << result->target_version_prefix; } - - result->target_version_prefix = *kiosk_required_platform_version_p; - LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, " - << "target version is " << result->target_version_prefix; // TODO(hunyadym): Add support for allowing rollback using the manifest // (if policy doesn't specify otherwise). } else { diff --git a/update_manager/enterprise_device_policy_impl_unittest.cc b/update_manager/enterprise_device_policy_impl_unittest.cc index 5b25602e..f27715e6 100644 --- a/update_manager/enterprise_device_policy_impl_unittest.cc +++ b/update_manager/enterprise_device_policy_impl_unittest.cc @@ -36,6 +36,102 @@ class UmEnterpriseDevicePolicyImplTest : public UmPolicyTestBase { } }; +TEST_F(UmEnterpriseDevicePolicyImplTest, KioskAppVersionSet) { + fake_state_.device_policy_provider()->var_update_disabled()->reset( + new bool(true)); + fake_state_.device_policy_provider() + ->var_allow_kiosk_app_control_chrome_version() + ->reset(new bool(true)); + + fake_state_.system_provider()->var_kiosk_required_platform_version()->reset( + new std::string("1234.5.6")); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result); + EXPECT_EQ(result.target_version_prefix, "1234.5.6"); +} + +TEST_F(UmEnterpriseDevicePolicyImplTest, KioskAppVersionUnreadableNoUpdate) { + fake_state_.device_policy_provider()->var_update_disabled()->reset( + new bool(true)); + fake_state_.device_policy_provider() + ->var_allow_kiosk_app_control_chrome_version() + ->reset(new bool(true)); + + fake_state_.system_provider()->var_kiosk_required_platform_version()->reset( + nullptr); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result); +} + +TEST_F(UmEnterpriseDevicePolicyImplTest, KioskAppVersionUnreadableUpdate) { + fake_state_.device_policy_provider()->var_update_disabled()->reset( + new bool(true)); + fake_state_.device_policy_provider() + ->var_allow_kiosk_app_control_chrome_version() + ->reset(new bool(true)); + + // The real variable returns an empty string after several unsuccessful + // reading attempts. Fake this by setting it directly to empty string. 
+ fake_state_.system_provider()->var_kiosk_required_platform_version()->reset( + new std::string("")); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result); + EXPECT_EQ(result.target_version_prefix, ""); +} + +TEST_F(UmEnterpriseDevicePolicyImplTest, + KioskAppVersionUnreadableUpdateWithMinVersion) { + fake_state_.device_policy_provider()->var_update_disabled()->reset( + new bool(true)); + fake_state_.device_policy_provider() + ->var_allow_kiosk_app_control_chrome_version() + ->reset(new bool(true)); + + // The real variable returns an empty string after several unsuccessful + // reading attempts. Fake this by setting it directly to empty string. + fake_state_.system_provider()->var_kiosk_required_platform_version()->reset( + new std::string("")); + // Update if the minimum version is above the current OS version. + fake_state_.device_policy_provider()->var_device_minimum_version()->reset( + new base::Version("2.0.0")); + fake_state_.system_provider()->var_chromeos_version()->reset( + new base::Version("1.0.0")); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kContinue, &Policy::UpdateCheckAllowed, &result); + EXPECT_EQ(result.target_version_prefix, ""); +} + +TEST_F(UmEnterpriseDevicePolicyImplTest, + KioskAppVersionUnreadableNoUpdateWithMinVersion) { + fake_state_.device_policy_provider()->var_update_disabled()->reset( + new bool(true)); + fake_state_.device_policy_provider() + ->var_allow_kiosk_app_control_chrome_version() + ->reset(new bool(true)); + + // The real variable returns an empty string after several unsuccessful + // reading attempts. Fake this by setting it directly to empty string. + fake_state_.system_provider()->var_kiosk_required_platform_version()->reset( + new std::string("")); + // Block update if the minimum version is below the current OS version. 
+ fake_state_.device_policy_provider()->var_device_minimum_version()->reset( + new base::Version("1.0.0")); + fake_state_.system_provider()->var_chromeos_version()->reset( + new base::Version("2.0.0")); + + UpdateCheckParams result; + ExpectPolicyStatus( + EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result); +} + TEST_F(UmEnterpriseDevicePolicyImplTest, ChannelDowngradeBehaviorNoRollback) { fake_state_.device_policy_provider()->var_release_channel_delegated()->reset( new bool(false)); diff --git a/update_manager/fake_system_provider.h b/update_manager/fake_system_provider.h index f54951b3..b320c01a 100644 --- a/update_manager/fake_system_provider.h +++ b/update_manager/fake_system_provider.h @@ -50,6 +50,10 @@ class FakeSystemProvider : public SystemProvider { return &var_kiosk_required_platform_version_; } + FakeVariable* var_chromeos_version() override { + return &var_version_; + } + private: FakeVariable var_is_normal_boot_mode_{"is_normal_boot_mode", kVariableModeConst}; @@ -60,6 +64,8 @@ class FakeSystemProvider : public SystemProvider { FakeVariable var_num_slots_{"num_slots", kVariableModePoll}; FakeVariable var_kiosk_required_platform_version_{ "kiosk_required_platform_version", kVariableModePoll}; + FakeVariable var_version_{"chromeos_version", + kVariableModePoll}; DISALLOW_COPY_AND_ASSIGN(FakeSystemProvider); }; diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc index 39eba221..9b5bc02e 100644 --- a/update_manager/real_system_provider.cc +++ b/update_manager/real_system_provider.cc @@ -24,7 +24,10 @@ #include #endif // USE_CHROME_KIOSK_APP +#include "update_engine/common/boot_control_interface.h" +#include "update_engine/common/hardware_interface.h" #include "update_engine/common/utils.h" +#include "update_engine/omaha_request_params.h" #include "update_engine/update_manager/generic_variables.h" #include "update_engine/update_manager/variable.h" @@ -97,19 +100,19 @@ class RetryPollVariable : public Variable { bool RealSystemProvider::Init() { var_is_normal_boot_mode_.reset(new ConstCopyVariable( - "is_normal_boot_mode", hardware_->IsNormalBootMode())); + "is_normal_boot_mode", system_state_->hardware()->IsNormalBootMode())); var_is_official_build_.reset(new ConstCopyVariable( - "is_official_build", hardware_->IsOfficialBuild())); + "is_official_build", system_state_->hardware()->IsOfficialBuild())); var_is_oobe_complete_.reset(new CallCopyVariable( "is_oobe_complete", base::Bind(&chromeos_update_engine::HardwareInterface::IsOOBEComplete, - base::Unretained(hardware_), + base::Unretained(system_state_->hardware()), nullptr))); var_num_slots_.reset(new ConstCopyVariable( - "num_slots", boot_control_->GetNumSlots())); + "num_slots", system_state_->boot_control()->GetNumSlots())); var_kiosk_required_platform_version_.reset(new RetryPollVariable( "kiosk_required_platform_version", @@ -117,6 +120,10 @@ bool RealSystemProvider::Init() { base::Bind(&RealSystemProvider::GetKioskAppRequiredPlatformVersion, base::Unretained(this)))); + var_chromeos_version_.reset(new ConstCopyVariable( + "chromeos_version", + base::Version(system_state_->request_params()->app_version()))); + return true; } diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h index 114c6eab..0e689977 100644 --- a/update_manager/real_system_provider.h +++ b/update_manager/real_system_provider.h @@ -20,8 +20,9 @@ #include #include -#include "update_engine/common/boot_control_interface.h" -#include 
"update_engine/common/hardware_interface.h" +#include + +#include "update_engine/system_state.h" #include "update_engine/update_manager/system_provider.h" namespace org { @@ -36,16 +37,13 @@ namespace chromeos_update_manager { class RealSystemProvider : public SystemProvider { public: RealSystemProvider( - chromeos_update_engine::HardwareInterface* hardware, - chromeos_update_engine::BootControlInterface* boot_control, + chromeos_update_engine::SystemState* system_state, org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy) - : hardware_(hardware), #if USE_CHROME_KIOSK_APP - boot_control_(boot_control), - kiosk_app_proxy_(kiosk_app_proxy) { + : system_state_(system_state), kiosk_app_proxy_(kiosk_app_proxy) { } #else - boot_control_(boot_control) { + system_state_(system_state) { } #endif // USE_CHROME_KIOSK_APP @@ -72,6 +70,10 @@ class RealSystemProvider : public SystemProvider { return var_kiosk_required_platform_version_.get(); } + Variable* var_chromeos_version() override { + return var_chromeos_version_.get(); + } + private: bool GetKioskAppRequiredPlatformVersion( std::string* required_platform_version); @@ -81,9 +83,9 @@ class RealSystemProvider : public SystemProvider { std::unique_ptr> var_is_oobe_complete_; std::unique_ptr> var_num_slots_; std::unique_ptr> var_kiosk_required_platform_version_; + std::unique_ptr> var_chromeos_version_; - chromeos_update_engine::HardwareInterface* const hardware_; - chromeos_update_engine::BootControlInterface* const boot_control_; + chromeos_update_engine::SystemState* const system_state_; #if USE_CHROME_KIOSK_APP org::chromium::KioskAppServiceInterfaceProxyInterface* const kiosk_app_proxy_; #endif // USE_CHROME_KIOSK_APP diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc index 3996b654..97571466 100644 --- a/update_manager/real_system_provider_unittest.cc +++ b/update_manager/real_system_provider_unittest.cc @@ -24,6 +24,7 @@ #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" +#include "update_engine/fake_system_state.h" #include "update_engine/update_manager/umtest_utils.h" #if USE_CHROME_KIOSK_APP #include "kiosk-app/dbus-proxies.h" @@ -54,17 +55,15 @@ class UmRealSystemProviderTest : public ::testing::Test { .WillByDefault( DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true))); - provider_.reset(new RealSystemProvider( - &fake_hardware_, &fake_boot_control_, kiosk_app_proxy_mock_.get())); + provider_.reset(new RealSystemProvider(&fake_system_state_, + kiosk_app_proxy_mock_.get())); #else - provider_.reset( - new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr)); + provider_.reset(new RealSystemProvider(&fake_system_state, nullptr)); #endif // USE_CHROME_KIOSK_APP EXPECT_TRUE(provider_->Init()); } - chromeos_update_engine::FakeHardware fake_hardware_; - chromeos_update_engine::FakeBootControl fake_boot_control_; + chromeos_update_engine::FakeSystemState fake_system_state_; unique_ptr provider_; #if USE_CHROME_KIOSK_APP @@ -77,18 +76,29 @@ TEST_F(UmRealSystemProviderTest, InitTest) { EXPECT_NE(nullptr, provider_->var_is_official_build()); EXPECT_NE(nullptr, provider_->var_is_oobe_complete()); EXPECT_NE(nullptr, provider_->var_kiosk_required_platform_version()); + EXPECT_NE(nullptr, provider_->var_chromeos_version()); } TEST_F(UmRealSystemProviderTest, IsOOBECompleteTrue) { - fake_hardware_.SetIsOOBEComplete(base::Time()); + fake_system_state_.fake_hardware()->SetIsOOBEComplete(base::Time()); 
UmTestUtils::ExpectVariableHasValue(true, provider_->var_is_oobe_complete()); } TEST_F(UmRealSystemProviderTest, IsOOBECompleteFalse) { - fake_hardware_.UnsetIsOOBEComplete(); + fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_complete()); } +TEST_F(UmRealSystemProviderTest, VersionFromRequestParams) { + fake_system_state_.request_params()->set_app_version("1.2.3"); + // Call |Init| again to pick up the version. + EXPECT_TRUE(provider_->Init()); + + base::Version version("1.2.3"); + UmTestUtils::ExpectVariableHasValue(version, + provider_->var_chromeos_version()); +} + #if USE_CHROME_KIOSK_APP TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) { UmTestUtils::ExpectVariableHasValue( diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc index 78cec6a0..a0d8a63b 100644 --- a/update_manager/state_factory.cc +++ b/update_manager/state_factory.cc @@ -69,8 +69,8 @@ State* DefaultStateFactory( unique_ptr shill_provider(new FakeShillProvider()); #endif // USE_SHILL unique_ptr random_provider(new RealRandomProvider()); - unique_ptr system_provider(new RealSystemProvider( - system_state->hardware(), system_state->boot_control(), kiosk_app_proxy)); + unique_ptr system_provider( + new RealSystemProvider(system_state, kiosk_app_proxy)); unique_ptr time_provider(new RealTimeProvider(clock)); unique_ptr updater_provider( diff --git a/update_manager/system_provider.h b/update_manager/system_provider.h index 13e188bb..8eb14e3b 100644 --- a/update_manager/system_provider.h +++ b/update_manager/system_provider.h @@ -17,6 +17,10 @@ #ifndef UPDATE_ENGINE_UPDATE_MANAGER_SYSTEM_PROVIDER_H_ #define UPDATE_ENGINE_UPDATE_MANAGER_SYSTEM_PROVIDER_H_ +#include + +#include + #include "update_engine/update_manager/provider.h" #include "update_engine/update_manager/variable.h" @@ -46,6 +50,9 @@ class SystemProvider : public Provider { // with zero delay kiosk app if any. virtual Variable* var_kiosk_required_platform_version() = 0; + // Chrome OS version number as provided by |ImagePropeties|. + virtual Variable* var_chromeos_version() = 0; + protected: SystemProvider() {} From 420bed506f4baa36eb9b3953df068460e98bc31f Mon Sep 17 00:00:00 2001 From: David Anderson Date: Mon, 21 Sep 2020 17:09:53 -0700 Subject: [PATCH 404/624] Link to libsnapshot_cow where libsnapshot is linked. Bug: 168554689 Test: builds Change-Id: Ie75bba98c8a346fcad1588b678648526a4c51a5c --- Android.bp | 1 + 1 file changed, 1 insertion(+) diff --git a/Android.bp b/Android.bp index 28e1bab5..c5e66e39 100644 --- a/Android.bp +++ b/Android.bp @@ -199,6 +199,7 @@ cc_defaults { "libgsi", "libpayload_consumer", "libsnapshot", + "libsnapshot_cow", "update_metadata-protos", ], shared_libs: [ From d506deef54c216feab9c6889ae21b57181128661 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 25 Sep 2020 15:08:19 -0700 Subject: [PATCH 405/624] CleanupPreviousUpdateAction: refactor and add StopActionInternal Add a new function, StopActionInternal, that stops the action. Also refactor and let all public *Action APIs call one of Start or Stop. 
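
A condensed view of the resulting call structure (illustrative only; the
exact bodies are in the diff below): every public Action entry point now
funnels into one of the two internal helpers, so the start/stop
bookkeeping lives in a single place.

  void CleanupPreviousUpdateAction::PerformAction() { StartActionInternal(); }
  void CleanupPreviousUpdateAction::ResumeAction() { StartActionInternal(); }
  void CleanupPreviousUpdateAction::TerminateProcessing() { StopActionInternal(); }
  void CleanupPreviousUpdateAction::SuspendAction() { StopActionInternal(); }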
Test: pass Bug: 169436297 Change-Id: I04877426f87f12755f9e3ba17434a4514941df99 --- cleanup_previous_update_action.cc | 24 ++++++++++++++---------- cleanup_previous_update_action.h | 1 + 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index 1a2476f0..3a7a7502 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -68,29 +68,23 @@ CleanupPreviousUpdateAction::CleanupPreviousUpdateAction( merge_stats_(nullptr) {} void CleanupPreviousUpdateAction::PerformAction() { - ResumeAction(); + StartActionInternal(); } void CleanupPreviousUpdateAction::TerminateProcessing() { - SuspendAction(); + StopActionInternal(); } void CleanupPreviousUpdateAction::ResumeAction() { - CHECK(prefs_); - CHECK(boot_control_); - - LOG(INFO) << "Starting/resuming CleanupPreviousUpdateAction"; - running_ = true; StartActionInternal(); } void CleanupPreviousUpdateAction::SuspendAction() { - LOG(INFO) << "Stopping/suspending CleanupPreviousUpdateAction"; - running_ = false; + StopActionInternal(); } void CleanupPreviousUpdateAction::ActionCompleted(ErrorCode error_code) { - running_ = false; + StopActionInternal(); ReportMergeStats(); metadata_device_ = nullptr; } @@ -103,7 +97,17 @@ std::string CleanupPreviousUpdateAction::StaticType() { return "CleanupPreviousUpdateAction"; } +void CleanupPreviousUpdateAction::StopActionInternal() { + LOG(INFO) << "Stopping/suspending/completing CleanupPreviousUpdateAction"; + running_ = false; +} + void CleanupPreviousUpdateAction::StartActionInternal() { + CHECK(prefs_); + CHECK(boot_control_); + + LOG(INFO) << "Starting/resuming CleanupPreviousUpdateAction"; + running_ = true; // Do nothing on non-VAB device. if (!boot_control_->GetDynamicPartitionControl() ->GetVirtualAbFeatureFlag() diff --git a/cleanup_previous_update_action.h b/cleanup_previous_update_action.h index 6f6ce078..1d8d3d75 100644 --- a/cleanup_previous_update_action.h +++ b/cleanup_previous_update_action.h @@ -75,6 +75,7 @@ class CleanupPreviousUpdateAction : public Action { unsigned int last_percentage_{0}; android::snapshot::ISnapshotMergeStats* merge_stats_; + void StopActionInternal(); void StartActionInternal(); void ScheduleWaitBootCompleted(); void WaitBootCompletedOrSchedule(); From d1d52a0b4b59be633e674dbc012baff97f8d0c80 Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Fri, 25 Sep 2020 15:09:07 -0700 Subject: [PATCH 406/624] CleanupPreviousUpdateAction: cancel pending tasks on destroy Maintain a variable that tracks the task ID of the pending task in the message loop. On destroy, cancel the pending task to avoid segfault. 
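
Sketch of the pattern (names follow the diff below; this is an outline of
the idea, not the exact patch content): remember the TaskId returned by
PostDelayedTask whenever a delayed step is scheduled, and cancel it before
the object goes away so a queued callback can never run against a
destroyed action.

  // Member tracking the pending task, initialized to the null task ID.
  brillo::MessageLoop::TaskId scheduled_task_{brillo::MessageLoop::kTaskIdNull};

  // When scheduling a delayed step, keep the returned task ID.
  scheduled_task_ = brillo::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule,
                 base::Unretained(this)),
      kCheckBootCompletedInterval);

  CleanupPreviousUpdateAction::~CleanupPreviousUpdateAction() {
    // Cancel any still-pending task so it cannot fire after destruction.
    if (scheduled_task_ != brillo::MessageLoop::kTaskIdNull)
      brillo::MessageLoop::current()->CancelTask(scheduled_task_);
    scheduled_task_ = brillo::MessageLoop::kTaskIdNull;
  }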
Test: during merge, call update_engine_client --cancel and it does not crash Fixes: 169436297 Change-Id: I9f3bccc5d4d5e2aaad1d08ef815c50100220c870 --- cleanup_previous_update_action.cc | 51 +++++++++++++++++++++++++++++-- cleanup_previous_update_action.h | 7 +++++ 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc index 3a7a7502..89ed6f82 100644 --- a/cleanup_previous_update_action.cc +++ b/cleanup_previous_update_action.cc @@ -67,6 +67,10 @@ CleanupPreviousUpdateAction::CleanupPreviousUpdateAction( last_percentage_(0), merge_stats_(nullptr) {} +CleanupPreviousUpdateAction::~CleanupPreviousUpdateAction() { + StopActionInternal(); +} + void CleanupPreviousUpdateAction::PerformAction() { StartActionInternal(); } @@ -97,9 +101,44 @@ std::string CleanupPreviousUpdateAction::StaticType() { return "CleanupPreviousUpdateAction"; } +// This function is called at the beginning of all delayed functions. By +// resetting |scheduled_task_|, the delayed function acknowledges that the task +// has already been executed, therefore there's no need to cancel it in the +// future. This avoids StopActionInternal() from resetting task IDs in an +// unexpected way because task IDs could be reused. +void CleanupPreviousUpdateAction::AcknowledgeTaskExecuted() { + if (scheduled_task_ != MessageLoop::kTaskIdNull) { + LOG(INFO) << "Executing task " << scheduled_task_; + } + scheduled_task_ = MessageLoop::kTaskIdNull; +} + +// Check that scheduled_task_ is a valid task ID. Otherwise, terminate the +// action. +void CleanupPreviousUpdateAction::CheckTaskScheduled(std::string_view name) { + if (scheduled_task_ == MessageLoop::kTaskIdNull) { + LOG(ERROR) << "Unable to schedule " << name; + processor_->ActionComplete(this, ErrorCode::kError); + } else { + LOG(INFO) << "CleanupPreviousUpdateAction scheduled task ID " + << scheduled_task_ << " for " << name; + } +} + void CleanupPreviousUpdateAction::StopActionInternal() { LOG(INFO) << "Stopping/suspending/completing CleanupPreviousUpdateAction"; running_ = false; + + if (scheduled_task_ != MessageLoop::kTaskIdNull) { + if (MessageLoop::current()->CancelTask(scheduled_task_)) { + LOG(INFO) << "CleanupPreviousUpdateAction cancelled pending task ID " + << scheduled_task_; + } else { + LOG(ERROR) << "CleanupPreviousUpdateAction unable to cancel task ID " + << scheduled_task_; + } + } + scheduled_task_ = MessageLoop::kTaskIdNull; } void CleanupPreviousUpdateAction::StartActionInternal() { @@ -124,14 +163,16 @@ void CleanupPreviousUpdateAction::StartActionInternal() { void CleanupPreviousUpdateAction::ScheduleWaitBootCompleted() { TEST_AND_RETURN(running_); - MessageLoop::current()->PostDelayedTask( + scheduled_task_ = MessageLoop::current()->PostDelayedTask( FROM_HERE, base::Bind(&CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule, base::Unretained(this)), kCheckBootCompletedInterval); + CheckTaskScheduled("WaitBootCompleted"); } void CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule() { + AcknowledgeTaskExecuted(); TEST_AND_RETURN(running_); if (!kIsRecovery && !android::base::GetBoolProperty(kBootCompletedProp, false)) { @@ -146,15 +187,17 @@ void CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule() { void CleanupPreviousUpdateAction::ScheduleWaitMarkBootSuccessful() { TEST_AND_RETURN(running_); - MessageLoop::current()->PostDelayedTask( + scheduled_task_ = MessageLoop::current()->PostDelayedTask( FROM_HERE, base::Bind( 
&CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule, base::Unretained(this)), kCheckSlotMarkedSuccessfulInterval); + CheckTaskScheduled("WaitMarkBootSuccessful"); } void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() { + AcknowledgeTaskExecuted(); TEST_AND_RETURN(running_); if (!kIsRecovery && !boot_control_->IsSlotMarkedSuccessful(boot_control_->GetCurrentSlot())) { @@ -216,14 +259,16 @@ void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() { void CleanupPreviousUpdateAction::ScheduleWaitForMerge() { TEST_AND_RETURN(running_); - MessageLoop::current()->PostDelayedTask( + scheduled_task_ = MessageLoop::current()->PostDelayedTask( FROM_HERE, base::Bind(&CleanupPreviousUpdateAction::WaitForMergeOrSchedule, base::Unretained(this)), kWaitForMergeInterval); + CheckTaskScheduled("WaitForMerge"); } void CleanupPreviousUpdateAction::WaitForMergeOrSchedule() { + AcknowledgeTaskExecuted(); TEST_AND_RETURN(running_); auto state = snapshot_->ProcessUpdateState( std::bind(&CleanupPreviousUpdateAction::OnMergePercentageUpdate, this), diff --git a/cleanup_previous_update_action.h b/cleanup_previous_update_action.h index 1d8d3d75..fe65e60d 100644 --- a/cleanup_previous_update_action.h +++ b/cleanup_previous_update_action.h @@ -20,6 +20,7 @@ #include // NOLINT(build/c++11) -- for merge times #include #include +#include #include #include @@ -51,6 +52,7 @@ class CleanupPreviousUpdateAction : public Action { BootControlInterface* boot_control, android::snapshot::ISnapshotManager* snapshot, CleanupPreviousUpdateActionDelegateInterface* delegate); + ~CleanupPreviousUpdateAction(); void PerformAction() override; void SuspendAction() override; @@ -74,6 +76,11 @@ class CleanupPreviousUpdateAction : public Action { bool cancel_failed_{false}; unsigned int last_percentage_{0}; android::snapshot::ISnapshotMergeStats* merge_stats_; + brillo::MessageLoop::TaskId scheduled_task_{brillo::MessageLoop::kTaskIdNull}; + + // Helpers for task management. + void AcknowledgeTaskExecuted(); + void CheckTaskScheduled(std::string_view name); void StopActionInternal(); void StartActionInternal(); From fbe526cb9cbcfab38cbea286eca6b8c2f19a75af Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 29 Sep 2020 10:45:19 -0700 Subject: [PATCH 407/624] update_engine: Remove unused function BUG=None TEST=FEATURES=test emerge update_engine Change-Id: Ie1f10afafa5dfa2aca53f6f4b155f0f808f18583 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2438674 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- payload_consumer/delta_performer.cc | 9 --------- payload_consumer/delta_performer.h | 4 ---- 2 files changed, 13 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 7375d37f..08eba028 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -429,15 +429,6 @@ void LogPartitionInfo(const vector& partitions) { } // namespace -uint32_t DeltaPerformer::GetMinorVersion() const { - if (manifest_.has_minor_version()) { - return manifest_.minor_version(); - } - return payload_->type == InstallPayloadType::kDelta - ? 
kMaxSupportedMinorPayloadVersion - : kFullPayloadMinorVersion; -} - bool DeltaPerformer::IsHeaderParsed() const { return metadata_size_ != 0; } diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 88076af3..e4b56c12 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -173,10 +173,6 @@ class DeltaPerformer : public FileWriter { // Return true if header parsing is finished and no errors occurred. bool IsHeaderParsed() const; - // Returns the delta minor version. If this value is defined in the manifest, - // it returns that value, otherwise it returns the default value. - uint32_t GetMinorVersion() const; - // Compare |calculated_hash| with source hash in |operation|, return false and // dump hash and set |error| if don't match. // |source_fd| is the file descriptor of the source partition. From 7d192200b897a9d65912e270691e5a3c8a8ff1a7 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 29 Sep 2020 15:58:37 -0700 Subject: [PATCH 408/624] update_engine: Switch back crypto function calls to get0 version Because of b/158580694 we had to switch the crypto calls to get1 version and manually release them. Since that bug has been marked as fixed, we can now switch it back to its original form. BUG=b:163153182 TEST=FEATURES=test emerge update_engine Change-Id: I8c2ff6619f592fc5e78a45efce14d42626d66034 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2438992 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- payload_consumer/payload_verifier.cc | 15 ++------------- payload_generator/payload_signer.cc | 14 ++------------ 2 files changed, 4 insertions(+), 25 deletions(-) diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc index 7fd2b8e6..85902c80 100644 --- a/payload_consumer/payload_verifier.cc +++ b/payload_consumer/payload_verifier.cc @@ -175,10 +175,7 @@ bool PayloadVerifier::VerifyRawSignature( } if (key_type == EVP_PKEY_EC) { - // TODO(b/158580694): Switch back to get0 version and remove manual - // freeing of the object once the bug is resolved or gale has been moved - // to informational. - EC_KEY* ec_key = EVP_PKEY_get1_EC_KEY(public_key.get()); + EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(public_key.get()); TEST_AND_RETURN_FALSE(ec_key != nullptr); if (ECDSA_verify(0, sha256_hash_data.data(), @@ -186,10 +183,8 @@ bool PayloadVerifier::VerifyRawSignature( sig_data.data(), sig_data.size(), ec_key) == 1) { - EC_KEY_free(ec_key); return true; } - EC_KEY_free(ec_key); } LOG(ERROR) << "Unsupported key type " << key_type; @@ -204,21 +199,16 @@ bool PayloadVerifier::GetRawHashFromSignature( const brillo::Blob& sig_data, const EVP_PKEY* public_key, brillo::Blob* out_hash_data) const { - // TODO(b/158580694): Switch back to get0 version and remove manual freeing of - // the object once the bug is resolved or gale has been moved to - // informational. 
-  //
   // The code below executes the equivalent of:
   //
   //   openssl rsautl -verify -pubin -inkey <(echo pem_public_key)
   //     -in |sig_data| -out |out_hash_data|
-  RSA* rsa = EVP_PKEY_get1_RSA(const_cast(public_key));
+  RSA* rsa = EVP_PKEY_get0_RSA(const_cast(public_key));
   TEST_AND_RETURN_FALSE(rsa != nullptr);
   unsigned int keysize = RSA_size(rsa);
   if (sig_data.size() > 2 * keysize) {
     LOG(ERROR) << "Signature size is too big for public key size.";
-    RSA_free(rsa);
     return false;
   }

   // Decrypts the signature.
   brillo::Blob hash_data(keysize);
   int decrypt_size = RSA_public_decrypt(
       sig_data.size(), sig_data.data(), hash_data.data(), rsa, RSA_NO_PADDING);
-  RSA_free(rsa);
   TEST_AND_RETURN_FALSE(decrypt_size > 0 &&
                         decrypt_size <= static_cast(hash_data.size()));
   hash_data.resize(decrypt_size);
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 9a44f947..dd87ab7a 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -309,10 +309,7 @@ bool PayloadSigner::SignHash(const brillo::Blob& hash,
   int key_type = EVP_PKEY_id(private_key.get());
   brillo::Blob signature;
   if (key_type == EVP_PKEY_RSA) {
-    // TODO(b/158580694): Switch back to get0 version and remove manual freeing
-    // of the object once the bug is resolved or gale has been moved to
-    // informational.
-    RSA* rsa = EVP_PKEY_get1_RSA(private_key.get());
+    RSA* rsa = EVP_PKEY_get0_RSA(private_key.get());
     TEST_AND_RETURN_FALSE(rsa != nullptr);

     brillo::Blob padded_hash = hash;
@@ -327,17 +324,12 @@ bool PayloadSigner::SignHash(const brillo::Blob& hash,
     if (signature_size < 0) {
       LOG(ERROR) << "Signing hash failed: "
                  << ERR_error_string(ERR_get_error(), nullptr);
-      RSA_free(rsa);
       return false;
     }
-    RSA_free(rsa);
     TEST_AND_RETURN_FALSE(static_cast(signature_size) ==
                           signature.size());
   } else if (key_type == EVP_PKEY_EC) {
-    // TODO(b/158580694): Switch back to get0 version and remove manual freeing
-    // of the object once the bug is resolved or gale has been moved to
-    // informational.
-    EC_KEY* ec_key = EVP_PKEY_get1_EC_KEY(private_key.get());
+    EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(private_key.get());
     TEST_AND_RETURN_FALSE(ec_key != nullptr);

     signature.resize(ECDSA_size(ec_key));
@@ -350,10 +342,8 @@ bool PayloadSigner::SignHash(const brillo::Blob& hash,
                    ec_key) != 1) {
       LOG(ERROR) << "Signing hash failed: "
                  << ERR_error_string(ERR_get_error(), nullptr);
-      EC_KEY_free(ec_key);
       return false;
     }
-    EC_KEY_free(ec_key);

     // NIST P-256
     LOG(ERROR) << "signature max size " << signature.size() << " size "

From 9bd519d4f5b5cf537038a3beb27efe0e196523cd Mon Sep 17 00:00:00 2001
From: Kelvin Zhang
Date: Wed, 23 Sep 2020 12:55:19 -0400
Subject: [PATCH 409/624] Add partition writer class.

Previously, delta_performer assumed that each InstallOp could be
processed independently, and therefore it created an ExtentWriter
instance for every operation. It also assumed that the source/target
partitions can be read/written using raw system file descriptors.

With the introduction of Virtual A/B Compression, both assumptions fall
apart. We need to process all SOURCE_COPY operations and reorder them
according to the merge sequence, which means InstallOperations are no
longer independent of each other. Also, VABC requires us to perform
writes using its ICowWriter interface, as opposed to read/write
syscalls.

We can add extra logic to handle these cases, but that will make the
already huge delta_performer.cc even bigger. It's 2000 lines right
now.
So instead, we plan to add an additional class called PartitionWriter. Which is supposed to perform partition level initialization, such as performing SOURCE_COPY ahead of time according to merge sequence, setting up snapshot devices, etc. This will make our code more maintainable. The purpose of this CL is to refactor DeltaPerformer, and move some of the logic into PartitionWriter. Future CLs will add a PartitionWriter for VABC. Test: treehugger, generate && serve an OTA Bug: 168554689 Change-Id: I305fe479b22d829dde527ee01df0e48e4dcb7b46 --- Android.bp | 1 + payload_consumer/delta_performer.cc | 184 ++++++++++------ payload_consumer/delta_performer.h | 135 +++++++----- payload_consumer/delta_performer_unittest.cc | 101 --------- payload_consumer/partition_writer_unittest.cc | 203 ++++++++++++++++++ update_attempter_android.cc | 2 +- 6 files changed, 415 insertions(+), 211 deletions(-) create mode 100644 payload_consumer/partition_writer_unittest.cc diff --git a/Android.bp b/Android.bp index c5e66e39..acd3633d 100644 --- a/Android.bp +++ b/Android.bp @@ -690,6 +690,7 @@ cc_test { "payload_consumer/certificate_parser_android_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", "payload_consumer/delta_performer_unittest.cc", + "payload_consumer/partition_writer_unittest.cc", "payload_consumer/download_action_android_unittest.cc", "payload_consumer/extent_reader_unittest.cc", "payload_consumer/extent_writer_unittest.cc", diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index d9efc30e..b49139e3 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -282,6 +282,15 @@ int DeltaPerformer::Close() { } int DeltaPerformer::CloseCurrentPartition() { + if (!partition_writer_) { + return 0; + } + int err = partition_writer_->Close(); + partition_writer_ = nullptr; + return err; +} + +int PartitionWriter::Close() { int err = 0; if (source_fd_ && !source_fd_->Close()) { err = errno; @@ -290,14 +299,6 @@ int DeltaPerformer::CloseCurrentPartition() { err = 1; } source_fd_.reset(); - if (source_ecc_fd_ && !source_ecc_fd_->Close()) { - err = errno; - PLOG(ERROR) << "Error closing ECC source partition"; - if (!err) - err = 1; - } - source_ecc_fd_.reset(); - source_ecc_open_failure_ = false; source_path_.clear(); if (target_fd_ && !target_fd_->Close()) { @@ -308,6 +309,15 @@ int DeltaPerformer::CloseCurrentPartition() { } target_fd_.reset(); target_path_.clear(); + + if (source_ecc_fd_ && !source_ecc_fd_->Close()) { + err = errno; + PLOG(ERROR) << "Error closing ECC source partition"; + if (!err) + err = 1; + } + source_ecc_fd_.reset(); + source_ecc_open_failure_ = false; return -err; } @@ -320,27 +330,43 @@ bool DeltaPerformer::OpenCurrentPartition() { install_plan_->partitions.size() - partitions_.size(); const InstallPlan::Partition& install_part = install_plan_->partitions[num_previous_partitions + current_partition_]; + partition_writer_ = std::make_unique( + partition, + install_part, + boot_control_->GetDynamicPartitionControl(), + block_size_, + interactive_); + // Open source fds if we have a delta payload, or for partitions in the // partial update. 
bool source_may_exist = manifest_.partial_update() || payload_->type == InstallPayloadType::kDelta; + return partition_writer_->Init(install_plan_, source_may_exist); +} + +bool PartitionWriter::Init(const InstallPlan* install_plan, + bool source_may_exist) { + const PartitionUpdate& partition = partition_update_; + uint32_t source_slot = install_plan->source_slot; + uint32_t target_slot = install_plan->target_slot; + // We shouldn't open the source partition in certain cases, e.g. some dynamic // partitions in delta payload, partitions included in the full payload for // partial updates. Use the source size as the indicator. - if (source_may_exist && install_part.source_size > 0) { - source_path_ = install_part.source_path; + if (source_may_exist && install_part_.source_size > 0) { + source_path_ = install_part_.source_path; int err; source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err); if (!source_fd_) { LOG(ERROR) << "Unable to open source partition " << partition.partition_name() << " on slot " - << BootControlInterface::SlotName(install_plan_->source_slot) - << ", file " << source_path_; + << BootControlInterface::SlotName(source_slot) << ", file " + << source_path_; return false; } } - target_path_ = install_part.target_path; + target_path_ = install_part_.target_path; int err; int flags = O_RDWR; @@ -354,8 +380,8 @@ bool DeltaPerformer::OpenCurrentPartition() { if (!target_fd_) { LOG(ERROR) << "Unable to open target partition " << partition.partition_name() << " on slot " - << BootControlInterface::SlotName(install_plan_->target_slot) - << ", file " << target_path_; + << BootControlInterface::SlotName(target_slot) << ", file " + << target_path_; return false; } @@ -364,38 +390,28 @@ bool DeltaPerformer::OpenCurrentPartition() { << "\""; // Discard the end of the partition, but ignore failures. - DiscardPartitionTail(target_fd_, install_part.target_size); + DiscardPartitionTail(target_fd_, install_part_.target_size); return true; } -bool DeltaPerformer::OpenCurrentECCPartition() { +bool PartitionWriter::OpenCurrentECCPartition() { + // No support for ECC for full payloads. + // Full Paylods should not have any operation that requires ECCPartition. if (source_ecc_fd_) return true; if (source_ecc_open_failure_) return false; - if (current_partition_ >= partitions_.size()) - return false; - - // No support for ECC for full payloads. 
- if (payload_->type == InstallPayloadType::kFull) - return false; - #if USE_FEC - const PartitionUpdate& partition = partitions_[current_partition_]; - size_t num_previous_partitions = - install_plan_->partitions.size() - partitions_.size(); - const InstallPlan::Partition& install_part = - install_plan_->partitions[num_previous_partitions + current_partition_]; - string path = install_part.source_path; + const PartitionUpdate& partition = partition_update_; + const InstallPlan::Partition& install_part = install_part_; + std::string path = install_part.source_path; FileDescriptorPtr fd(new FecFileDescriptor()); if (!fd->Open(path.c_str(), O_RDONLY, 0)) { PLOG(ERROR) << "Unable to open ECC source partition " - << partition.partition_name() << " on slot " - << BootControlInterface::SlotName(install_plan_->source_slot) - << ", file " << path; + << partition.partition_name() << ", file " << path; source_ecc_open_failure_ = true; return false; } @@ -733,10 +749,6 @@ bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) { if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error)) return false; - if (!target_fd_->Flush()) { - return false; - } - next_operation_num_++; UpdateOverallProgress(false, "Completed "); CheckpointUpdateProgress(false); @@ -1003,9 +1015,18 @@ bool DeltaPerformer::PerformReplaceOperation( // Since we delete data off the beginning of the buffer as we use it, // the data we need should be exactly at the beginning of the buffer. - TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset()); TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length()); + TEST_AND_RETURN_FALSE(partition_writer_->PerformReplaceOperation( + operation, buffer_.data(), buffer_.size())); + // Update buffer + DiscardBuffer(true, buffer_.size()); + return true; +} + +bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation, + const void* data, + size_t count) { // Setup the ExtentWriter stack based on the operation type. std::unique_ptr writer = std::make_unique(); @@ -1017,11 +1038,9 @@ bool DeltaPerformer::PerformReplaceOperation( TEST_AND_RETURN_FALSE( writer->Init(target_fd_, operation.dst_extents(), block_size_)); - TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length())); + TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length())); - // Update buffer - DiscardBuffer(true, buffer_.size()); - return true; + return target_fd_->Flush(); } bool DeltaPerformer::PerformZeroOrDiscardOperation( @@ -1032,7 +1051,11 @@ bool DeltaPerformer::PerformZeroOrDiscardOperation( // These operations have no blob. 
TEST_AND_RETURN_FALSE(!operation.has_data_offset()); TEST_AND_RETURN_FALSE(!operation.has_data_length()); + return partition_writer_->PerformZeroOrDiscardOperation(operation); +} +bool PartitionWriter::PerformZeroOrDiscardOperation( + const InstallOperation& operation) { #ifdef BLKZEROOUT bool attempt_ioctl = true; int request = @@ -1061,13 +1084,13 @@ bool DeltaPerformer::PerformZeroOrDiscardOperation( target_fd_, zeros.data(), chunk_length, start + offset)); } } - return true; + return target_fd_->Flush(); } -bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash, - const InstallOperation& operation, - const FileDescriptorPtr source_fd, - ErrorCode* error) { +bool PartitionWriter::ValidateSourceHash(const brillo::Blob& calculated_hash, + const InstallOperation& operation, + const FileDescriptorPtr source_fd, + ErrorCode* error) { brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(), operation.src_sha256_hash().end()); if (calculated_hash != expected_source_hash) { @@ -1108,14 +1131,18 @@ bool DeltaPerformer::PerformSourceCopyOperation( TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0); if (operation.has_dst_length()) TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0); + return partition_writer_->PerformSourceCopyOperation(operation, error); +} +bool PartitionWriter::PerformSourceCopyOperation( + const InstallOperation& operation, ErrorCode* error) { TEST_AND_RETURN_FALSE(source_fd_ != nullptr); // The device may optimize the SOURCE_COPY operation. // Being this a device-specific optimization let DynamicPartitionController // decide it the operation should be skipped. - const PartitionUpdate& partition = partitions_[current_partition_]; - const auto& partition_control = boot_control_->GetDynamicPartitionControl(); + const PartitionUpdate& partition = partition_update_; + const auto& partition_control = dynamic_control_; InstallOperation buf; bool should_optimize = partition_control->OptimizeOperation( @@ -1189,7 +1216,7 @@ bool DeltaPerformer::PerformSourceCopyOperation( } TEST_AND_RETURN_FALSE( ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)); - // At this point reading from the the error corrected device worked, but + // At this point reading from the error corrected device worked, but // reading from the raw device failed, so this is considered a recovered // failure. source_ecc_recovered_failures_++; @@ -1215,10 +1242,10 @@ bool DeltaPerformer::PerformSourceCopyOperation( block_size_, nullptr)); } - return true; + return target_fd_->Flush(); } -FileDescriptorPtr DeltaPerformer::ChooseSourceFD( +FileDescriptorPtr PartitionWriter::ChooseSourceFD( const InstallOperation& operation, ErrorCode* error) { if (source_fd_ == nullptr) { LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr"; @@ -1264,7 +1291,7 @@ FileDescriptorPtr DeltaPerformer::ChooseSourceFD( if (fd_utils::ReadAndHashExtents( source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) && ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) { - // At this point reading from the the error corrected device worked, but + // At this point reading from the error corrected device worked, but // reading from the raw device failed, so this is considered a recovered // failure. 
source_ecc_recovered_failures_++; @@ -1369,6 +1396,17 @@ bool DeltaPerformer::PerformSourceBsdiffOperation( if (operation.has_dst_length()) TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0); + TEST_AND_RETURN_FALSE(partition_writer_->PerformSourceBsdiffOperation( + operation, error, buffer_.data(), buffer_.size())); + DiscardBuffer(true, buffer_.size()); + return true; +} + +bool PartitionWriter::PerformSourceBsdiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count) { FileDescriptorPtr source_fd = ChooseSourceFD(operation, error); TEST_AND_RETURN_FALSE(source_fd != nullptr); @@ -1388,10 +1426,9 @@ bool DeltaPerformer::PerformSourceBsdiffOperation( TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file), std::move(dst_file), - buffer_.data(), - buffer_.size()) == 0); - DiscardBuffer(true, buffer_.size()); - return true; + static_cast(data), + count) == 0); + return target_fd_->Flush(); } namespace { @@ -1475,7 +1512,17 @@ bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation, // the data we need should be exactly at the beginning of the buffer. TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset()); TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length()); + TEST_AND_RETURN_FALSE(partition_writer_->PerformPuffDiffOperation( + operation, error, buffer_.data(), buffer_.size())); + DiscardBuffer(true, buffer_.size()); + return true; +} +bool PartitionWriter::PerformPuffDiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count) { FileDescriptorPtr source_fd = ChooseSourceFD(operation, error); TEST_AND_RETURN_FALSE(source_fd != nullptr); @@ -1496,11 +1543,10 @@ bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation, const size_t kMaxCacheSize = 5 * 1024 * 1024; // Total 5MB cache. TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream), std::move(dst_stream), - buffer_.data(), - buffer_.size(), + static_cast(data), + count, kMaxCacheSize)); - DiscardBuffer(true, buffer_.size()); - return true; + return target_fd_->Flush(); } bool DeltaPerformer::ExtractSignatureMessage() { @@ -2040,4 +2086,20 @@ bool DeltaPerformer::PrimeUpdateState() { return true; } +PartitionWriter::PartitionWriter( + const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive) + : partition_update_(partition_update), + install_part_(install_part), + dynamic_control_(dynamic_control), + interactive_(is_interactive), + block_size_(block_size) {} + +PartitionWriter::~PartitionWriter() { + Close(); +} + } // namespace chromeos_update_engine diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index 88076af3..bee7fdea 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -46,6 +46,9 @@ class BootControlInterface; class HardwareInterface; class PrefsInterface; +// At the bottom of this file. +class PartitionWriter; + // This class performs the actions in a delta update synchronously. The delta // update itself should be passed in in chunks as it is received. class DeltaPerformer : public FileWriter { @@ -101,10 +104,6 @@ class DeltaPerformer : public FileWriter { // work. Returns whether the required file descriptors were successfully open. bool OpenCurrentPartition(); - // Attempt to open the error-corrected device for the current partition. 
- // Returns whether the operation succeeded. - bool OpenCurrentECCPartition(); - // Closes the current partition file descriptors if open. Returns 0 on success // or -errno on error. int CloseCurrentPartition(); @@ -177,14 +176,6 @@ class DeltaPerformer : public FileWriter { // it returns that value, otherwise it returns the default value. uint32_t GetMinorVersion() const; - // Compare |calculated_hash| with source hash in |operation|, return false and - // dump hash and set |error| if don't match. - // |source_fd| is the file descriptor of the source partition. - static bool ValidateSourceHash(const brillo::Blob& calculated_hash, - const InstallOperation& operation, - const FileDescriptorPtr source_fd, - ErrorCode* error); - // Initialize partitions and allocate required space for an update with the // given |manifest|. |update_check_response_hash| is used to check if the // previous call to this function corresponds to the same payload. @@ -208,7 +199,6 @@ class DeltaPerformer : public FileWriter { friend class DeltaPerformerIntegrationTest; FRIEND_TEST(DeltaPerformerTest, BrilloMetadataSignatureSizeTest); FRIEND_TEST(DeltaPerformerTest, BrilloParsePayloadMetadataTest); - FRIEND_TEST(DeltaPerformerTest, ChooseSourceFDTest); FRIEND_TEST(DeltaPerformerTest, UsePublicKeyFromResponse); // Parse and move the update instructions of all partitions into our local @@ -262,13 +252,6 @@ class DeltaPerformer : public FileWriter { bool PerformPuffDiffOperation(const InstallOperation& operation, ErrorCode* error); - // For a given operation, choose the source fd to be used (raw device or error - // correction device) based on the source operation hash. - // Returns nullptr if the source hash mismatch cannot be corrected, and set - // the |error| accordingly. - FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, - ErrorCode* error); - // Extracts the payload signature message from the current |buffer_| if the // offset matches the one specified by the manifest. Returns whether the // signature was extracted. @@ -335,34 +318,6 @@ class DeltaPerformer : public FileWriter { // Pointer to the current payload in install_plan_.payloads. InstallPlan::Payload* payload_{nullptr}; - // File descriptor of the source partition. Only set while updating a - // partition when using a delta payload. - FileDescriptorPtr source_fd_{nullptr}; - - // File descriptor of the error corrected source partition. Only set while - // updating partition using a delta payload for a partition where error - // correction is available. The size of the error corrected device is smaller - // than the underlying raw device, since it doesn't include the error - // correction blocks. - FileDescriptorPtr source_ecc_fd_{nullptr}; - - // The total number of operations that failed source hash verification but - // passed after falling back to the error-corrected |source_ecc_fd_| device. - uint64_t source_ecc_recovered_failures_{0}; - - // Whether opening the current partition as an error-corrected device failed. - // Used to avoid re-opening the same source partition if it is not actually - // error corrected. - bool source_ecc_open_failure_{false}; - - // File descriptor of the target partition. Only set while performing the - // operations of a given partition. - FileDescriptorPtr target_fd_{nullptr}; - - // Paths the |source_fd_| and |target_fd_| refer to. - std::string source_path_; - std::string target_path_; - PayloadMetadata payload_metadata_; // Parsed manifest. 
Set after enough bytes to parse the manifest were @@ -452,9 +407,93 @@ class DeltaPerformer : public FileWriter { base::TimeDelta::FromSeconds(kCheckpointFrequencySeconds)}; base::TimeTicks update_checkpoint_time_; + std::unique_ptr partition_writer_; + DISALLOW_COPY_AND_ASSIGN(DeltaPerformer); }; +class PartitionWriter { + public: + PartitionWriter(const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive); + ~PartitionWriter(); + // Compare |calculated_hash| with source hash in |operation|, return false and + // dump hash and set |error| if don't match. + // |source_fd| is the file descriptor of the source partition. + static bool ValidateSourceHash(const brillo::Blob& calculated_hash, + const InstallOperation& operation, + const FileDescriptorPtr source_fd, + ErrorCode* error); + + // Perform necessary initialization work before InstallOperation can be + // applied to this partition + [[nodiscard]] bool Init(const InstallPlan* install_plan, + bool source_may_exist); + + int Close(); + + // These perform a specific type of operation and return true on success. + // |error| will be set if source hash mismatch, otherwise |error| might not be + // set even if it fails. + [[nodiscard]] bool PerformReplaceOperation(const InstallOperation& operation, + const void* data, + size_t count); + [[nodiscard]] bool PerformZeroOrDiscardOperation( + const InstallOperation& operation); + + [[nodiscard]] bool PerformSourceCopyOperation( + const InstallOperation& operation, ErrorCode* error); + [[nodiscard]] bool PerformSourceBsdiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count); + [[nodiscard]] bool PerformPuffDiffOperation(const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count); + + private: + friend class PartitionWriterTest; + FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest); + + bool OpenCurrentECCPartition(); + // For a given operation, choose the source fd to be used (raw device or error + // correction device) based on the source operation hash. + // Returns nullptr if the source hash mismatch cannot be corrected, and set + // the |error| accordingly. + FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, + ErrorCode* error); + + const PartitionUpdate& partition_update_; + const InstallPlan::Partition& install_part_; + DynamicPartitionControlInterface* dynamic_control_; + std::string source_path_; + std::string target_path_; + FileDescriptorPtr source_fd_; + FileDescriptorPtr target_fd_; + const bool interactive_; + const size_t block_size_; + // File descriptor of the error corrected source partition. Only set while + // updating partition using a delta payload for a partition where error + // correction is available. The size of the error corrected device is smaller + // than the underlying raw device, since it doesn't include the error + // correction blocks. + FileDescriptorPtr source_ecc_fd_{nullptr}; + + // The total number of operations that failed source hash verification but + // passed after falling back to the error-corrected |source_ecc_fd_| device. + uint64_t source_ecc_recovered_failures_{0}; + + // Whether opening the current partition as an error-corrected device failed. + // Used to avoid re-opening the same source partition if it is not actually + // error corrected. 
+ bool source_ecc_open_failure_{false}; +}; + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_DELTA_PERFORMER_H_ diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 449201ce..a5eb5381 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -418,22 +418,7 @@ class DeltaPerformerTest : public ::testing::Test { EXPECT_EQ(payload_.metadata_size, performer_.metadata_size_); } - // Helper function to pretend that the ECC file descriptor was already opened. - // Returns a pointer to the created file descriptor. - FakeFileDescriptor* SetFakeECCFile(size_t size) { - EXPECT_FALSE(performer_.source_ecc_fd_) << "source_ecc_fd_ already open."; - FakeFileDescriptor* ret = new FakeFileDescriptor(); - fake_ecc_fd_.reset(ret); - // Call open to simulate it was already opened. - ret->Open("", 0); - ret->SetFileSize(size); - performer_.source_ecc_fd_ = fake_ecc_fd_; - return ret; - } - uint64_t GetSourceEccRecoveredFailures() const { - return performer_.source_ecc_recovered_failures_; - } FakePrefs prefs_; InstallPlan install_plan_; @@ -660,94 +645,8 @@ TEST_F(DeltaPerformerTest, SourceHashMismatchTest) { EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false)); } -// Test that the error-corrected file descriptor is used to read the partition -// since the source partition doesn't match the operation hash. -TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) { - constexpr size_t kCopyOperationSize = 4 * 4096; - test_utils::ScopedTempFile source("Source-XXXXXX"); - // Write invalid data to the source image, which doesn't match the expected - // hash. - brillo::Blob invalid_data(kCopyOperationSize, 0x55); - EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data)); - - // Setup the fec file descriptor as the fake stream, which matches - // |expected_data|. - FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize); - brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize); - - PartitionConfig old_part(kPartitionNameRoot); - old_part.path = source.path(); - old_part.size = invalid_data.size(); - - brillo::Blob payload_data = - GenerateSourceCopyPayload(expected_data, true, &old_part); - EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true)); - // Verify that the fake_fec was actually used. - EXPECT_EQ(1U, fake_fec->GetReadOps().size()); - EXPECT_EQ(1U, GetSourceEccRecoveredFailures()); -} - -// Test that the error-corrected file descriptor is used to read a partition -// when no hash is available for SOURCE_COPY but it falls back to the normal -// file descriptor when the size of the error corrected one is too small. -TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) { - constexpr size_t kCopyOperationSize = 4 * 4096; - test_utils::ScopedTempFile source("Source-XXXXXX"); - // Setup the source path with the right expected data. - brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize); - EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data)); - - // Setup the fec file descriptor as the fake stream, with smaller data than - // the expected. - FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2); - - PartitionConfig old_part(kPartitionNameRoot); - old_part.path = source.path(); - old_part.size = expected_data.size(); - - // The payload operation doesn't include an operation hash. 
- brillo::Blob payload_data = - GenerateSourceCopyPayload(expected_data, false, &old_part); - EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true)); - // Verify that the fake_fec was attempted to be used. Since the file - // descriptor is shorter it can actually do more than one read to realize it - // reached the EOF. - EXPECT_LE(1U, fake_fec->GetReadOps().size()); - // This fallback doesn't count as an error-corrected operation since the - // operation hash was not available. - EXPECT_EQ(0U, GetSourceEccRecoveredFailures()); -} -TEST_F(DeltaPerformerTest, ChooseSourceFDTest) { - constexpr size_t kSourceSize = 4 * 4096; - test_utils::ScopedTempFile source("Source-XXXXXX"); - // Write invalid data to the source image, which doesn't match the expected - // hash. - brillo::Blob invalid_data(kSourceSize, 0x55); - EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data)); - - performer_.source_fd_ = std::make_shared(); - performer_.source_fd_->Open(source.path().c_str(), O_RDONLY); - performer_.block_size_ = 4096; - - // Setup the fec file descriptor as the fake stream, which matches - // |expected_data|. - FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize); - brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize); - - InstallOperation op; - *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096); - brillo::Blob src_hash; - EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash)); - op.set_src_sha256_hash(src_hash.data(), src_hash.size()); - ErrorCode error = ErrorCode::kSuccess; - EXPECT_EQ(performer_.source_ecc_fd_, performer_.ChooseSourceFD(op, &error)); - EXPECT_EQ(ErrorCode::kSuccess, error); - // Verify that the fake_fec was actually used. - EXPECT_EQ(1U, fake_fec->GetReadOps().size()); - EXPECT_EQ(1U, GetSourceEccRecoveredFailures()); -} TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) { uint64_t test[] = {1, 1, 4, 2, 0, 1}; diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc new file mode 100644 index 00000000..c1ff4f4e --- /dev/null +++ b/payload_consumer/partition_writer_unittest.cc @@ -0,0 +1,203 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include +#include + +#include +#include + +#include "update_engine/common/dynamic_partition_control_stub.h" +#include "update_engine/common/error_code.h" +#include "update_engine/common/fake_prefs.h" +#include "update_engine/common/hash_calculator.h" +#include "update_engine/common/test_utils.h" +#include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/delta_performer.h" +#include "update_engine/payload_consumer/extent_reader.h" +#include "update_engine/payload_consumer/extent_writer.h" +#include "update_engine/payload_consumer/fake_file_descriptor.h" +#include "update_engine/payload_consumer/file_descriptor.h" +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/payload_generator/annotated_operation.h" +#include "update_engine/payload_generator/delta_diff_generator.h" +#include "update_engine/payload_generator/extent_ranges.h" +#include "update_engine/payload_generator/payload_file.h" +#include "update_engine/payload_generator/payload_generation_config.h" +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { + +class PartitionWriterTest : public testing::Test { + public: + // Helper function to pretend that the ECC file descriptor was already opened. + // Returns a pointer to the created file descriptor. + FakeFileDescriptor* SetFakeECCFile(size_t size) { + EXPECT_FALSE(writer_.source_ecc_fd_) << "source_ecc_fd_ already open."; + FakeFileDescriptor* ret = new FakeFileDescriptor(); + fake_ecc_fd_.reset(ret); + // Call open to simulate it was already opened. + ret->Open("", 0); + ret->SetFileSize(size); + writer_.source_ecc_fd_ = fake_ecc_fd_; + return ret; + } + + uint64_t GetSourceEccRecoveredFailures() const { + return writer_.source_ecc_recovered_failures_; + } + + AnnotatedOperation GenerateSourceCopyOp(const brillo::Blob& copied_data, + bool add_hash, + PartitionConfig* old_part = nullptr) { + PayloadGenerationConfig config; + const uint64_t kDefaultBlockSize = config.block_size; + EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize); + uint64_t num_blocks = copied_data.size() / kDefaultBlockSize; + AnnotatedOperation aop; + *(aop.op.add_src_extents()) = ExtentForRange(0, num_blocks); + *(aop.op.add_dst_extents()) = ExtentForRange(0, num_blocks); + aop.op.set_type(InstallOperation::SOURCE_COPY); + brillo::Blob src_hash; + EXPECT_TRUE(HashCalculator::RawHashOfData(copied_data, &src_hash)); + if (add_hash) + aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size()); + + return aop; + } + + brillo::Blob PerformSourceCopyOp(const InstallOperation& op, + const brillo::Blob blob_data) { + test_utils::ScopedTempFile source_partition("Blob-XXXXXX"); + DirectExtentWriter extent_writer; + FileDescriptorPtr fd(new EintrSafeFileDescriptor()); + EXPECT_TRUE(fd->Open(source_partition.path().c_str(), O_RDWR)); + EXPECT_TRUE(extent_writer.Init(fd, op.src_extents(), kBlockSize)); + EXPECT_TRUE(extent_writer.Write(blob_data.data(), blob_data.size())); + + test_utils::ScopedTempFile target_partition("Blob-XXXXXX"); + + install_part_.source_path = source_partition.path(); + install_part_.target_path = target_partition.path(); + install_part_.source_size = blob_data.size(); + install_part_.target_size = blob_data.size(); + + ErrorCode error; + EXPECT_TRUE(writer_.Init(&install_plan_, true)); + EXPECT_TRUE(writer_.PerformSourceCopyOperation(op, &error)); + + brillo::Blob output_data; + EXPECT_TRUE(utils::ReadFile(target_partition.path(), &output_data)); + return output_data; + } + + FakePrefs prefs_{}; + 
InstallPlan install_plan_{}; + InstallPlan::Payload payload_{}; + DynamicPartitionControlStub dynamic_control_{}; + FileDescriptorPtr fake_ecc_fd_{}; + DeltaArchiveManifest manifest_{}; + PartitionUpdate partition_update_{}; + InstallPlan::Partition install_part_{}; + PartitionWriter writer_{ + partition_update_, install_part_, &dynamic_control_, kBlockSize, false}; +}; +// Test that the error-corrected file descriptor is used to read a partition +// when no hash is available for SOURCE_COPY but it falls back to the normal +// file descriptor when the size of the error corrected one is too small. +TEST_F(PartitionWriterTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) { + constexpr size_t kCopyOperationSize = 4 * 4096; + test_utils::ScopedTempFile source("Source-XXXXXX"); + // Setup the source path with the right expected data. + brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize); + EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data)); + + // Setup the fec file descriptor as the fake stream, with smaller data than + // the expected. + FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2); + + PartitionConfig old_part(kPartitionNameRoot); + old_part.path = source.path(); + old_part.size = expected_data.size(); + + // The payload operation doesn't include an operation hash. + auto source_copy_op = GenerateSourceCopyOp(expected_data, false, &old_part); + + auto output_data = PerformSourceCopyOp(source_copy_op.op, expected_data); + ASSERT_EQ(output_data, expected_data); + + // Verify that the fake_fec was attempted to be used. Since the file + // descriptor is shorter it can actually do more than one read to realize it + // reached the EOF. + EXPECT_LE(1U, fake_fec->GetReadOps().size()); + // This fallback doesn't count as an error-corrected operation since the + // operation hash was not available. + EXPECT_EQ(0U, GetSourceEccRecoveredFailures()); +} + +// Test that the error-corrected file descriptor is used to read the partition +// since the source partition doesn't match the operation hash. +TEST_F(PartitionWriterTest, ErrorCorrectionSourceCopyFallbackTest) { + constexpr size_t kCopyOperationSize = 4 * 4096; + // Write invalid data to the source image, which doesn't match the expected + // hash. + brillo::Blob invalid_data(kCopyOperationSize, 0x55); + + // Setup the fec file descriptor as the fake stream, which matches + // |expected_data|. + FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize); + brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize); + + auto source_copy_op = GenerateSourceCopyOp(expected_data, true); + auto output_data = PerformSourceCopyOp(source_copy_op.op, invalid_data); + ASSERT_EQ(output_data, expected_data); + + // Verify that the fake_fec was actually used. + EXPECT_EQ(1U, fake_fec->GetReadOps().size()); + EXPECT_EQ(1U, GetSourceEccRecoveredFailures()); +} + +TEST_F(PartitionWriterTest, ChooseSourceFDTest) { + constexpr size_t kSourceSize = 4 * 4096; + test_utils::ScopedTempFile source("Source-XXXXXX"); + // Write invalid data to the source image, which doesn't match the expected + // hash. + brillo::Blob invalid_data(kSourceSize, 0x55); + EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data)); + + writer_.source_fd_ = std::make_shared(); + writer_.source_fd_->Open(source.path().c_str(), O_RDONLY); + + // Setup the fec file descriptor as the fake stream, which matches + // |expected_data|. 
+ FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize); + brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize); + + InstallOperation op; + *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096); + brillo::Blob src_hash; + EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash)); + op.set_src_sha256_hash(src_hash.data(), src_hash.size()); + + ErrorCode error = ErrorCode::kSuccess; + EXPECT_EQ(writer_.source_ecc_fd_, writer_.ChooseSourceFD(op, &error)); + EXPECT_EQ(ErrorCode::kSuccess, error); + // Verify that the fake_fec was actually used. + EXPECT_EQ(1U, fake_fec->GetReadOps().size()); + EXPECT_EQ(1U, GetSourceEccRecoveredFailures()); +} + +} // namespace chromeos_update_engine diff --git a/update_attempter_android.cc b/update_attempter_android.cc index 7fc13e11..3578d955 100644 --- a/update_attempter_android.cc +++ b/update_attempter_android.cc @@ -507,7 +507,7 @@ bool UpdateAttempterAndroid::VerifyPayloadApplicable( return LogAndSetError( error, FROM_HERE, "Failed to hash " + partition_path); } - if (!DeltaPerformer::ValidateSourceHash( + if (!PartitionWriter::ValidateSourceHash( source_hash, operation, fd, &errorcode)) { return false; } From 50bac6567e2f6dc9e4f7d465c00a4c6568a91311 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 28 Sep 2020 15:51:41 -0400 Subject: [PATCH 410/624] Move partition writer to a separate file Test: treehugger && serve an OTA update Change-Id: I803692110841ce6d2207555ac7a682e9f989363d --- Android.bp | 1 + payload_consumer/delta_performer.cc | 635 +------------------------- payload_consumer/delta_performer.h | 86 +--- payload_consumer/partition_writer.cc | 644 +++++++++++++++++++++++++++ payload_consumer/partition_writer.h | 113 +++++ 5 files changed, 777 insertions(+), 702 deletions(-) create mode 100644 payload_consumer/partition_writer.cc create mode 100644 payload_consumer/partition_writer.h diff --git a/Android.bp b/Android.bp index acd3633d..8e9ec17f 100644 --- a/Android.bp +++ b/Android.bp @@ -178,6 +178,7 @@ cc_library_static { "payload_consumer/payload_constants.cc", "payload_consumer/payload_metadata.cc", "payload_consumer/payload_verifier.cc", + "payload_consumer/partition_writer.cc", "payload_consumer/postinstall_runner_action.cc", "payload_consumer/verity_writer_android.cc", "payload_consumer/xz_extent_writer.cc", diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index b49139e3..87fc4cfc 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -54,6 +54,7 @@ #include "update_engine/payload_consumer/extent_reader.h" #include "update_engine/payload_consumer/extent_writer.h" #include "update_engine/payload_consumer/partition_update_generator_interface.h" +#include "update_engine/payload_consumer/partition_writer.h" #if USE_FEC #include "update_engine/payload_consumer/fec_file_descriptor.h" #endif // USE_FEC @@ -79,65 +80,6 @@ namespace { const int kUpdateStateOperationInvalid = -1; const int kMaxResumedUpdateFailures = 10; -const uint64_t kCacheSize = 1024 * 1024; // 1MB - -// Opens path for read/write. On success returns an open FileDescriptor -// and sets *err to 0. On failure, sets *err to errno and returns nullptr. -FileDescriptorPtr OpenFile(const char* path, - int mode, - bool cache_writes, - int* err) { - // Try to mark the block device read-only based on the mode. Ignore any - // failure since this won't work when passing regular files. 
- bool read_only = (mode & O_ACCMODE) == O_RDONLY; - utils::SetBlockDeviceReadOnly(path, read_only); - - FileDescriptorPtr fd(new EintrSafeFileDescriptor()); - if (cache_writes && !read_only) { - fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize)); - LOG(INFO) << "Caching writes."; - } - if (!fd->Open(path, mode, 000)) { - *err = errno; - PLOG(ERROR) << "Unable to open file " << path; - return nullptr; - } - *err = 0; - return fd; -} - -// Discard the tail of the block device referenced by |fd|, from the offset -// |data_size| until the end of the block device. Returns whether the data was -// discarded. -bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) { - uint64_t part_size = fd->BlockDevSize(); - if (!part_size || part_size <= data_size) - return false; - - struct blkioctl_request { - int number; - const char* name; - }; - const vector blkioctl_requests = { - {BLKDISCARD, "BLKDISCARD"}, - {BLKSECDISCARD, "BLKSECDISCARD"}, -#ifdef BLKZEROOUT - {BLKZEROOUT, "BLKZEROOUT"}, -#endif - }; - for (const auto& req : blkioctl_requests) { - int error = 0; - if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) && - error == 0) { - return true; - } - LOG(WARNING) << "Error discarding the last " - << (part_size - data_size) / 1024 << " KiB using ioctl(" - << req.name << ")"; - } - return false; -} - } // namespace // Computes the ratio of |part| and |total|, scaled to |norm|, using integer @@ -290,37 +232,6 @@ int DeltaPerformer::CloseCurrentPartition() { return err; } -int PartitionWriter::Close() { - int err = 0; - if (source_fd_ && !source_fd_->Close()) { - err = errno; - PLOG(ERROR) << "Error closing source partition"; - if (!err) - err = 1; - } - source_fd_.reset(); - source_path_.clear(); - - if (target_fd_ && !target_fd_->Close()) { - err = errno; - PLOG(ERROR) << "Error closing target partition"; - if (!err) - err = 1; - } - target_fd_.reset(); - target_path_.clear(); - - if (source_ecc_fd_ && !source_ecc_fd_->Close()) { - err = errno; - PLOG(ERROR) << "Error closing ECC source partition"; - if (!err) - err = 1; - } - source_ecc_fd_.reset(); - source_ecc_open_failure_ = false; - return -err; -} - bool DeltaPerformer::OpenCurrentPartition() { if (current_partition_ >= partitions_.size()) return false; @@ -344,86 +255,6 @@ bool DeltaPerformer::OpenCurrentPartition() { return partition_writer_->Init(install_plan_, source_may_exist); } -bool PartitionWriter::Init(const InstallPlan* install_plan, - bool source_may_exist) { - const PartitionUpdate& partition = partition_update_; - uint32_t source_slot = install_plan->source_slot; - uint32_t target_slot = install_plan->target_slot; - - // We shouldn't open the source partition in certain cases, e.g. some dynamic - // partitions in delta payload, partitions included in the full payload for - // partial updates. Use the source size as the indicator. - if (source_may_exist && install_part_.source_size > 0) { - source_path_ = install_part_.source_path; - int err; - source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err); - if (!source_fd_) { - LOG(ERROR) << "Unable to open source partition " - << partition.partition_name() << " on slot " - << BootControlInterface::SlotName(source_slot) << ", file " - << source_path_; - return false; - } - } - - target_path_ = install_part_.target_path; - int err; - - int flags = O_RDWR; - if (!interactive_) - flags |= O_DSYNC; - - LOG(INFO) << "Opening " << target_path_ << " partition with" - << (interactive_ ? 
"out" : "") << " O_DSYNC"; - - target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err); - if (!target_fd_) { - LOG(ERROR) << "Unable to open target partition " - << partition.partition_name() << " on slot " - << BootControlInterface::SlotName(target_slot) << ", file " - << target_path_; - return false; - } - - LOG(INFO) << "Applying " << partition.operations().size() - << " operations to partition \"" << partition.partition_name() - << "\""; - - // Discard the end of the partition, but ignore failures. - DiscardPartitionTail(target_fd_, install_part_.target_size); - - return true; -} - -bool PartitionWriter::OpenCurrentECCPartition() { - // No support for ECC for full payloads. - // Full Paylods should not have any operation that requires ECCPartition. - if (source_ecc_fd_) - return true; - - if (source_ecc_open_failure_) - return false; - -#if USE_FEC - const PartitionUpdate& partition = partition_update_; - const InstallPlan::Partition& install_part = install_part_; - std::string path = install_part.source_path; - FileDescriptorPtr fd(new FecFileDescriptor()); - if (!fd->Open(path.c_str(), O_RDONLY, 0)) { - PLOG(ERROR) << "Unable to open ECC source partition " - << partition.partition_name() << ", file " << path; - source_ecc_open_failure_ = true; - return false; - } - source_ecc_fd_ = fd; -#else - // No support for ECC compiled. - source_ecc_open_failure_ = true; -#endif // USE_FEC - - return !source_ecc_open_failure_; -} - namespace { void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) { @@ -1024,25 +855,6 @@ bool DeltaPerformer::PerformReplaceOperation( return true; } -bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation, - const void* data, - size_t count) { - // Setup the ExtentWriter stack based on the operation type. - std::unique_ptr writer = std::make_unique(); - - if (operation.type() == InstallOperation::REPLACE_BZ) { - writer.reset(new BzipExtentWriter(std::move(writer))); - } else if (operation.type() == InstallOperation::REPLACE_XZ) { - writer.reset(new XzExtentWriter(std::move(writer))); - } - - TEST_AND_RETURN_FALSE( - writer->Init(target_fd_, operation.dst_extents(), block_size_)); - TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length())); - - return target_fd_->Flush(); -} - bool DeltaPerformer::PerformZeroOrDiscardOperation( const InstallOperation& operation) { CHECK(operation.type() == InstallOperation::DISCARD || @@ -1051,40 +863,8 @@ bool DeltaPerformer::PerformZeroOrDiscardOperation( // These operations have no blob. TEST_AND_RETURN_FALSE(!operation.has_data_offset()); TEST_AND_RETURN_FALSE(!operation.has_data_length()); - return partition_writer_->PerformZeroOrDiscardOperation(operation); -} -bool PartitionWriter::PerformZeroOrDiscardOperation( - const InstallOperation& operation) { -#ifdef BLKZEROOUT - bool attempt_ioctl = true; - int request = - (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD); -#else // !defined(BLKZEROOUT) - bool attempt_ioctl = false; - int request = 0; -#endif // !defined(BLKZEROOUT) - - brillo::Blob zeros; - for (const Extent& extent : operation.dst_extents()) { - const uint64_t start = extent.start_block() * block_size_; - const uint64_t length = extent.num_blocks() * block_size_; - if (attempt_ioctl) { - int result = 0; - if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0) - continue; - attempt_ioctl = false; - } - // In case of failure, we fall back to writing 0 to the selected region. 
- zeros.resize(16 * block_size_); - for (uint64_t offset = 0; offset < length; offset += zeros.size()) { - uint64_t chunk_length = - min(length - offset, static_cast(zeros.size())); - TEST_AND_RETURN_FALSE(utils::PWriteAll( - target_fd_, zeros.data(), chunk_length, start + offset)); - } - } - return target_fd_->Flush(); + return partition_writer_->PerformZeroOrDiscardOperation(operation); } bool PartitionWriter::ValidateSourceHash(const brillo::Blob& calculated_hash, @@ -1134,172 +914,6 @@ bool DeltaPerformer::PerformSourceCopyOperation( return partition_writer_->PerformSourceCopyOperation(operation, error); } -bool PartitionWriter::PerformSourceCopyOperation( - const InstallOperation& operation, ErrorCode* error) { - TEST_AND_RETURN_FALSE(source_fd_ != nullptr); - - // The device may optimize the SOURCE_COPY operation. - // Being this a device-specific optimization let DynamicPartitionController - // decide it the operation should be skipped. - const PartitionUpdate& partition = partition_update_; - const auto& partition_control = dynamic_control_; - - InstallOperation buf; - bool should_optimize = partition_control->OptimizeOperation( - partition.partition_name(), operation, &buf); - const InstallOperation& optimized = should_optimize ? buf : operation; - - if (operation.has_src_sha256_hash()) { - bool read_ok; - brillo::Blob source_hash; - brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(), - operation.src_sha256_hash().end()); - - // We fall back to use the error corrected device if the hash of the raw - // device doesn't match or there was an error reading the source partition. - // Note that this code will also fall back if writing the target partition - // fails. - if (should_optimize) { - // Hash operation.src_extents(), then copy optimized.src_extents to - // optimized.dst_extents. - read_ok = - fd_utils::ReadAndHashExtents( - source_fd_, operation.src_extents(), block_size_, &source_hash) && - fd_utils::CopyAndHashExtents(source_fd_, - optimized.src_extents(), - target_fd_, - optimized.dst_extents(), - block_size_, - nullptr /* skip hashing */); - } else { - read_ok = fd_utils::CopyAndHashExtents(source_fd_, - operation.src_extents(), - target_fd_, - operation.dst_extents(), - block_size_, - &source_hash); - } - if (read_ok && expected_source_hash == source_hash) - return true; - LOG(WARNING) << "Source hash from RAW device mismatched, attempting to " - "correct using ECC"; - if (!OpenCurrentECCPartition()) { - // The following function call will return false since the source hash - // mismatches, but we still want to call it so it prints the appropriate - // log message. 
- return ValidateSourceHash(source_hash, operation, source_fd_, error); - } - - LOG(WARNING) << "Source hash from RAW device mismatched: found " - << base::HexEncode(source_hash.data(), source_hash.size()) - << ", expected " - << base::HexEncode(expected_source_hash.data(), - expected_source_hash.size()); - if (should_optimize) { - TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents( - source_ecc_fd_, operation.src_extents(), block_size_, &source_hash)); - TEST_AND_RETURN_FALSE( - fd_utils::CopyAndHashExtents(source_ecc_fd_, - optimized.src_extents(), - target_fd_, - optimized.dst_extents(), - block_size_, - nullptr /* skip hashing */)); - } else { - TEST_AND_RETURN_FALSE( - fd_utils::CopyAndHashExtents(source_ecc_fd_, - operation.src_extents(), - target_fd_, - operation.dst_extents(), - block_size_, - &source_hash)); - } - TEST_AND_RETURN_FALSE( - ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)); - // At this point reading from the error corrected device worked, but - // reading from the raw device failed, so this is considered a recovered - // failure. - source_ecc_recovered_failures_++; - } else { - // When the operation doesn't include a source hash, we attempt the error - // corrected device first since we can't verify the block in the raw device - // at this point, but we fall back to the raw device since the error - // corrected device can be shorter or not available. - - if (OpenCurrentECCPartition() && - fd_utils::CopyAndHashExtents(source_ecc_fd_, - optimized.src_extents(), - target_fd_, - optimized.dst_extents(), - block_size_, - nullptr)) { - return true; - } - TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_, - optimized.src_extents(), - target_fd_, - optimized.dst_extents(), - block_size_, - nullptr)); - } - return target_fd_->Flush(); -} - -FileDescriptorPtr PartitionWriter::ChooseSourceFD( - const InstallOperation& operation, ErrorCode* error) { - if (source_fd_ == nullptr) { - LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr"; - return nullptr; - } - - if (!operation.has_src_sha256_hash()) { - // When the operation doesn't include a source hash, we attempt the error - // corrected device first since we can't verify the block in the raw device - // at this point, but we first need to make sure all extents are readable - // since the error corrected device can be shorter or not available. - if (OpenCurrentECCPartition() && - fd_utils::ReadAndHashExtents( - source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) { - return source_ecc_fd_; - } - return source_fd_; - } - - brillo::Blob source_hash; - brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(), - operation.src_sha256_hash().end()); - if (fd_utils::ReadAndHashExtents( - source_fd_, operation.src_extents(), block_size_, &source_hash) && - source_hash == expected_source_hash) { - return source_fd_; - } - // We fall back to use the error corrected device if the hash of the raw - // device doesn't match or there was an error reading the source partition. - if (!OpenCurrentECCPartition()) { - // The following function call will return false since the source hash - // mismatches, but we still want to call it so it prints the appropriate - // log message. 
- ValidateSourceHash(source_hash, operation, source_fd_, error); - return nullptr; - } - LOG(WARNING) << "Source hash from RAW device mismatched: found " - << base::HexEncode(source_hash.data(), source_hash.size()) - << ", expected " - << base::HexEncode(expected_source_hash.data(), - expected_source_hash.size()); - - if (fd_utils::ReadAndHashExtents( - source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) && - ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) { - // At this point reading from the error corrected device worked, but - // reading from the raw device failed, so this is considered a recovered - // failure. - source_ecc_recovered_failures_++; - return source_ecc_fd_; - } - return nullptr; -} - bool DeltaPerformer::ExtentsToBsdiffPositionsString( const RepeatedPtrField& extents, uint64_t block_size, @@ -1322,69 +936,6 @@ bool DeltaPerformer::ExtentsToBsdiffPositionsString( return true; } -namespace { - -class BsdiffExtentFile : public bsdiff::FileInterface { - public: - BsdiffExtentFile(std::unique_ptr reader, size_t size) - : BsdiffExtentFile(std::move(reader), nullptr, size) {} - BsdiffExtentFile(std::unique_ptr writer, size_t size) - : BsdiffExtentFile(nullptr, std::move(writer), size) {} - - ~BsdiffExtentFile() override = default; - - bool Read(void* buf, size_t count, size_t* bytes_read) override { - TEST_AND_RETURN_FALSE(reader_->Read(buf, count)); - *bytes_read = count; - offset_ += count; - return true; - } - - bool Write(const void* buf, size_t count, size_t* bytes_written) override { - TEST_AND_RETURN_FALSE(writer_->Write(buf, count)); - *bytes_written = count; - offset_ += count; - return true; - } - - bool Seek(off_t pos) override { - if (reader_ != nullptr) { - TEST_AND_RETURN_FALSE(reader_->Seek(pos)); - offset_ = pos; - } else { - // For writes technically there should be no change of position, or it - // should be equivalent of current offset. 
- TEST_AND_RETURN_FALSE(offset_ == static_cast(pos)); - } - return true; - } - - bool Close() override { return true; } - - bool GetSize(uint64_t* size) override { - *size = size_; - return true; - } - - private: - BsdiffExtentFile(std::unique_ptr reader, - std::unique_ptr writer, - size_t size) - : reader_(std::move(reader)), - writer_(std::move(writer)), - size_(size), - offset_(0) {} - - std::unique_ptr reader_; - std::unique_ptr writer_; - uint64_t size_; - uint64_t offset_; - - DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile); -}; - -} // namespace - bool DeltaPerformer::PerformSourceBsdiffOperation( const InstallOperation& operation, ErrorCode* error) { // Since we delete data off the beginning of the buffer as we use it, @@ -1402,110 +953,6 @@ bool DeltaPerformer::PerformSourceBsdiffOperation( return true; } -bool PartitionWriter::PerformSourceBsdiffOperation( - const InstallOperation& operation, - ErrorCode* error, - const void* data, - size_t count) { - FileDescriptorPtr source_fd = ChooseSourceFD(operation, error); - TEST_AND_RETURN_FALSE(source_fd != nullptr); - - auto reader = std::make_unique(); - TEST_AND_RETURN_FALSE( - reader->Init(source_fd, operation.src_extents(), block_size_)); - auto src_file = std::make_unique( - std::move(reader), - utils::BlocksInExtents(operation.src_extents()) * block_size_); - - auto writer = std::make_unique(); - TEST_AND_RETURN_FALSE( - writer->Init(target_fd_, operation.dst_extents(), block_size_)); - auto dst_file = std::make_unique( - std::move(writer), - utils::BlocksInExtents(operation.dst_extents()) * block_size_); - - TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file), - std::move(dst_file), - static_cast(data), - count) == 0); - return target_fd_->Flush(); -} - -namespace { - -// A class to be passed to |puffpatch| for reading from |source_fd_| and writing -// into |target_fd_|. -class PuffinExtentStream : public puffin::StreamInterface { - public: - // Constructor for creating a stream for reading from an |ExtentReader|. - PuffinExtentStream(std::unique_ptr reader, uint64_t size) - : PuffinExtentStream(std::move(reader), nullptr, size) {} - - // Constructor for creating a stream for writing to an |ExtentWriter|. - PuffinExtentStream(std::unique_ptr writer, uint64_t size) - : PuffinExtentStream(nullptr, std::move(writer), size) {} - - ~PuffinExtentStream() override = default; - - bool GetSize(uint64_t* size) const override { - *size = size_; - return true; - } - - bool GetOffset(uint64_t* offset) const override { - *offset = offset_; - return true; - } - - bool Seek(uint64_t offset) override { - if (is_read_) { - TEST_AND_RETURN_FALSE(reader_->Seek(offset)); - offset_ = offset; - } else { - // For writes technically there should be no change of position, or it - // should equivalent of current offset. - TEST_AND_RETURN_FALSE(offset_ == offset); - } - return true; - } - - bool Read(void* buffer, size_t count) override { - TEST_AND_RETURN_FALSE(is_read_); - TEST_AND_RETURN_FALSE(reader_->Read(buffer, count)); - offset_ += count; - return true; - } - - bool Write(const void* buffer, size_t count) override { - TEST_AND_RETURN_FALSE(!is_read_); - TEST_AND_RETURN_FALSE(writer_->Write(buffer, count)); - offset_ += count; - return true; - } - - bool Close() override { return true; } - - private: - PuffinExtentStream(std::unique_ptr reader, - std::unique_ptr writer, - uint64_t size) - : reader_(std::move(reader)), - writer_(std::move(writer)), - size_(size), - offset_(0), - is_read_(reader_ ? 
true : false) {} - - std::unique_ptr reader_; - std::unique_ptr writer_; - uint64_t size_; - uint64_t offset_; - bool is_read_; - - DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream); -}; - -} // namespace - bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation, ErrorCode* error) { // Since we delete data off the beginning of the buffer as we use it, @@ -1518,37 +965,6 @@ bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation, return true; } -bool PartitionWriter::PerformPuffDiffOperation( - const InstallOperation& operation, - ErrorCode* error, - const void* data, - size_t count) { - FileDescriptorPtr source_fd = ChooseSourceFD(operation, error); - TEST_AND_RETURN_FALSE(source_fd != nullptr); - - auto reader = std::make_unique(); - TEST_AND_RETURN_FALSE( - reader->Init(source_fd, operation.src_extents(), block_size_)); - puffin::UniqueStreamPtr src_stream(new PuffinExtentStream( - std::move(reader), - utils::BlocksInExtents(operation.src_extents()) * block_size_)); - - auto writer = std::make_unique(); - TEST_AND_RETURN_FALSE( - writer->Init(target_fd_, operation.dst_extents(), block_size_)); - puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream( - std::move(writer), - utils::BlocksInExtents(operation.dst_extents()) * block_size_)); - - const size_t kMaxCacheSize = 5 * 1024 * 1024; // Total 5MB cache. - TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream), - std::move(dst_stream), - static_cast(data), - count, - kMaxCacheSize)); - return target_fd_->Flush(); -} - bool DeltaPerformer::ExtractSignatureMessage() { TEST_AND_RETURN_FALSE(signatures_message_data_.empty()); TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset()); @@ -1557,11 +973,11 @@ bool DeltaPerformer::ExtractSignatureMessage() { buffer_.begin(), buffer_.begin() + manifest_.signatures_size()); // Save the signature blob because if the update is interrupted after the - // download phase we don't go through this path anymore. Some alternatives to - // consider: + // download phase we don't go through this path anymore. Some alternatives + // to consider: // - // 1. On resume, re-download the signature blob from the server and re-verify - // it. + // 1. On resume, re-download the signature blob from the server and + // re-verify it. // // 2. Verify the signature as soon as it's received and don't checkpoint the // blob and the signed sha-256 context. @@ -1584,8 +1000,8 @@ bool DeltaPerformer::GetPublicKey(string* out_public_key) { return utils::ReadFile(public_key_path_, out_public_key); } - // If this is an official build then we are not allowed to use public key from - // Omaha response. + // If this is an official build then we are not allowed to use public key + // from Omaha response. if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) { LOG(INFO) << "Verifying using public key from Omaha response."; return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa, @@ -1704,9 +1120,9 @@ ErrorCode DeltaPerformer::CheckTimestampError() const { // Check version field for a given PartitionUpdate object. If an error // is encountered, set |error_code| accordingly. If downgrade is detected, - // |downgrade_detected| is set. Return true if the program should continue to - // check the next partition or not, or false if it should exit early due to - // errors. + // |downgrade_detected| is set. 
Return true if the program should continue + // to check the next partition or not, or false if it should exit early due + // to errors. auto&& timestamp_valid = [this](const PartitionUpdate& partition, bool allow_empty_version, bool* downgrade_detected) -> ErrorCode { @@ -1784,10 +1200,11 @@ ErrorCode DeltaPerformer::ValidateOperationHash( const InstallOperation& operation) { if (!operation.data_sha256_hash().size()) { if (!operation.data_length()) { - // Operations that do not have any data blob won't have any operation hash - // either. So, these operations are always considered validated since the - // metadata that contains all the non-data-blob portions of the operation - // has already been validated. This is true for both HTTP and HTTPS cases. + // Operations that do not have any data blob won't have any operation + // hash either. So, these operations are always considered validated + // since the metadata that contains all the non-data-blob portions of + // the operation has already been validated. This is true for both HTTP + // and HTTPS cases. return ErrorCode::kSuccess; } @@ -1926,8 +1343,8 @@ bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs, return false; int64_t resumed_update_failures; - // Note that storing this value is optional, but if it is there it should not - // be more than the limit. + // Note that storing this value is optional, but if it is there it should + // not be more than the limit. if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) && resumed_update_failures > kMaxResumedUpdateFailures) return false; @@ -2086,20 +1503,4 @@ bool DeltaPerformer::PrimeUpdateState() { return true; } -PartitionWriter::PartitionWriter( - const PartitionUpdate& partition_update, - const InstallPlan::Partition& install_part, - DynamicPartitionControlInterface* dynamic_control, - size_t block_size, - bool is_interactive) - : partition_update_(partition_update), - install_part_(install_part), - dynamic_control_(dynamic_control), - interactive_(is_interactive), - block_size_(block_size) {} - -PartitionWriter::~PartitionWriter() { - Close(); -} - } // namespace chromeos_update_engine diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index bee7fdea..d44f6c20 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -35,6 +35,7 @@ #include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/file_writer.h" #include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/payload_consumer/partition_writer.h" #include "update_engine/payload_consumer/payload_metadata.h" #include "update_engine/payload_consumer/payload_verifier.h" #include "update_engine/update_metadata.pb.h" @@ -46,9 +47,6 @@ class BootControlInterface; class HardwareInterface; class PrefsInterface; -// At the bottom of this file. -class PartitionWriter; - // This class performs the actions in a delta update synchronously. The delta // update itself should be passed in in chunks as it is received. 
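As a rough sketch of that streaming usage (assuming an already constructed performer — its constructor arguments are omitted here — and treating the FeedPayload helper, the 16 KiB chunk size, and the payload path as purely illustrative), a caller reads the payload and forwards each chunk to Write():

#include <cstdio>
#include <vector>

#include "update_engine/common/error_code.h"
#include "update_engine/payload_consumer/delta_performer.h"

namespace chromeos_update_engine {

// Hypothetical helper (illustrative only): stream a payload file into the
// performer in fixed-size chunks, the way bytes would be fed as they arrive.
bool FeedPayload(DeltaPerformer* performer, const char* payload_path) {
  std::FILE* f = std::fopen(payload_path, "rb");
  if (!f)
    return false;
  std::vector<char> buf(16 * 1024);  // Chunk size chosen arbitrarily.
  ErrorCode error = ErrorCode::kSuccess;
  bool ok = true;
  size_t n;
  while ((n = std::fread(buf.data(), 1, buf.size(), f)) > 0) {
    // Write() buffers partial data and applies complete operations as enough
    // bytes accumulate, so chunks need not align with operation boundaries.
    if (!performer->Write(buf.data(), n, &error)) {
      ok = false;
      break;
    }
  }
  std::fclose(f);
  return ok;
}

}  // namespace chromeos_update_engine

On failure, |error| presumably carries the ErrorCode set by the performer; real callers additionally handle resumption and checkpointing, which this sketch leaves out.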
class DeltaPerformer : public FileWriter { @@ -412,88 +410,6 @@ class DeltaPerformer : public FileWriter { DISALLOW_COPY_AND_ASSIGN(DeltaPerformer); }; -class PartitionWriter { - public: - PartitionWriter(const PartitionUpdate& partition_update, - const InstallPlan::Partition& install_part, - DynamicPartitionControlInterface* dynamic_control, - size_t block_size, - bool is_interactive); - ~PartitionWriter(); - // Compare |calculated_hash| with source hash in |operation|, return false and - // dump hash and set |error| if don't match. - // |source_fd| is the file descriptor of the source partition. - static bool ValidateSourceHash(const brillo::Blob& calculated_hash, - const InstallOperation& operation, - const FileDescriptorPtr source_fd, - ErrorCode* error); - - // Perform necessary initialization work before InstallOperation can be - // applied to this partition - [[nodiscard]] bool Init(const InstallPlan* install_plan, - bool source_may_exist); - - int Close(); - - // These perform a specific type of operation and return true on success. - // |error| will be set if source hash mismatch, otherwise |error| might not be - // set even if it fails. - [[nodiscard]] bool PerformReplaceOperation(const InstallOperation& operation, - const void* data, - size_t count); - [[nodiscard]] bool PerformZeroOrDiscardOperation( - const InstallOperation& operation); - - [[nodiscard]] bool PerformSourceCopyOperation( - const InstallOperation& operation, ErrorCode* error); - [[nodiscard]] bool PerformSourceBsdiffOperation( - const InstallOperation& operation, - ErrorCode* error, - const void* data, - size_t count); - [[nodiscard]] bool PerformPuffDiffOperation(const InstallOperation& operation, - ErrorCode* error, - const void* data, - size_t count); - - private: - friend class PartitionWriterTest; - FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest); - - bool OpenCurrentECCPartition(); - // For a given operation, choose the source fd to be used (raw device or error - // correction device) based on the source operation hash. - // Returns nullptr if the source hash mismatch cannot be corrected, and set - // the |error| accordingly. - FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, - ErrorCode* error); - - const PartitionUpdate& partition_update_; - const InstallPlan::Partition& install_part_; - DynamicPartitionControlInterface* dynamic_control_; - std::string source_path_; - std::string target_path_; - FileDescriptorPtr source_fd_; - FileDescriptorPtr target_fd_; - const bool interactive_; - const size_t block_size_; - // File descriptor of the error corrected source partition. Only set while - // updating partition using a delta payload for a partition where error - // correction is available. The size of the error corrected device is smaller - // than the underlying raw device, since it doesn't include the error - // correction blocks. - FileDescriptorPtr source_ecc_fd_{nullptr}; - - // The total number of operations that failed source hash verification but - // passed after falling back to the error-corrected |source_ecc_fd_| device. - uint64_t source_ecc_recovered_failures_{0}; - - // Whether opening the current partition as an error-corrected device failed. - // Used to avoid re-opening the same source partition if it is not actually - // error corrected. 
- bool source_ecc_open_failure_{false}; -}; - } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_DELTA_PERFORMER_H_ diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc new file mode 100644 index 00000000..d47ebee6 --- /dev/null +++ b/payload_consumer/partition_writer.cc @@ -0,0 +1,644 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "update_engine/common/terminator.h" +#include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/bzip_extent_writer.h" +#include "update_engine/payload_consumer/cached_file_descriptor.h" +#include "update_engine/payload_consumer/extent_reader.h" +#include "update_engine/payload_consumer/extent_writer.h" +#include "update_engine/payload_consumer/fec_file_descriptor.h" +#include "update_engine/payload_consumer/file_descriptor_utils.h" +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/payload_consumer/mount_history.h" +#include "update_engine/payload_consumer/payload_constants.h" +#include "update_engine/payload_consumer/xz_extent_writer.h" + +namespace chromeos_update_engine { + +namespace { +constexpr uint64_t kCacheSize = 1024 * 1024; // 1MB + +// Discard the tail of the block device referenced by |fd|, from the offset +// |data_size| until the end of the block device. Returns whether the data was +// discarded. + +bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) { + uint64_t part_size = fd->BlockDevSize(); + if (!part_size || part_size <= data_size) + return false; + + struct blkioctl_request { + int number; + const char* name; + }; + const std::initializer_list blkioctl_requests = { + {BLKDISCARD, "BLKDISCARD"}, + {BLKSECDISCARD, "BLKSECDISCARD"}, +#ifdef BLKZEROOUT + {BLKZEROOUT, "BLKZEROOUT"}, +#endif + }; + for (const auto& req : blkioctl_requests) { + int error = 0; + if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) && + error == 0) { + return true; + } + LOG(WARNING) << "Error discarding the last " + << (part_size - data_size) / 1024 << " KiB using ioctl(" + << req.name << ")"; + } + return false; +} + +} // namespace + +// Opens path for read/write. On success returns an open FileDescriptor +// and sets *err to 0. On failure, sets *err to errno and returns nullptr. +FileDescriptorPtr OpenFile(const char* path, + int mode, + bool cache_writes, + int* err) { + // Try to mark the block device read-only based on the mode. Ignore any + // failure since this won't work when passing regular files. 
+ bool read_only = (mode & O_ACCMODE) == O_RDONLY; + utils::SetBlockDeviceReadOnly(path, read_only); + + FileDescriptorPtr fd(new EintrSafeFileDescriptor()); + if (cache_writes && !read_only) { + fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize)); + LOG(INFO) << "Caching writes."; + } + if (!fd->Open(path, mode, 000)) { + *err = errno; + PLOG(ERROR) << "Unable to open file " << path; + return nullptr; + } + *err = 0; + return fd; +} + +class BsdiffExtentFile : public bsdiff::FileInterface { + public: + BsdiffExtentFile(std::unique_ptr reader, size_t size) + : BsdiffExtentFile(std::move(reader), nullptr, size) {} + BsdiffExtentFile(std::unique_ptr writer, size_t size) + : BsdiffExtentFile(nullptr, std::move(writer), size) {} + + ~BsdiffExtentFile() override = default; + + bool Read(void* buf, size_t count, size_t* bytes_read) override { + TEST_AND_RETURN_FALSE(reader_->Read(buf, count)); + *bytes_read = count; + offset_ += count; + return true; + } + + bool Write(const void* buf, size_t count, size_t* bytes_written) override { + TEST_AND_RETURN_FALSE(writer_->Write(buf, count)); + *bytes_written = count; + offset_ += count; + return true; + } + + bool Seek(off_t pos) override { + if (reader_ != nullptr) { + TEST_AND_RETURN_FALSE(reader_->Seek(pos)); + offset_ = pos; + } else { + // For writes technically there should be no change of position, or it + // should be equivalent of current offset. + TEST_AND_RETURN_FALSE(offset_ == static_cast(pos)); + } + return true; + } + + bool Close() override { return true; } + + bool GetSize(uint64_t* size) override { + *size = size_; + return true; + } + + private: + BsdiffExtentFile(std::unique_ptr reader, + std::unique_ptr writer, + size_t size) + : reader_(std::move(reader)), + writer_(std::move(writer)), + size_(size), + offset_(0) {} + + std::unique_ptr reader_; + std::unique_ptr writer_; + uint64_t size_; + uint64_t offset_; + + DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile); +}; +// A class to be passed to |puffpatch| for reading from |source_fd_| and writing +// into |target_fd_|. +class PuffinExtentStream : public puffin::StreamInterface { + public: + // Constructor for creating a stream for reading from an |ExtentReader|. + PuffinExtentStream(std::unique_ptr reader, uint64_t size) + : PuffinExtentStream(std::move(reader), nullptr, size) {} + + // Constructor for creating a stream for writing to an |ExtentWriter|. + PuffinExtentStream(std::unique_ptr writer, uint64_t size) + : PuffinExtentStream(nullptr, std::move(writer), size) {} + + ~PuffinExtentStream() override = default; + + bool GetSize(uint64_t* size) const override { + *size = size_; + return true; + } + + bool GetOffset(uint64_t* offset) const override { + *offset = offset_; + return true; + } + + bool Seek(uint64_t offset) override { + if (is_read_) { + TEST_AND_RETURN_FALSE(reader_->Seek(offset)); + offset_ = offset; + } else { + // For writes technically there should be no change of position, or it + // should equivalent of current offset. 
+ TEST_AND_RETURN_FALSE(offset_ == offset); + } + return true; + } + + bool Read(void* buffer, size_t count) override { + TEST_AND_RETURN_FALSE(is_read_); + TEST_AND_RETURN_FALSE(reader_->Read(buffer, count)); + offset_ += count; + return true; + } + + bool Write(const void* buffer, size_t count) override { + TEST_AND_RETURN_FALSE(!is_read_); + TEST_AND_RETURN_FALSE(writer_->Write(buffer, count)); + offset_ += count; + return true; + } + + bool Close() override { return true; } + + private: + PuffinExtentStream(std::unique_ptr reader, + std::unique_ptr writer, + uint64_t size) + : reader_(std::move(reader)), + writer_(std::move(writer)), + size_(size), + offset_(0), + is_read_(reader_ ? true : false) {} + + std::unique_ptr reader_; + std::unique_ptr writer_; + uint64_t size_; + uint64_t offset_; + bool is_read_; + + DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream); +}; + +PartitionWriter::PartitionWriter( + const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive) + : partition_update_(partition_update), + install_part_(install_part), + dynamic_control_(dynamic_control), + interactive_(is_interactive), + block_size_(block_size) {} + +PartitionWriter::~PartitionWriter() { + Close(); +} + +bool PartitionWriter::Init(const InstallPlan* install_plan, + bool source_may_exist) { + const PartitionUpdate& partition = partition_update_; + uint32_t source_slot = install_plan->source_slot; + uint32_t target_slot = install_plan->target_slot; + + // We shouldn't open the source partition in certain cases, e.g. some dynamic + // partitions in delta payload, partitions included in the full payload for + // partial updates. Use the source size as the indicator. + if (source_may_exist && install_part_.source_size > 0) { + source_path_ = install_part_.source_path; + int err; + source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err); + if (!source_fd_) { + LOG(ERROR) << "Unable to open source partition " + << partition.partition_name() << " on slot " + << BootControlInterface::SlotName(source_slot) << ", file " + << source_path_; + return false; + } + } + + target_path_ = install_part_.target_path; + int err; + + int flags = O_RDWR; + if (!interactive_) + flags |= O_DSYNC; + + LOG(INFO) << "Opening " << target_path_ << " partition with" + << (interactive_ ? "out" : "") << " O_DSYNC"; + + target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err); + if (!target_fd_) { + LOG(ERROR) << "Unable to open target partition " + << partition.partition_name() << " on slot " + << BootControlInterface::SlotName(target_slot) << ", file " + << target_path_; + return false; + } + + LOG(INFO) << "Applying " << partition.operations().size() + << " operations to partition \"" << partition.partition_name() + << "\""; + + // Discard the end of the partition, but ignore failures. + DiscardPartitionTail(target_fd_, install_part_.target_size); + + return true; +} + +bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation, + const void* data, + size_t count) { + // Setup the ExtentWriter stack based on the operation type. 
+ std::unique_ptr writer = std::make_unique(); + + if (operation.type() == InstallOperation::REPLACE_BZ) { + writer.reset(new BzipExtentWriter(std::move(writer))); + } else if (operation.type() == InstallOperation::REPLACE_XZ) { + writer.reset(new XzExtentWriter(std::move(writer))); + } + + TEST_AND_RETURN_FALSE( + writer->Init(target_fd_, operation.dst_extents(), block_size_)); + TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length())); + + return target_fd_->Flush(); +} + +bool PartitionWriter::PerformZeroOrDiscardOperation( + const InstallOperation& operation) { +#ifdef BLKZEROOUT + bool attempt_ioctl = true; + int request = + (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD); +#else // !defined(BLKZEROOUT) + bool attempt_ioctl = false; + int request = 0; +#endif // !defined(BLKZEROOUT) + + brillo::Blob zeros; + for (const Extent& extent : operation.dst_extents()) { + const uint64_t start = extent.start_block() * block_size_; + const uint64_t length = extent.num_blocks() * block_size_; + if (attempt_ioctl) { + int result = 0; + if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0) + continue; + attempt_ioctl = false; + } + // In case of failure, we fall back to writing 0 to the selected region. + zeros.resize(16 * block_size_); + for (uint64_t offset = 0; offset < length; offset += zeros.size()) { + uint64_t chunk_length = + std::min(length - offset, static_cast(zeros.size())); + TEST_AND_RETURN_FALSE(utils::PWriteAll( + target_fd_, zeros.data(), chunk_length, start + offset)); + } + } + return target_fd_->Flush(); +} + +bool PartitionWriter::PerformSourceCopyOperation( + const InstallOperation& operation, ErrorCode* error) { + TEST_AND_RETURN_FALSE(source_fd_ != nullptr); + + // The device may optimize the SOURCE_COPY operation. + // Being this a device-specific optimization let DynamicPartitionController + // decide it the operation should be skipped. + const PartitionUpdate& partition = partition_update_; + const auto& partition_control = dynamic_control_; + + InstallOperation buf; + bool should_optimize = partition_control->OptimizeOperation( + partition.partition_name(), operation, &buf); + const InstallOperation& optimized = should_optimize ? buf : operation; + + if (operation.has_src_sha256_hash()) { + bool read_ok; + brillo::Blob source_hash; + brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(), + operation.src_sha256_hash().end()); + + // We fall back to use the error corrected device if the hash of the raw + // device doesn't match or there was an error reading the source partition. + // Note that this code will also fall back if writing the target partition + // fails. + if (should_optimize) { + // Hash operation.src_extents(), then copy optimized.src_extents to + // optimized.dst_extents. 
+ read_ok = + fd_utils::ReadAndHashExtents( + source_fd_, operation.src_extents(), block_size_, &source_hash) && + fd_utils::CopyAndHashExtents(source_fd_, + optimized.src_extents(), + target_fd_, + optimized.dst_extents(), + block_size_, + nullptr /* skip hashing */); + } else { + read_ok = fd_utils::CopyAndHashExtents(source_fd_, + operation.src_extents(), + target_fd_, + operation.dst_extents(), + block_size_, + &source_hash); + } + if (read_ok && expected_source_hash == source_hash) + return true; + LOG(WARNING) << "Source hash from RAW device mismatched, attempting to " + "correct using ECC"; + if (!OpenCurrentECCPartition()) { + // The following function call will return false since the source hash + // mismatches, but we still want to call it so it prints the appropriate + // log message. + return ValidateSourceHash(source_hash, operation, source_fd_, error); + } + + LOG(WARNING) << "Source hash from RAW device mismatched: found " + << base::HexEncode(source_hash.data(), source_hash.size()) + << ", expected " + << base::HexEncode(expected_source_hash.data(), + expected_source_hash.size()); + if (should_optimize) { + TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents( + source_ecc_fd_, operation.src_extents(), block_size_, &source_hash)); + TEST_AND_RETURN_FALSE( + fd_utils::CopyAndHashExtents(source_ecc_fd_, + optimized.src_extents(), + target_fd_, + optimized.dst_extents(), + block_size_, + nullptr /* skip hashing */)); + } else { + TEST_AND_RETURN_FALSE( + fd_utils::CopyAndHashExtents(source_ecc_fd_, + operation.src_extents(), + target_fd_, + operation.dst_extents(), + block_size_, + &source_hash)); + } + TEST_AND_RETURN_FALSE( + ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)); + // At this point reading from the error corrected device worked, but + // reading from the raw device failed, so this is considered a recovered + // failure. + source_ecc_recovered_failures_++; + } else { + // When the operation doesn't include a source hash, we attempt the error + // corrected device first since we can't verify the block in the raw device + // at this point, but we fall back to the raw device since the error + // corrected device can be shorter or not available. 
+ + if (OpenCurrentECCPartition() && + fd_utils::CopyAndHashExtents(source_ecc_fd_, + optimized.src_extents(), + target_fd_, + optimized.dst_extents(), + block_size_, + nullptr)) { + return true; + } + TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_, + optimized.src_extents(), + target_fd_, + optimized.dst_extents(), + block_size_, + nullptr)); + } + return target_fd_->Flush(); +} +bool PartitionWriter::PerformSourceBsdiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count) { + FileDescriptorPtr source_fd = ChooseSourceFD(operation, error); + TEST_AND_RETURN_FALSE(source_fd != nullptr); + + auto reader = std::make_unique(); + TEST_AND_RETURN_FALSE( + reader->Init(source_fd, operation.src_extents(), block_size_)); + auto src_file = std::make_unique( + std::move(reader), + utils::BlocksInExtents(operation.src_extents()) * block_size_); + + auto writer = std::make_unique(); + TEST_AND_RETURN_FALSE( + writer->Init(target_fd_, operation.dst_extents(), block_size_)); + auto dst_file = std::make_unique( + std::move(writer), + utils::BlocksInExtents(operation.dst_extents()) * block_size_); + + TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file), + std::move(dst_file), + reinterpret_cast(data), + count) == 0); + return target_fd_->Flush(); +} + +bool PartitionWriter::PerformPuffDiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count) { + FileDescriptorPtr source_fd = ChooseSourceFD(operation, error); + TEST_AND_RETURN_FALSE(source_fd != nullptr); + + auto reader = std::make_unique(); + TEST_AND_RETURN_FALSE( + reader->Init(source_fd, operation.src_extents(), block_size_)); + puffin::UniqueStreamPtr src_stream(new PuffinExtentStream( + std::move(reader), + utils::BlocksInExtents(operation.src_extents()) * block_size_)); + + auto writer = std::make_unique(); + TEST_AND_RETURN_FALSE( + writer->Init(target_fd_, operation.dst_extents(), block_size_)); + puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream( + std::move(writer), + utils::BlocksInExtents(operation.dst_extents()) * block_size_)); + + constexpr size_t kMaxCacheSize = 5 * 1024 * 1024; // Total 5MB cache. + TEST_AND_RETURN_FALSE( + puffin::PuffPatch(std::move(src_stream), + std::move(dst_stream), + reinterpret_cast(data), + count, + kMaxCacheSize)); + return target_fd_->Flush(); +} + +FileDescriptorPtr PartitionWriter::ChooseSourceFD( + const InstallOperation& operation, ErrorCode* error) { + if (source_fd_ == nullptr) { + LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr"; + return nullptr; + } + + if (!operation.has_src_sha256_hash()) { + // When the operation doesn't include a source hash, we attempt the error + // corrected device first since we can't verify the block in the raw device + // at this point, but we first need to make sure all extents are readable + // since the error corrected device can be shorter or not available. 
+ if (OpenCurrentECCPartition() && + fd_utils::ReadAndHashExtents( + source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) { + return source_ecc_fd_; + } + return source_fd_; + } + + brillo::Blob source_hash; + brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(), + operation.src_sha256_hash().end()); + if (fd_utils::ReadAndHashExtents( + source_fd_, operation.src_extents(), block_size_, &source_hash) && + source_hash == expected_source_hash) { + return source_fd_; + } + // We fall back to use the error corrected device if the hash of the raw + // device doesn't match or there was an error reading the source partition. + if (!OpenCurrentECCPartition()) { + // The following function call will return false since the source hash + // mismatches, but we still want to call it so it prints the appropriate + // log message. + ValidateSourceHash(source_hash, operation, source_fd_, error); + return nullptr; + } + LOG(WARNING) << "Source hash from RAW device mismatched: found " + << base::HexEncode(source_hash.data(), source_hash.size()) + << ", expected " + << base::HexEncode(expected_source_hash.data(), + expected_source_hash.size()); + + if (fd_utils::ReadAndHashExtents( + source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) && + ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) { + // At this point reading from the error corrected device worked, but + // reading from the raw device failed, so this is considered a recovered + // failure. + source_ecc_recovered_failures_++; + return source_ecc_fd_; + } + return nullptr; +} + +bool PartitionWriter::OpenCurrentECCPartition() { + // No support for ECC for full payloads. + // Full payload should not have any opeartion that requires ECC partitions. + if (source_ecc_fd_) + return true; + + if (source_ecc_open_failure_) + return false; + +#if USE_FEC + const PartitionUpdate& partition = partition_update_; + const InstallPlan::Partition& install_part = install_part_; + std::string path = install_part.source_path; + FileDescriptorPtr fd(new FecFileDescriptor()); + if (!fd->Open(path.c_str(), O_RDONLY, 0)) { + PLOG(ERROR) << "Unable to open ECC source partition " + << partition.partition_name() << ", file " << path; + source_ecc_open_failure_ = true; + return false; + } + source_ecc_fd_ = fd; +#else + // No support for ECC compiled. + source_ecc_open_failure_ = true; +#endif // USE_FEC + + return !source_ecc_open_failure_; +} + +int PartitionWriter::Close() { + int err = 0; + if (source_fd_ && !source_fd_->Close()) { + err = errno; + PLOG(ERROR) << "Error closing source partition"; + if (!err) + err = 1; + } + source_fd_.reset(); + source_path_.clear(); + + if (target_fd_ && !target_fd_->Close()) { + err = errno; + PLOG(ERROR) << "Error closing target partition"; + if (!err) + err = 1; + } + target_fd_.reset(); + target_path_.clear(); + + if (source_ecc_fd_ && !source_ecc_fd_->Close()) { + err = errno; + PLOG(ERROR) << "Error closing ECC source partition"; + if (!err) + err = 1; + } + source_ecc_fd_.reset(); + source_ecc_open_failure_ = false; + return -err; +} +} // namespace chromeos_update_engine diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h new file mode 100644 index 00000000..624a411b --- /dev/null +++ b/payload_consumer/partition_writer.h @@ -0,0 +1,113 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_PARTITION_WRITER_H_ +#define UPDATE_ENGINE_PARTITION_WRITER_H_ + +#include +#include + +#include +#include + +#include "update_engine/common/dynamic_partition_control_interface.h" +#include "update_engine/payload_consumer/file_descriptor.h" +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/update_metadata.pb.h" +namespace chromeos_update_engine { +class PartitionWriter { + public: + PartitionWriter(const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive); + ~PartitionWriter(); + static bool ValidateSourceHash(const brillo::Blob& calculated_hash, + const InstallOperation& operation, + const FileDescriptorPtr source_fd, + ErrorCode* error); + + // Perform necessary initialization work before InstallOperation can be + // applied to this partition + [[nodiscard]] bool Init(const InstallPlan* install_plan, + bool source_may_exist); + + int Close(); + + // These perform a specific type of operation and return true on success. + // |error| will be set if source hash mismatch, otherwise |error| might not be + // set even if it fails. + [[nodiscard]] bool PerformReplaceOperation(const InstallOperation& operation, + const void* data, + size_t count); + [[nodiscard]] bool PerformZeroOrDiscardOperation( + const InstallOperation& operation); + + [[nodiscard]] bool PerformSourceCopyOperation( + const InstallOperation& operation, ErrorCode* error); + [[nodiscard]] bool PerformSourceBsdiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count); + [[nodiscard]] bool PerformPuffDiffOperation(const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count); + + private: + friend class PartitionWriterTest; + FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest); + + bool OpenCurrentECCPartition(); + // For a given operation, choose the source fd to be used (raw device or error + // correction device) based on the source operation hash. + // Returns nullptr if the source hash mismatch cannot be corrected, and set + // the |error| accordingly. + FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, + ErrorCode* error); + + const PartitionUpdate& partition_update_; + const InstallPlan::Partition& install_part_; + DynamicPartitionControlInterface* dynamic_control_; + // Path to source partition + std::string source_path_; + // Path to target partition + std::string target_path_; + FileDescriptorPtr source_fd_; + FileDescriptorPtr target_fd_; + const bool interactive_; + const size_t block_size_; + // File descriptor of the error corrected source partition. Only set while + // updating partition using a delta payload for a partition where error + // correction is available. The size of the error corrected device is smaller + // than the underlying raw device, since it doesn't include the error + // correction blocks. 
+ FileDescriptorPtr source_ecc_fd_{nullptr}; + + // The total number of operations that failed source hash verification but + // passed after falling back to the error-corrected |source_ecc_fd_| device. + uint64_t source_ecc_recovered_failures_{0}; + + // Whether opening the current partition as an error-corrected device failed. + // Used to avoid re-opening the same source partition if it is not actually + // error corrected. + bool source_ecc_open_failure_{false}; +}; +} // namespace chromeos_update_engine + +#endif From 300a8eb1220ca16074341849804b6f6c9e1d5e10 Mon Sep 17 00:00:00 2001 From: Tianjie Xu Date: Sat, 3 Oct 2020 07:27:00 +0000 Subject: [PATCH 411/624] Revert "Link to libsnapshot_cow where libsnapshot is linked." Revert "libsnapshot: Partially implement OpenSnapshotWriter." Revert "Link to libsnapshot_cow everywhere libsnapshot is linked." Revert submission 1433573-vab-libsnapshot-linkage Reason for revert: b/169981170, update crash for droidfooders. Reverted Changes: Ie75bba98c:Link to libsnapshot_cow where libsnapshot is linke... Ieedfadc55:libsnapshot: Partially implement OpenSnapshotWrite... I28a5d4a88:Link to libsnapshot_cow everywhere libsnapshot is ... Change-Id: Ib71dc9fd3edd7a79d075c60a32ad30f8e589af42 --- Android.bp | 1 - 1 file changed, 1 deletion(-) diff --git a/Android.bp b/Android.bp index 8e9ec17f..008a4322 100644 --- a/Android.bp +++ b/Android.bp @@ -200,7 +200,6 @@ cc_defaults { "libgsi", "libpayload_consumer", "libsnapshot", - "libsnapshot_cow", "update_metadata-protos", ], shared_libs: [ From e331f5a9cb761d0f656872a8639e650ac7da7493 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 6 Oct 2020 15:53:29 -0700 Subject: [PATCH 412/624] update_engine: Verify payload hash before its signature The test images don't have public key nor they are signed, so some of the autotests have been failing because they can't verify the rejection behavior of mismatched update payload hash. So we need to check for the hash before checking for the signature. BUG=b:170254160 TEST=test_that chromeos6-row4-rack9-host19.cros autoupdate_OmahaResponse.bad_sha256.full Change-Id: Ice4f9d827fb913e6eec55c922163cba0de98ebb9 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2454830 Tested-by: Amin Hassani Reviewed-by: Tianjie Xu Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Amin Hassani --- payload_consumer/delta_performer.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 08eba028..d2ed24ab 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1802,6 +1802,13 @@ ErrorCode DeltaPerformer::VerifyPayload( return ErrorCode::kPayloadSizeMismatchError; } + // Verifies the payload hash. + TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError, + !payload_hash_calculator_.raw_hash().empty()); + TEST_AND_RETURN_VAL( + ErrorCode::kPayloadHashMismatchError, + payload_hash_calculator_.raw_hash() == update_check_response_hash); + auto [payload_verifier, perform_verification] = CreatePayloadVerifier(); if (!perform_verification) { LOG(WARNING) << "Not verifying signed delta payload -- missing public key."; @@ -1812,13 +1819,6 @@ ErrorCode DeltaPerformer::VerifyPayload( return ErrorCode::kDownloadPayloadPubKeyVerificationError; } - // Verifies the payload hash. 
- TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError, - !payload_hash_calculator_.raw_hash().empty()); - TEST_AND_RETURN_VAL( - ErrorCode::kPayloadHashMismatchError, - payload_hash_calculator_.raw_hash() == update_check_response_hash); - TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError, !signatures_message_data_.empty()); brillo::Blob hash_data = signed_hash_calculator_.raw_hash(); From 309c8a883eeca072bd30c563c22c001fa6e41e2a Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 6 Oct 2020 18:31:08 -0700 Subject: [PATCH 413/624] Make update_engine_stable_client product_specific. For its dependencies, use vendor_available to indicate it can be used by core and product variants, because product_available is not available. Test: builds Bug: 161563386 Bug: 150902910 Change-Id: I6a951ee3ea91c945a830a32c0a117af4c73bb6b8 --- Android.bp | 7 +++++++ stable/Android.bp | 11 +++++++++-- stable/update_engine_stable_client.cc | 1 - 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Android.bp b/Android.bp index 008a4322..9187e675 100644 --- a/Android.bp +++ b/Android.bp @@ -753,6 +753,13 @@ sh_binary { // update_engine header library cc_library_headers { name: "libupdate_engine_headers", + + // This header library is available to core and product modules. + // Right now, vendor_available is the only way to specify this. + // vendor modules should NOT use this library. + // TODO(b/150902910): change this to product_available. + vendor_available: true, + export_include_dirs: ["."], apex_available: [ "com.android.gki.*", diff --git a/stable/Android.bp b/stable/Android.bp index 337ae96e..a415ac5e 100644 --- a/stable/Android.bp +++ b/stable/Android.bp @@ -18,6 +18,13 @@ // ======================================================== aidl_interface { name: "libupdate_engine_stable", + + // This header library is available to core and product modules. + // Right now, vendor_available is the only way to specify this. + // vendor modules should NOT use this library. + // TODO(b/150902910): change this to product_available. + vendor_available: true, + srcs: [ "android/os/IUpdateEngineStable.aidl", "android/os/IUpdateEngineStableCallback.aidl", @@ -40,10 +47,10 @@ aidl_interface { // update_engine_stable_client (type: executable) // ======================================================== -// update_engine console client installed to APEXes +// update_engine console client installed to APEXes. cc_binary { name: "update_engine_stable_client", - + product_specific: true, header_libs: [ "libupdate_engine_headers", ], diff --git a/stable/update_engine_stable_client.cc b/stable/update_engine_stable_client.cc index da203c4c..17f66b67 100644 --- a/stable/update_engine_stable_client.cc +++ b/stable/update_engine_stable_client.cc @@ -32,7 +32,6 @@ #include #include #include -#include namespace chromeos_update_engine::internal { From ce99ee79f77887c5b680b1503490484632201ca7 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 7 Oct 2020 22:24:33 -0700 Subject: [PATCH 414/624] update_engine: Fix failing unittests on ARM A few of the unittests fail when running on ARM. Android has already fixed those problems and protected them with __ANDROID__. We can just start using them in Chrome OS too. 
BUG=b:165005216 TEST=FEATURES=test emerge-gale update_engine Change-Id: I7f8820c2ba81f23144257d78aa6c0c599f645e6a Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2460235 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- libcurl_http_fetcher.cc | 9 ++++----- libcurl_http_fetcher_unittest.cc | 19 +++---------------- 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc index bce09209..1599aac1 100644 --- a/libcurl_http_fetcher.cc +++ b/libcurl_http_fetcher.cc @@ -458,10 +458,10 @@ void LibcurlHttpFetcher::CurlPerformOnce() { // There's either more work to do or we are paused, so we just keep the // file descriptors to watch up to date and exit, until we are done with the // work and we are not paused. -#ifdef __ANDROID__ - // When there's no base::SingleThreadTaskRunner on current thread, it's not - // possible to watch file descriptors. Just poll it later. This usually - // happens if brillo::FakeMessageLoop is used. + // + // When there's no |base::SingleThreadTaskRunner| on current thread, it's + // not possible to watch file descriptors. Just poll it later. This usually + // happens if |brillo::FakeMessageLoop| is used. if (!base::ThreadTaskRunnerHandle::IsSet()) { MessageLoop::current()->PostDelayedTask( FROM_HERE, @@ -470,7 +470,6 @@ void LibcurlHttpFetcher::CurlPerformOnce() { TimeDelta::FromSeconds(1)); return; } -#endif SetupMessageLoopSources(); return; } diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc index 874ef2e9..5d675707 100644 --- a/libcurl_http_fetcher_unittest.cc +++ b/libcurl_http_fetcher_unittest.cc @@ -100,7 +100,6 @@ TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) { libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); -#ifdef __ANDROID__ // It's slower on Android that libcurl handle may not finish within 1 cycle. // Will need to wait for more cycles until it finishes. Original test didn't // correctly handle when we need to re-watch libcurl fds. @@ -108,10 +107,7 @@ TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) { libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) { loop_.RunOnce(true); } -#else - // The first time it can't resolve. - loop_.RunOnce(true); -#endif + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), ErrorCode::kUnresolvedHostError); @@ -141,7 +137,6 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { // easier to mock the part that depends on internet connectivity. libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid"); -#ifdef __ANDROID__ // It's slower on Android that libcurl handle may not finish within 1 cycle. // Will need to wait for more cycles until it finishes. Original test didn't // correctly handle when we need to re-watch libcurl fds. @@ -149,10 +144,7 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) { loop_.RunOnce(true); } -#else - // The first time it can't resolve. - loop_.RunOnce(true); -#endif + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), ErrorCode::kUnresolvedHostError); @@ -165,7 +157,6 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { [this]() { libcurl_fetcher_.http_response_code_ = 0; })); libcurl_fetcher_.transfer_size_ = 10; -#ifdef __ANDROID__ // It's slower on Android that libcurl handle may not finish within 1 cycle. // Will need to wait for more cycles until it finishes. 
Original test didn't // correctly handle when we need to re-watch libcurl fds. @@ -173,11 +164,7 @@ TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) { ErrorCode::kUnresolvedHostError) { loop_.RunOnce(true); } -#else - // This time the host is resolved. But after that again we can't resolve - // anymore (See above). - loop_.RunOnce(true); -#endif + EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(), ErrorCode::kUnresolvedHostRecovered); From 4b883ea0aceed443fc790e4459af61c27cd162e0 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 8 Oct 2020 13:26:44 -0400 Subject: [PATCH 415/624] Make update_device.py script compatible with python3 Test: serve an OTA Change-Id: I2e787db375b8dd4b24bc51dff6fef1002c4b4c5e --- scripts/update_device.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scripts/update_device.py b/scripts/update_device.py index 1cd4b6ae..756d443f 100755 --- a/scripts/update_device.py +++ b/scripts/update_device.py @@ -17,6 +17,7 @@ """Send an A/B update to an Android device over adb.""" +from __future__ import print_function from __future__ import absolute_import import argparse @@ -305,6 +306,7 @@ def run(self): logging.info('Server Terminated') def StopServer(self): + self._httpd.shutdown() self._httpd.socket.close() @@ -318,13 +320,13 @@ def AndroidUpdateCommand(ota_filename, secondary, payload_url, extra_headers): """Return the command to run to start the update in the Android device.""" ota = AndroidOTAPackage(ota_filename, secondary) headers = ota.properties - headers += 'USER_AGENT=Dalvik (something, something)\n' - headers += 'NETWORK_ID=0\n' - headers += extra_headers + headers += b'USER_AGENT=Dalvik (something, something)\n' + headers += b'NETWORK_ID=0\n' + headers += extra_headers.encode() return ['update_engine_client', '--update', '--follow', '--payload=%s' % payload_url, '--offset=%d' % ota.offset, - '--size=%d' % ota.size, '--headers="%s"' % headers] + '--size=%d' % ota.size, '--headers="%s"' % headers.decode()] def OmahaUpdateCommand(omaha_url): From 94f51cc3ffe2e20d7ef523531aa147f9a2cda3a3 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 25 Sep 2020 11:34:49 -0400 Subject: [PATCH 416/624] Add a skeleton class for VABCPartitionWriter Bug: 168554689 Test: treehugger Change-Id: Ib73084b779620db47d2ff4fbb6d87f38047e9211 --- Android.bp | 3 + BUILD.gn | 2 + payload_consumer/delta_performer.cc | 20 +++++-- payload_consumer/delta_performer.h | 23 +++++--- payload_consumer/partition_writer.cc | 26 ++++++--- payload_consumer/partition_writer.h | 44 +++++++++----- .../partition_writer_factory_android.cc | 54 +++++++++++++++++ .../partition_writer_factory_chromeos.cc | 38 ++++++++++++ payload_consumer/vabc_partition_writer.cc | 58 +++++++++++++++++++ payload_consumer/vabc_partition_writer.h | 51 ++++++++++++++++ 10 files changed, 284 insertions(+), 35 deletions(-) create mode 100644 payload_consumer/partition_writer_factory_android.cc create mode 100644 payload_consumer/partition_writer_factory_chromeos.cc create mode 100644 payload_consumer/vabc_partition_writer.cc create mode 100644 payload_consumer/vabc_partition_writer.h diff --git a/Android.bp b/Android.bp index 9187e675..27ba172b 100644 --- a/Android.bp +++ b/Android.bp @@ -124,6 +124,7 @@ cc_defaults { "libfec_rs", "libpuffpatch", "libverity_tree", + "libsnapshot_cow", ], shared_libs: [ "libbase", @@ -179,6 +180,8 @@ cc_library_static { "payload_consumer/payload_metadata.cc", "payload_consumer/payload_verifier.cc", "payload_consumer/partition_writer.cc", + 
"payload_consumer/partition_writer_factory_android.cc", + "payload_consumer/vabc_partition_writer.cc", "payload_consumer/postinstall_runner_action.cc", "payload_consumer/verity_writer_android.cc", "payload_consumer/xz_extent_writer.cc", diff --git a/BUILD.gn b/BUILD.gn index b7de9fc9..9575fab7 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -160,6 +160,8 @@ static_library("libpayload_consumer") { "payload_consumer/install_plan.cc", "payload_consumer/mount_history.cc", "payload_consumer/partition_update_generator_stub.cc", + "payload_consumer/partition_writer_factory_chromeos.cc", + "payload_consumer/partition_writer.cc", "payload_consumer/payload_constants.cc", "payload_consumer/payload_metadata.cc", "payload_consumer/payload_verifier.cc", diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 87fc4cfc..9bf6d7ed 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -241,13 +241,14 @@ bool DeltaPerformer::OpenCurrentPartition() { install_plan_->partitions.size() - partitions_.size(); const InstallPlan::Partition& install_part = install_plan_->partitions[num_previous_partitions + current_partition_]; - partition_writer_ = std::make_unique( + auto dynamic_control = boot_control_->GetDynamicPartitionControl(); + partition_writer_ = partition_writer::CreatePartitionWriter( partition, install_part, - boot_control_->GetDynamicPartitionControl(), + dynamic_control, block_size_, - interactive_); - + interactive_, + IsDynamicPartition(install_part.name)); // Open source fds if we have a delta payload, or for partitions in the // partial update. bool source_may_exist = manifest_.partial_update() || @@ -642,6 +643,11 @@ bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) { } } + auto dynamic_control = boot_control_->GetDynamicPartitionControl(); + CHECK_NE(dynamic_control, nullptr); + TEST_AND_RETURN_FALSE(dynamic_control->ListDynamicPartitionsForSlot( + install_plan_->target_slot, &dynamic_partitions_)); + // Partitions in manifest are no longer needed after preparing partitions. manifest_.clear_partitions(); // TODO(xunchang) TBD: allow partial update only on devices with dynamic @@ -1503,4 +1509,10 @@ bool DeltaPerformer::PrimeUpdateState() { return true; } +bool DeltaPerformer::IsDynamicPartition(const std::string& part_name) { + return std::find(dynamic_partitions_.begin(), + dynamic_partitions_.end(), + part_name) != dynamic_partitions_.end(); +} + } // namespace chromeos_update_engine diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h index d44f6c20..96bc8496 100644 --- a/payload_consumer/delta_performer.h +++ b/payload_consumer/delta_performer.h @@ -299,6 +299,8 @@ class DeltaPerformer : public FileWriter { // a generic error on the device. ErrorCode CheckTimestampError() const; + // Check if partition `part_name` is a dynamic partition. + bool IsDynamicPartition(const std::string& part_name); // Update Engine preference store. PrefsInterface* prefs_; @@ -336,22 +338,22 @@ class DeltaPerformer : public FileWriter { // otherwise 0. size_t num_total_operations_{0}; - // The list of partitions to update as found in the manifest major version 2. - // When parsing an older manifest format, the information is converted over to - // this format instead. + // The list of partitions to update as found in the manifest major + // version 2. When parsing an older manifest format, the information is + // converted over to this format instead. 
std::vector partitions_; // Index in the list of partitions (|partitions_| member) of the current // partition being processed. size_t current_partition_{0}; - // Index of the next operation to perform in the manifest. The index is linear - // on the total number of operation on the manifest. + // Index of the next operation to perform in the manifest. The index is + // linear on the total number of operation on the manifest. size_t next_operation_num_{0}; // A buffer used for accumulating downloaded data. Initially, it stores the - // payload metadata; once that's downloaded and parsed, it stores data for the - // next update operation. + // payload metadata; once that's downloaded and parsed, it stores data for + // the next update operation. brillo::Blob buffer_; // Offset of buffer_ in the binary blobs section of the update. uint64_t buffer_offset_{0}; @@ -393,8 +395,9 @@ class DeltaPerformer : public FileWriter { // If |true|, the update is user initiated (vs. periodic update checks). bool interactive_{false}; - // The timeout after which we should force emitting a progress log (constant), - // and the actual point in time for the next forced log to be emitted. + // The timeout after which we should force emitting a progress log + // (constant), and the actual point in time for the next forced log to be + // emitted. const base::TimeDelta forced_progress_log_wait_{ base::TimeDelta::FromSeconds(kProgressLogTimeoutSeconds)}; base::TimeTicks forced_progress_log_time_; @@ -407,6 +410,8 @@ class DeltaPerformer : public FileWriter { std::unique_ptr partition_writer_; + // List of dynamic partitions on device. + std::vector dynamic_partitions_; DISALLOW_COPY_AND_ASSIGN(DeltaPerformer); }; diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc index d47ebee6..b4b869cd 100644 --- a/payload_consumer/partition_writer.cc +++ b/payload_consumer/partition_writer.cc @@ -308,7 +308,7 @@ bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation, const void* data, size_t count) { // Setup the ExtentWriter stack based on the operation type. 
- std::unique_ptr writer = std::make_unique(); + std::unique_ptr writer = CreateBaseExtentWriter(); if (operation.type() == InstallOperation::REPLACE_BZ) { writer.reset(new BzipExtentWriter(std::move(writer))); @@ -320,7 +320,7 @@ bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation, writer->Init(target_fd_, operation.dst_extents(), block_size_)); TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length())); - return target_fd_->Flush(); + return Flush(); } bool PartitionWriter::PerformZeroOrDiscardOperation( @@ -353,7 +353,7 @@ bool PartitionWriter::PerformZeroOrDiscardOperation( target_fd_, zeros.data(), chunk_length, start + offset)); } } - return target_fd_->Flush(); + return Flush(); } bool PartitionWriter::PerformSourceCopyOperation( @@ -464,8 +464,9 @@ bool PartitionWriter::PerformSourceCopyOperation( block_size_, nullptr)); } - return target_fd_->Flush(); + return Flush(); } + bool PartitionWriter::PerformSourceBsdiffOperation( const InstallOperation& operation, ErrorCode* error, @@ -481,7 +482,7 @@ bool PartitionWriter::PerformSourceBsdiffOperation( std::move(reader), utils::BlocksInExtents(operation.src_extents()) * block_size_); - auto writer = std::make_unique(); + auto writer = CreateBaseExtentWriter(); TEST_AND_RETURN_FALSE( writer->Init(target_fd_, operation.dst_extents(), block_size_)); auto dst_file = std::make_unique( @@ -492,7 +493,7 @@ bool PartitionWriter::PerformSourceBsdiffOperation( std::move(dst_file), reinterpret_cast(data), count) == 0); - return target_fd_->Flush(); + return Flush(); } bool PartitionWriter::PerformPuffDiffOperation( @@ -510,7 +511,7 @@ bool PartitionWriter::PerformPuffDiffOperation( std::move(reader), utils::BlocksInExtents(operation.src_extents()) * block_size_)); - auto writer = std::make_unique(); + auto writer = CreateBaseExtentWriter(); TEST_AND_RETURN_FALSE( writer->Init(target_fd_, operation.dst_extents(), block_size_)); puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream( @@ -524,7 +525,7 @@ bool PartitionWriter::PerformPuffDiffOperation( reinterpret_cast(data), count, kMaxCacheSize)); - return target_fd_->Flush(); + return Flush(); } FileDescriptorPtr PartitionWriter::ChooseSourceFD( @@ -641,4 +642,13 @@ int PartitionWriter::Close() { source_ecc_open_failure_ = false; return -err; } + +std::unique_ptr PartitionWriter::CreateBaseExtentWriter() { + return std::make_unique(); +} + +bool PartitionWriter::Flush() { + return target_fd_->Flush(); +} + } // namespace chromeos_update_engine diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h index 624a411b..1acbddcb 100644 --- a/payload_consumer/partition_writer.h +++ b/payload_consumer/partition_writer.h @@ -18,12 +18,14 @@ #define UPDATE_ENGINE_PARTITION_WRITER_H_ #include +#include #include #include #include #include "update_engine/common/dynamic_partition_control_interface.h" +#include "update_engine/payload_consumer/extent_writer.h" #include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/update_metadata.pb.h" @@ -35,7 +37,7 @@ class PartitionWriter { DynamicPartitionControlInterface* dynamic_control, size_t block_size, bool is_interactive); - ~PartitionWriter(); + virtual ~PartitionWriter(); static bool ValidateSourceHash(const brillo::Blob& calculated_hash, const InstallOperation& operation, const FileDescriptorPtr source_fd, @@ -43,33 +45,34 @@ class PartitionWriter { // Perform necessary initialization work before InstallOperation 
can be // applied to this partition - [[nodiscard]] bool Init(const InstallPlan* install_plan, - bool source_may_exist); + [[nodiscard]] virtual bool Init(const InstallPlan* install_plan, + bool source_may_exist); int Close(); // These perform a specific type of operation and return true on success. // |error| will be set if source hash mismatch, otherwise |error| might not be // set even if it fails. - [[nodiscard]] bool PerformReplaceOperation(const InstallOperation& operation, - const void* data, - size_t count); - [[nodiscard]] bool PerformZeroOrDiscardOperation( + [[nodiscard]] virtual bool PerformReplaceOperation( + const InstallOperation& operation, const void* data, size_t count); + [[nodiscard]] virtual bool PerformZeroOrDiscardOperation( const InstallOperation& operation); - [[nodiscard]] bool PerformSourceCopyOperation( + [[nodiscard]] virtual bool PerformSourceCopyOperation( const InstallOperation& operation, ErrorCode* error); - [[nodiscard]] bool PerformSourceBsdiffOperation( + [[nodiscard]] virtual bool PerformSourceBsdiffOperation( const InstallOperation& operation, ErrorCode* error, const void* data, size_t count); - [[nodiscard]] bool PerformPuffDiffOperation(const InstallOperation& operation, - ErrorCode* error, - const void* data, - size_t count); + [[nodiscard]] virtual bool PerformPuffDiffOperation( + const InstallOperation& operation, + ErrorCode* error, + const void* data, + size_t count); + [[nodiscard]] virtual bool Flush(); - private: + protected: friend class PartitionWriterTest; FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest); @@ -80,6 +83,7 @@ class PartitionWriter { // the |error| accordingly. FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation, ErrorCode* error); + [[nodiscard]] virtual std::unique_ptr CreateBaseExtentWriter(); const PartitionUpdate& partition_update_; const InstallPlan::Partition& install_part_; @@ -108,6 +112,18 @@ class PartitionWriter { // error corrected. bool source_ecc_open_failure_{false}; }; + +namespace partition_writer { +// Return a PartitionWriter instance for perform InstallOps on this partition. +// Uses VABCPartitionWriter for Virtual AB Compression +std::unique_ptr CreatePartitionWriter( + const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive, + bool is_dynamic_partition); +} // namespace partition_writer } // namespace chromeos_update_engine #endif diff --git a/payload_consumer/partition_writer_factory_android.cc b/payload_consumer/partition_writer_factory_android.cc new file mode 100644 index 00000000..0c9f7ea2 --- /dev/null +++ b/payload_consumer/partition_writer_factory_android.cc @@ -0,0 +1,54 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include +#include + +#include + +#include "update_engine/payload_consumer/vabc_partition_writer.h" + +namespace chromeos_update_engine::partition_writer { + +std::unique_ptr CreatePartitionWriter( + const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive, + bool is_dynamic_partition) { + if (dynamic_control && + dynamic_control->GetVirtualAbCompressionFeatureFlag().IsEnabled() && + is_dynamic_partition) { + LOG(INFO) + << "Virtual AB Compression Enabled, using VABC Partition Writer for `" + << install_part.name << '`'; + return std::make_unique(partition_update, + install_part, + dynamic_control, + block_size, + is_interactive); + } else { + LOG(INFO) << "Virtual AB Compression disabled, using Partition Writer for `" + << install_part.name << '`'; + return std::make_unique(partition_update, + install_part, + dynamic_control, + block_size, + is_interactive); + } +} +} // namespace chromeos_update_engine::partition_writer diff --git a/payload_consumer/partition_writer_factory_chromeos.cc b/payload_consumer/partition_writer_factory_chromeos.cc new file mode 100644 index 00000000..609f0431 --- /dev/null +++ b/payload_consumer/partition_writer_factory_chromeos.cc @@ -0,0 +1,38 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include + +#include + +#include "update_engine/payload_consumer/partition_writer.h" + +namespace chromeos_update_engine::partition_writer { +std::unique_ptr CreatePartitionWriter( + const PartitionUpdate& partition_update, + const InstallPlan::Partition& install_part, + DynamicPartitionControlInterface* dynamic_control, + size_t block_size, + bool is_interactive, + bool is_dynamic_partition) { + return std::make_unique(partition_update, + install_part, + dynamic_control, + block_size, + is_interactive); +} +} // namespace chromeos_update_engine::partition_writer diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc new file mode 100644 index 00000000..ab4897fc --- /dev/null +++ b/payload_consumer/vabc_partition_writer.cc @@ -0,0 +1,58 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/payload_consumer/vabc_partition_writer.h" + +#include + +#include + +#include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/extent_writer.h" +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/payload_consumer/partition_writer.h" + +namespace chromeos_update_engine { +bool VABCPartitionWriter::Init(const InstallPlan* install_plan, + bool source_may_exist) { + TEST_AND_RETURN_FALSE(PartitionWriter::Init(install_plan, source_may_exist)); + + // TODO(zhangkelvin) Add code specific to VABC. E.x. Convert InstallOps to + // CowOps, perform all SOURCE_COPY upfront according to merge sequence. + return true; +} + +std::unique_ptr VABCPartitionWriter::CreateBaseExtentWriter() { + // TODO(zhangkelvin) Return a SnapshotExtentWriter + return std::make_unique(); +} + +[[nodiscard]] bool VABCPartitionWriter::PerformZeroOrDiscardOperation( + const InstallOperation& operation) { + // TODO(zhangkelvin) Create a COW_ZERO operation and send it to CowWriter + return PartitionWriter::PerformZeroOrDiscardOperation(operation); +} + +[[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation( + const InstallOperation& operation, ErrorCode* error) { + // TODO(zhangkelvin) Probably just ignore SOURCE_COPY? They should be taken + // care of during Init(); + return true; +} + +VABCPartitionWriter::~VABCPartitionWriter() = default; + +} // namespace chromeos_update_engine diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h new file mode 100644 index 00000000..034fb57a --- /dev/null +++ b/payload_consumer/vabc_partition_writer.h @@ -0,0 +1,51 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_VABC_PARTITION_WRITER_H_ +#define UPDATE_ENGINE_VABC_PARTITION_WRITER_H_ + +#include + +#include + +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/payload_consumer/partition_writer.h" + +namespace chromeos_update_engine { +class VABCPartitionWriter final : public PartitionWriter { + public: + using PartitionWriter::PartitionWriter; + [[nodiscard]] bool Init(const InstallPlan* install_plan, + bool source_may_exist) override; + ~VABCPartitionWriter() override; + + [[nodiscard]] std::unique_ptr CreateBaseExtentWriter() override; + + // Only ZERO and SOURCE_COPY InstallOperations are treated special by VABC + // Partition Writer. These operations correspond to COW_ZERO and COW_COPY. All + // other operations just get converted to COW_REPLACE. 
+ [[nodiscard]] bool PerformZeroOrDiscardOperation( + const InstallOperation& operation) override; + [[nodiscard]] bool PerformSourceCopyOperation( + const InstallOperation& operation, ErrorCode* error) override; + + private: + std::unique_ptr cow_writer_; +}; + +} // namespace chromeos_update_engine + +#endif From 852f57d536a8541f080e1f1feafa9e5c32dc73a5 Mon Sep 17 00:00:00 2001 From: Vyshu Date: Fri, 9 Oct 2020 17:35:14 +0000 Subject: [PATCH 417/624] update engine: clang formatting Fix clang formatting issues for all .cc, .h, and .proto files in update engine. ~/trunk/src/chromium/src/buildtools/linux64/clang-format -i \ -style=file $(find update_engine -name '*.h' -o -name '*.cc' \ -o -name '*.cpp' -o -name '*.c') BUG=b:169679497 TEST=CQ pass Change-Id: I4fde01d3e734dbffaa2c7e7b667503d310abccae Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2462840 Reviewed-by: Amin Hassani Reviewed-by: Jae Hoon Kim Tested-by: Vyshu Khota Commit-Queue: Vyshu Khota --- boot_control_chromeos.cc | 5 +++-- common/subprocess.h | 2 +- omaha_request_params.h | 4 ++-- payload_consumer/install_plan.h | 4 ++-- payload_generator/extent_ranges_unittest.cc | 4 ++-- payload_generator/generate_delta_main.cc | 1 - test_http_server.cc | 2 +- update_metadata.proto | 8 ++++---- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc index 95456f00..da2c8916 100644 --- a/boot_control_chromeos.cc +++ b/boot_control_chromeos.cc @@ -128,8 +128,9 @@ bool BootControlChromeOS::Init() { } if (current_slot_ >= num_slots_) { LOG(ERROR) << "Couldn't find the slot number corresponding to the " - << "partition " << boot_device << ", number of slots: " - << num_slots_ << ". This device is not updateable."; + << "partition " << boot_device + << ", number of slots: " << num_slots_ + << ". This device is not updateable."; num_slots_ = 1; current_slot_ = BootControlInterface::kInvalidSlot; return false; diff --git a/common/subprocess.h b/common/subprocess.h index 179a5c51..2ed8b81f 100644 --- a/common/subprocess.h +++ b/common/subprocess.h @@ -37,7 +37,7 @@ #include #include #endif // __CHROMEOS__ -#include // for FRIEND_TEST +#include // The Subprocess class is a singleton. It's used to spawn off a subprocess // and get notified when the subprocess exits. The result of Exec() can diff --git a/omaha_request_params.h b/omaha_request_params.h index 5d30d583..1e9ab7d6 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -384,8 +384,8 @@ class OmahaRequestParams { // TODO(b:133324571) tracks removal of this field once it is no longer // needed in AU requests. Remove by October 1st 2019. std::string device_requisition_; // Chrome OS Requisition type. - bool delta_okay_; // If this client can accept a delta - bool interactive_; // Whether this is a user-initiated update check + bool delta_okay_; // If this client can accept a delta + bool interactive_; // Whether this is a user-initiated update check // The URL to send the Omaha request to. 
std::string update_url_; diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index f04c6504..5534fb33 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -59,8 +59,8 @@ struct InstallPlan { struct Payload { std::vector payload_urls; // URLs to download the payload - uint64_t size = 0; // size of the payload - uint64_t metadata_size = 0; // size of the metadata + uint64_t size = 0; // size of the payload + uint64_t metadata_size = 0; // size of the metadata std::string metadata_signature; // signature of the metadata in base64 brillo::Blob hash; // SHA256 hash of the payload InstallPayloadType type{InstallPayloadType::kUnknown}; diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc index 326e9360..f55bb737 100644 --- a/payload_generator/extent_ranges_unittest.cc +++ b/payload_generator/extent_ranges_unittest.cc @@ -52,8 +52,8 @@ void ExpectRangeEq(const ExtentRanges& ranges, } } -#define EXPECT_RANGE_EQ(ranges, var) \ - do { \ +#define EXPECT_RANGE_EQ(ranges, var) \ + do { \ ExpectRangeEq(ranges, var, base::size(var), __LINE__); \ } while (0) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 7d62e776..1944847e 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -74,7 +74,6 @@ void ParseSignatureSizes(const string& signature_sizes_flag, } } - void CalculateHashForSigning(const vector& sizes, const string& out_hash_file, const string& out_metadata_hash_file, diff --git a/test_http_server.cc b/test_http_server.cc index cf6f10db..a2f1e052 100644 --- a/test_http_server.cc +++ b/test_http_server.cc @@ -190,7 +190,7 @@ ssize_t WriteHeaders(int fd, string("HTTP/1.1 ") + Itoa(return_code) + " " + GetHttpResponseDescription(return_code) + EOL "Content-Type: application/octet-stream" EOL - "Connection: close" EOL); + "Connection: close" EOL); if (ret < 0) return -1; written += ret; diff --git a/update_metadata.proto b/update_metadata.proto index 8c63ee71..99bfa847 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -173,15 +173,15 @@ message InstallOperation { BSDIFF = 3 [deprecated = true]; // The data is a bsdiff binary diff. // On minor version 2 or newer, these operations are supported: - SOURCE_COPY = 4; // Copy from source to target partition - SOURCE_BSDIFF = 5; // Like BSDIFF, but read from source partition + SOURCE_COPY = 4; // Copy from source to target partition + SOURCE_BSDIFF = 5; // Like BSDIFF, but read from source partition // On minor version 3 or newer and on major version 2 or newer, these // operations are supported: - REPLACE_XZ = 8; // Replace destination extents w/ attached xz data. + REPLACE_XZ = 8; // Replace destination extents w/ attached xz data. // On minor version 4 or newer, these operations are supported: - ZERO = 6; // Write zeros in the destination. + ZERO = 6; // Write zeros in the destination. DISCARD = 7; // Discard the destination blocks, reading as undefined. BROTLI_BSDIFF = 10; // Like SOURCE_BSDIFF, but compressed with brotli. From 7d64e28e6ddd726cefb050727f94c64185573991 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Wed, 2 Sep 2020 15:27:34 -0400 Subject: [PATCH 418/624] Add estimate_cow_size field to update metadata Part of VAB project. When installing update through Virtual AB it is nice to have an estimate of COW image size. When generating an OTA update, delta_generator will fill in this field. 
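(Editorial aside, not part of this change: assuming the standard protobuf-generated accessors for the new PartitionUpdate field, a consumer-side read of the hint might look roughly like the sketch below; the helper name and fallback parameter are hypothetical.)

  #include "update_engine/update_metadata.pb.h"

  // Hypothetical illustration only: prefer the payload's COW size hint,
  // otherwise fall back to a locally computed estimate.
  uint64_t HintedCowSize(const chromeos_update_engine::PartitionUpdate& p,
                         uint64_t fallback_estimate) {
    // estimate_cow_size is an optional field; unset or 0 means "no hint".
    if (p.has_estimate_cow_size() && p.estimate_cow_size() > 0)
      return p.estimate_cow_size();
    return fallback_estimate;
  }
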
Later on libsnapshot will use this as an hint when creating snapshots. Test: mm -j Change-Id: I56aa9dfa5e9d955f971a6464aea433b569cd9307 --- scripts/update_payload/update_metadata_pb2.py | 109 ++++++++++++++++-- update_metadata.proto | 5 + 2 files changed, 105 insertions(+), 9 deletions(-) diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py index 841cd22b..ea4bc59c 100644 --- a/scripts/update_payload/update_metadata_pb2.py +++ b/scripts/update_payload/update_metadata_pb2.py @@ -18,7 +18,7 @@ package='chromeos_update_engine', syntax='proto2', serialized_options=b'H\003', - serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xe8\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 
\x01(\t\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03' + serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xcf\x01\n\x11\x43owMergeOperation\x12<\n\x04type\x18\x01 \x01(\x0e\x32..chromeos_update_engine.CowMergeOperation.Type\x12\x32\n\nsrc_extent\x18\x02 
\x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\ndst_extent\x18\x03 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\"\x14\n\x04Type\x12\x0c\n\x08\x43OW_COPY\x10\x00\"\xc8\x06\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\x12\x43\n\x10merge_operations\x18\x12 \x03(\x0b\x32).chromeos_update_engine.CowMergeOperation\x12\x19\n\x11\x65stimate_cow_size\x18\x13 \x01(\x04\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03' ) @@ -81,6 +81,24 @@ ) _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) +_COWMERGEOPERATION_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='chromeos_update_engine.CowMergeOperation.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COW_COPY', index=0, number=0, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + 
serialized_start=1113, + serialized_end=1133, +) +_sym_db.RegisterEnumDescriptor(_COWMERGEOPERATION_TYPE) + _EXTENT = _descriptor.Descriptor( name='Extent', @@ -387,6 +405,52 @@ ) +_COWMERGEOPERATION = _descriptor.Descriptor( + name='CowMergeOperation', + full_name='chromeos_update_engine.CowMergeOperation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='chromeos_update_engine.CowMergeOperation.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='src_extent', full_name='chromeos_update_engine.CowMergeOperation.src_extent', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='dst_extent', full_name='chromeos_update_engine.CowMergeOperation.dst_extent', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _COWMERGEOPERATION_TYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=926, + serialized_end=1133, +) + + _PARTITIONUPDATE = _descriptor.Descriptor( name='PartitionUpdate', full_name='chromeos_update_engine.PartitionUpdate', @@ -513,6 +577,20 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='merge_operations', full_name='chromeos_update_engine.PartitionUpdate.merge_operations', index=17, + number=18, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='estimate_cow_size', full_name='chromeos_update_engine.PartitionUpdate.estimate_cow_size', index=18, + number=19, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -525,8 +603,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=926, - serialized_end=1670, + serialized_start=1136, + serialized_end=1976, ) @@ -570,8 +648,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1672, - serialized_end=1748, + serialized_start=1978, + serialized_end=2054, ) @@ -608,8 +686,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1750, - serialized_end=1865, + serialized_start=2056, + serialized_end=2171, ) @@ -744,8 +822,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1868, - serialized_end=2733, + serialized_start=2174, + serialized_end=3039, ) _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES @@ -754,6 +832,10 @@ _INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT 
_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT _INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION +_COWMERGEOPERATION.fields_by_name['type'].enum_type = _COWMERGEOPERATION_TYPE +_COWMERGEOPERATION.fields_by_name['src_extent'].message_type = _EXTENT +_COWMERGEOPERATION.fields_by_name['dst_extent'].message_type = _EXTENT +_COWMERGEOPERATION_TYPE.containing_type = _COWMERGEOPERATION _PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO @@ -762,6 +844,7 @@ _PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT _PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT _PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['merge_operations'].message_type = _COWMERGEOPERATION _DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP _DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION _DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION @@ -778,6 +861,7 @@ DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION +DESCRIPTOR.message_types_by_name['CowMergeOperation'] = _COWMERGEOPERATION DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA @@ -827,6 +911,13 @@ }) _sym_db.RegisterMessage(InstallOperation) +CowMergeOperation = _reflection.GeneratedProtocolMessageType('CowMergeOperation', (_message.Message,), { + 'DESCRIPTOR' : _COWMERGEOPERATION, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.CowMergeOperation) + }) +_sym_db.RegisterMessage(CowMergeOperation) + PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), { 'DESCRIPTOR' : _PARTITIONUPDATE, '__module__' : 'update_metadata_pb2' diff --git a/update_metadata.proto b/update_metadata.proto index 373ee5ef..ca59a02b 100644 --- a/update_metadata.proto +++ b/update_metadata.proto @@ -314,6 +314,11 @@ message PartitionUpdate { // skip writing the raw bytes for these extents. During snapshot merge, the // bytes will read from the source partitions instead. repeated CowMergeOperation merge_operations = 18; + + // Estimated size for COW image. This is used by libsnapshot + // as a hint. If set to 0, libsnapshot should use alternative + // methods for estimating size. 
+ optional uint64 estimate_cow_size = 19; } message DynamicPartitionGroup { From a22ef552c04ca23165b428e51c43ed109ad6e5db Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 12 Oct 2020 19:03:52 -0400 Subject: [PATCH 419/624] Disable read/write to ota.warm_reset in recovery mode Bug: 155372775 Test: treehugger Change-Id: Ifd34f185e5df98984794dc0cda584f00769eb084 --- common/platform_constants.h | 6 ++++++ dynamic_partition_control_android.cc | 8 +------- hardware_android.cc | 8 +++++--- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/common/platform_constants.h b/common/platform_constants.h index 243af69e..c060133f 100644 --- a/common/platform_constants.h +++ b/common/platform_constants.h @@ -58,6 +58,12 @@ extern const char kNonVolatileDirectory[]; // postinstall. extern const char kPostinstallMountOptions[]; +#ifdef __ANDROID_RECOVERY__ +constexpr bool kIsRecovery = true; +#else +constexpr bool kIsRecovery = false; +#endif + } // namespace constants } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index c9888abd..1fb2c253 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -86,12 +86,6 @@ constexpr std::chrono::milliseconds kMapTimeout{1000}; // needs to be mapped, this timeout is longer than |kMapTimeout|. constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000}; -#ifdef __ANDROID_RECOVERY__ -constexpr bool kIsRecovery = true; -#else -constexpr bool kIsRecovery = false; -#endif - DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() { Cleanup(); } @@ -1085,7 +1079,7 @@ void DynamicPartitionControlAndroid::set_fake_mapped_devices( } bool DynamicPartitionControlAndroid::IsRecovery() { - return kIsRecovery; + return constants::kIsRecovery; } static bool IsIncrementalUpdate(const DeltaArchiveManifest& manifest) { diff --git a/hardware_android.cc b/hardware_android.cc index a659bf67..5e246211 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -241,9 +241,11 @@ bool HardwareAndroid::SetFirstActiveOmahaPingSent() { } void HardwareAndroid::SetWarmReset(bool warm_reset) { - constexpr char warm_reset_prop[] = "ota.warm_reset"; - if (!android::base::SetProperty(warm_reset_prop, warm_reset ? "1" : "0")) { - LOG(WARNING) << "Failed to set prop " << warm_reset_prop; + if constexpr (!constants::kIsRecovery) { + constexpr char warm_reset_prop[] = "ota.warm_reset"; + if (!android::base::SetProperty(warm_reset_prop, warm_reset ? "1" : "0")) { + LOG(WARNING) << "Failed to set prop " << warm_reset_prop; + } } } From 3dd83979414d78c43b3c3512f77f1bdf7621b334 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 15 Oct 2020 14:02:00 -0400 Subject: [PATCH 420/624] Call fsync() when close file descriptor We get OTA tests failures on cuttlefish targets every once a while, smells like IO error. Add fsync() to flush InstallOps. 
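(Editorial aside, not part of the patch: the essence of the fix is the usual POSIX pattern of flushing file contents before closing the descriptor, since close() by itself does not guarantee the data has reached disk. A minimal, simplified sketch of that pattern — not the actual EintrSafeFileDescriptor code:)

  #include <unistd.h>

  // Flush pending writes, then close. Error handling is simplified here.
  bool FlushAndClose(int fd) {
    fsync(fd);                 // close() does not imply fsync().
    return close(fd) == 0;
  }
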
Test: treehugger Change-Id: I63a4bce17a4fdad611f0dab46cd08dfb61ba63e7 --- payload_consumer/file_descriptor.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/payload_consumer/file_descriptor.cc b/payload_consumer/file_descriptor.cc index 1de615cb..6101c680 100644 --- a/payload_consumer/file_descriptor.cc +++ b/payload_consumer/file_descriptor.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -125,11 +126,16 @@ bool EintrSafeFileDescriptor::BlkIoctl(int request, bool EintrSafeFileDescriptor::Flush() { CHECK_GE(fd_, 0); + // Implemented as a No-Op, as delta_performer typically uses |O_DSYNC|, except + // in interactive settings. return true; } bool EintrSafeFileDescriptor::Close() { CHECK_GE(fd_, 0); + // https://stackoverflow.com/questions/705454/does-linux-guarantee-the-contents-of-a-file-is-flushed-to-disc-after-close + // |close()| doesn't imply |fsync()|, we need to do it manually. + fsync(fd_); if (IGNORE_EINTR(close(fd_))) return false; fd_ = -1; From 6c190a2d716d346ccf60fceae1a4985581b42d0c Mon Sep 17 00:00:00 2001 From: David Anderson Date: Mon, 21 Sep 2020 17:09:53 -0700 Subject: [PATCH 421/624] Link to libsnapshot_cow and libz where libsnapshot is linked. Bug: 168554689 Test: builds Change-Id: Ibbb6b4795ee5f0056e52ad7379a0033092209e78 --- Android.bp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Android.bp b/Android.bp index 27ba172b..52019b6e 100644 --- a/Android.bp +++ b/Android.bp @@ -203,6 +203,8 @@ cc_defaults { "libgsi", "libpayload_consumer", "libsnapshot", + "libsnapshot_cow", + "libz", "update_metadata-protos", ], shared_libs: [ From 19e55297ac3805c7109325ea7d536887e302403e Mon Sep 17 00:00:00 2001 From: Tianjie Date: Mon, 19 Oct 2020 21:49:37 -0700 Subject: [PATCH 422/624] Don't fail downgrade check for partitions without version Some partitions, e.g. vbmeta, won't have timestamp. And the existing downgrade check will fail for these partitions for partial updates. This cl switches the logic to allow empty version for a given partition in the payload, if that source partition doesn't have version (timestamp) on device. This way, we can avoid adding another hardcoded list of partitions with build props. Bug: 170796643 Test: install a partial update on cuttlefish Change-Id: I747910b353bd71e10ec7c32ebb125bde6c16e48a --- payload_consumer/delta_performer.cc | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 9bf6d7ed..e7ef8a33 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -1132,28 +1132,35 @@ ErrorCode DeltaPerformer::CheckTimestampError() const { auto&& timestamp_valid = [this](const PartitionUpdate& partition, bool allow_empty_version, bool* downgrade_detected) -> ErrorCode { + const auto& partition_name = partition.partition_name(); if (!partition.has_version()) { + if (hardware_->GetVersionForLogging(partition_name).empty()) { + LOG(INFO) << partition_name << " does't have version, skipping " + << "downgrade check."; + return ErrorCode::kSuccess; + } + if (allow_empty_version) { return ErrorCode::kSuccess; } LOG(ERROR) - << "PartitionUpdate " << partition.partition_name() - << " does ot have a version field. Not allowed in partial updates."; + << "PartitionUpdate " << partition_name + << " doesn't have a version field. 
Not allowed in partial updates."; return ErrorCode::kDownloadManifestParseError; } - auto error_code = hardware_->IsPartitionUpdateValid( - partition.partition_name(), partition.version()); + auto error_code = + hardware_->IsPartitionUpdateValid(partition_name, partition.version()); switch (error_code) { case ErrorCode::kSuccess: break; case ErrorCode::kPayloadTimestampError: *downgrade_detected = true; - LOG(WARNING) << "PartitionUpdate " << partition.partition_name() + LOG(WARNING) << "PartitionUpdate " << partition_name << " has an older version than partition on device."; break; default: - LOG(ERROR) << "IsPartitionUpdateValid(" << partition.partition_name() + LOG(ERROR) << "IsPartitionUpdateValid(" << partition_name << ") returned" << utils::ErrorCodeToString(error_code); break; } From 34618521d487a37d21f25812f324380f1eb394a8 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 28 Sep 2020 09:21:02 -0400 Subject: [PATCH 423/624] Add CowWriter interface methods to DynamicPartiitonControl Test: treehugger Change-Id: I110c65e75a4594af51e68390e570ac31b50de8b5 --- common/dynamic_partition_control_interface.h | 15 ++++++++++ common/dynamic_partition_control_stub.cc | 9 ++++++ common/dynamic_partition_control_stub.h | 6 +++- dynamic_partition_control_android.cc | 29 ++++++++++++++++++++ dynamic_partition_control_android.h | 8 ++++++ mock_dynamic_partition_control.h | 9 ++++++ 6 files changed, 75 insertions(+), 1 deletion(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 22f6db87..530b0af4 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -26,8 +26,14 @@ #include "update_engine/common/action.h" #include "update_engine/common/cleanup_previous_update_action_delegate.h" #include "update_engine/common/error_code.h" +#include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/update_metadata.pb.h" +// Forware declare for libsnapshot/snapshot_writer.h +namespace android::snapshot { +class ISnapshotWriter; +} + namespace chromeos_update_engine { struct FeatureFlag { @@ -139,6 +145,15 @@ class DynamicPartitionControlInterface { uint32_t source_slot, uint32_t target_slot, const std::vector& partitions) = 0; + // Partition name is expected to be unsuffixed. e.g. system, vendor + // Return an interface to write to a snapshoted partition. + // If `is_append` is false, then existing COW data will be overwritten. + // Otherwise the cow writer will be opened on APPEND mode, existing COW data + // is preserved. 
+ virtual std::unique_ptr OpenCowWriter( + const std::string& unsuffixed_partition_name, + const std::optional&, + bool is_append = false) = 0; }; } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index c63a8ff6..64ab201d 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -20,6 +20,7 @@ #include #include +#include #include "update_engine/common/dynamic_partition_control_stub.h" @@ -87,4 +88,12 @@ bool DynamicPartitionControlStub::VerifyExtentsForUntouchedPartitions( return true; } +std::unique_ptr +DynamicPartitionControlStub::OpenCowWriter( + const std::string& /*unsuffixed_partition_name*/, + const std::optional& /*source_path*/, + bool /*is_append*/) { + return nullptr; +} + } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index 8bff4743..a939cfbb 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -57,8 +57,12 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { uint32_t source_slot, uint32_t target_slot, const std::vector& partitions) override; -}; + std::unique_ptr OpenCowWriter( + const std::string& unsuffixed_partition_name, + const std::optional&, + bool is_append) override; +}; } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_ diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 1fb2c253..06e57453 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -17,6 +17,7 @@ #include "update_engine/dynamic_partition_control_android.h" #include // NOLINT(build/c++11) - using libsnapshot / liblp API +#include #include #include #include @@ -36,6 +37,8 @@ #include #include #include +#include +#include #include #include @@ -1230,4 +1233,30 @@ bool DynamicPartitionControlAndroid::EnsureMetadataMounted() { return metadata_device_ != nullptr; } +std::unique_ptr +DynamicPartitionControlAndroid::OpenCowWriter( + const std::string& partition_name, + const std::optional& source_path, + bool is_append) { + auto suffix = SlotSuffixForSlotNumber(target_slot_); + + std::string device_dir_str; + if (!GetDeviceDir(&device_dir_str)) { + LOG(ERROR) << "Failed to get device dir!"; + return nullptr; + } + base::FilePath device_dir(device_dir_str); + auto super_device = + device_dir.Append(GetSuperPartitionName(target_slot_)).value(); + CreateLogicalPartitionParams params = { + .block_device = super_device, + .metadata_slot = target_slot_, + .partition_name = partition_name + suffix, + .force_writable = true, + }; + // TODO(zhangkelvin) Open an APPEND mode CowWriter once there's an API to do + // it. + return snapshot_->OpenSnapshotWriter(params, std::move(source_path)); +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index f3805f0e..9bffb59b 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -25,6 +25,7 @@ #include #include #include +#include #include "update_engine/common/dynamic_partition_control_interface.h" @@ -82,6 +83,13 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { uint32_t current_slot, std::string* device); + // Partition name is expected to be unsuffixed. e.g. 
system, vendor + // Return an interface to write to a snapshoted partition. + std::unique_ptr OpenCowWriter( + const std::string& unsuffixed_partition_name, + const std::optional& source_path, + bool is_append) override; + protected: // These functions are exposed for testing. diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index e85df327..5144cbbc 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -22,6 +22,9 @@ #include +#include + +#include "libsnapshot/snapshot_writer.h" #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/dynamic_partition_control_interface.h" #include "update_engine/dynamic_partition_control_android.h" @@ -81,6 +84,12 @@ class MockDynamicPartitionControlAndroid PrepareDynamicPartitionsForUpdate, (uint32_t, uint32_t, const DeltaArchiveManifest&, bool), (override)); + MOCK_METHOD(std::unique_ptr, + OpenCowWriter, + (const std::string& unsuffixed_partition_name, + const std::optional& source_path, + bool is_append), + (override)); void set_fake_mapped_devices(const std::set& fake) override { DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); From b0b6cc2b0cfe7a5313b1b416424409be6ea05eb0 Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Thu, 15 Oct 2020 21:54:11 +0900 Subject: [PATCH 424/624] update_engine: remove usages of base::MessageLoop. It's replaced by base::SingleThreadTaskExecutor. BUG=chromium:1094927 TEST=unittest Change-Id: I281063564037a2ccf1b93d00253180a0000abd09 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2474955 Commit-Queue: Qijiang Fan Tested-by: Qijiang Fan Reviewed-by: Amin Hassani --- client_library/client_dbus.cc | 1 - common/http_fetcher_unittest.cc | 10 ++++++++++ common/subprocess_unittest.cc | 10 ++++++++++ p2p_manager_unittest.cc | 10 ++++++++++ payload_consumer/postinstall_runner_action_unittest.cc | 10 ++++++++++ update_attempter_unittest.cc | 6 +++--- 6 files changed, 43 insertions(+), 4 deletions(-) diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc index caf7befe..30ad78c6 100644 --- a/client_library/client_dbus.cc +++ b/client_library/client_dbus.cc @@ -16,7 +16,6 @@ #include "update_engine/client_library/client_dbus.h" -#include #include #include diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc index 9338087d..1ead6813 100644 --- a/common/http_fetcher_unittest.cc +++ b/common/http_fetcher_unittest.cc @@ -28,11 +28,16 @@ #include #include #include +#if BASE_VER < 780000 // Android #include +#endif // BASE_VER < 780000 #include #include #include #include +#if BASE_VER >= 780000 // CrOS +#include +#endif // BASE_VER >= 780000 #include #include #include @@ -403,8 +408,13 @@ class MultiRangeHttpFetcherOverFileFetcherTest : public FileFetcherTest { template class HttpFetcherTest : public ::testing::Test { public: +#if BASE_VER < 780000 // Android base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; +#else // Chrome OS + base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO}; + brillo::BaseMessageLoop loop_{base_loop_.task_runner()}; +#endif // BASE_VER < 780000 T test_; diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc index b4d068f0..ff4158e8 100644 --- a/common/subprocess_unittest.cc +++ b/common/subprocess_unittest.cc @@ -28,9 +28,14 @@ #include #include #include +#if BASE_VER < 780000 // Android #include +#endif // BASE_VER < 780000 #include #include +#if 
BASE_VER >= 780000 // Chrome OS +#include +#endif // BASE_VER >= 780000 #include #include #include @@ -70,8 +75,13 @@ class SubprocessTest : public ::testing::Test { subprocess_.Init(&async_signal_handler_); } +#if BASE_VER < 780000 // Android base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; +#else // Chrome OS + base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO}; + brillo::BaseMessageLoop loop_{base_loop_.task_runner()}; +#endif // BASE_VER < 780000 brillo::AsynchronousSignalHandler async_signal_handler_; Subprocess subprocess_; unique_ptr watcher_; diff --git a/p2p_manager_unittest.cc b/p2p_manager_unittest.cc index 5771ec1d..5510dd7b 100644 --- a/p2p_manager_unittest.cc +++ b/p2p_manager_unittest.cc @@ -30,8 +30,13 @@ #include #include #include +#if BASE_VER < 780000 // Android #include +#endif // BASE_VER < 780000 #include +#if BASE_VER >= 780000 // CrOS +#include +#endif // BASE_VER >= 780000 #include #include #include @@ -92,8 +97,13 @@ class P2PManagerTest : public testing::Test { TimeDelta::FromDays(5))); } +#if BASE_VER < 780000 // Android base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; +#else // CrOS + base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO}; + brillo::BaseMessageLoop loop_{base_loop_.task_runner()}; +#endif // BASE_VER < 780000 brillo::AsynchronousSignalHandler async_signal_handler_; Subprocess subprocess_; diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc index cf5158b2..5910c239 100644 --- a/payload_consumer/postinstall_runner_action_unittest.cc +++ b/payload_consumer/postinstall_runner_action_unittest.cc @@ -26,9 +26,14 @@ #include #include +#if BASE_VER < 780000 // Android #include +#endif // BASE_VER < 780000 #include #include +#if BASE_VER >= 780000 // CrOS +#include +#endif // BASE_VER >= 780000 #include #include #include @@ -156,8 +161,13 @@ class PostinstallRunnerActionTest : public ::testing::Test { } protected: +#if BASE_VER < 780000 // Android base::MessageLoopForIO base_loop_; brillo::BaseMessageLoop loop_{&base_loop_}; +#else // CrOS + base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO}; + brillo::BaseMessageLoop loop_{base_loop_.task_runner()}; +#endif // BASE_VER < 780000 brillo::AsynchronousSignalHandler async_signal_handler_; Subprocess subprocess_; diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index edcb67bc..60267f05 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include #include @@ -337,8 +337,8 @@ class UpdateAttempterTest : public ::testing::Test { // |ProcessingDone()| related member functions. 
void TestProcessingDone(); - base::MessageLoopForIO base_loop_; - brillo::BaseMessageLoop loop_{&base_loop_}; + base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO}; + brillo::BaseMessageLoop loop_{base_loop_.task_runner()}; FakeSystemState fake_system_state_; UpdateAttempterUnderTest attempter_{&fake_system_state_}; From 9b10dba4599bf3acd2195e144a51fa24e3c90a5d Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 25 Sep 2020 17:09:11 -0400 Subject: [PATCH 425/624] Implement VABCPartitionWriter Test: treehugger Change-Id: I7ec47adba0bc095879989b43ec642ea6a7852ab6 --- Android.bp | 28 ++++++++++ common/cow_operation_convert.cc | 30 +++++++++++ common/cow_operation_convert.h | 55 +++++++++++++++++++ payload_consumer/snapshot_extent_writer.cc | 54 +++++++++++++++++++ payload_consumer/snapshot_extent_writer.h | 47 ++++++++++++++++ payload_consumer/vabc_partition_writer.cc | 62 +++++++++++++++++++--- payload_consumer/vabc_partition_writer.h | 5 +- 7 files changed, 272 insertions(+), 9 deletions(-) create mode 100644 common/cow_operation_convert.cc create mode 100644 common/cow_operation_convert.h create mode 100644 payload_consumer/snapshot_extent_writer.cc create mode 100644 payload_consumer/snapshot_extent_writer.h diff --git a/Android.bp b/Android.bp index 52019b6e..a8fdf518 100644 --- a/Android.bp +++ b/Android.bp @@ -82,6 +82,28 @@ cc_defaults { }, } +// libcow_operation_convert (type: library) +// ======================================================== +cc_library { + name: "libcow_operation_convert", + host_supported: true, + recovery_available: true, + defaults: [ + "ue_defaults", + "update_metadata-protos_exports", + ], + srcs: [ + "common/cow_operation_convert.cc", + ], + static_libs: [ + "libsnapshot_cow", + "update_metadata-protos", + "libpayload_extent_ranges", + "libbrotli", + "libz", + ], +} + // update_metadata-protos (type: static_library) // ======================================================== // Protobufs. @@ -125,6 +147,10 @@ cc_defaults { "libpuffpatch", "libverity_tree", "libsnapshot_cow", + "libbrotli", + "libz", + "libpayload_extent_ranges", + "libcow_operation_convert", ], shared_libs: [ "libbase", @@ -182,6 +208,7 @@ cc_library_static { "payload_consumer/partition_writer.cc", "payload_consumer/partition_writer_factory_android.cc", "payload_consumer/vabc_partition_writer.cc", + "payload_consumer/snapshot_extent_writer.cc", "payload_consumer/postinstall_runner_action.cc", "payload_consumer/verity_writer_android.cc", "payload_consumer/xz_extent_writer.cc", @@ -482,6 +509,7 @@ cc_library_static { "ue_defaults", ], host_supported: true, + recovery_available: true, srcs: [ "payload_generator/extent_ranges.cc", ], diff --git a/common/cow_operation_convert.cc b/common/cow_operation_convert.cc new file mode 100644 index 00000000..a4eaba30 --- /dev/null +++ b/common/cow_operation_convert.cc @@ -0,0 +1,30 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/common/cow_operation_convert.h" + +#include "update_engine/payload_generator/extent_ranges.h" + +namespace chromeos_update_engine { +std::vector ConvertToCowOperations( + const ::google::protobuf::RepeatedPtrField< + ::chromeos_update_engine::InstallOperation>& operations, + const ::google::protobuf::RepeatedPtrField& + merge_operations) { + // TODO(zhangkelvin) Implement this. + return {}; +} +} // namespace chromeos_update_engine diff --git a/common/cow_operation_convert.h b/common/cow_operation_convert.h new file mode 100644 index 00000000..bca10ac2 --- /dev/null +++ b/common/cow_operation_convert.h @@ -0,0 +1,55 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef __COW_OPERATION_CONVERT_H +#define __COW_OPERATION_CONVERT_H + +#include + +#include + +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { +struct CowOperation { + enum Type { + CowCopy = android::snapshot::kCowCopyOp, + CowReplace = android::snapshot::kCowReplaceOp, + }; + Type op; + uint64_t src_block; + uint64_t dst_block; +}; + +// Convert SOURCE_COPY operations in `operations` list to a list of +// CowOperations according to the merge sequence. This function only converts +// SOURCE_COPY, other operations are ignored. If there's a merge conflict in +// SOURCE_COPY operations, some blocks may be converted to COW_REPLACE instead +// of COW_COPY. + +// The list returned does not necessarily preserve the order of +// SOURCE_COPY in `operations`. The only guarantee about ordering in the +// returned list is that if operations are applied in such order, there would be +// no merge conflicts. + +// This funnction is intended to be used by delta_performer to perform +// SOURCE_COPY operations on Virtual AB Compression devices. +std::vector ConvertToCowOperations( + const ::google::protobuf::RepeatedPtrField< + ::chromeos_update_engine::InstallOperation>& operations, + const ::google::protobuf::RepeatedPtrField& + merge_operations); +} // namespace chromeos_update_engine +#endif diff --git a/payload_consumer/snapshot_extent_writer.cc b/payload_consumer/snapshot_extent_writer.cc new file mode 100644 index 00000000..882d1f7f --- /dev/null +++ b/payload_consumer/snapshot_extent_writer.cc @@ -0,0 +1,54 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "update_engine/payload_consumer/snapshot_extent_writer.h" + +#include +#include + +#include + +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { +SnapshotExtentWriter::SnapshotExtentWriter( + android::snapshot::ICowWriter* cow_writer) + : cow_writer_(cow_writer) { + CHECK_NE(cow_writer, nullptr); +} + +SnapshotExtentWriter::~SnapshotExtentWriter() { + CHECK(buffer_.empty()); +} + +bool SnapshotExtentWriter::Init( + FileDescriptorPtr /*fd*/, + const google::protobuf::RepeatedPtrField& extents, + uint32_t /*block_size*/) { + // TODO(zhangkelvin) Implement this + return true; +} + +// Returns true on success. +// This will construct a COW_REPLACE operation and forward it to CowWriter. It +// is important that caller does not perform SOURCE_COPY operation on this +// class, otherwise raw data will be stored. Caller should find ways to use +// COW_COPY whenever possible. +bool SnapshotExtentWriter::Write(const void* bytes, size_t count) { + // TODO(zhangkelvin) Implement this + return true; +} + +} // namespace chromeos_update_engine diff --git a/payload_consumer/snapshot_extent_writer.h b/payload_consumer/snapshot_extent_writer.h new file mode 100644 index 00000000..43a83173 --- /dev/null +++ b/payload_consumer/snapshot_extent_writer.h @@ -0,0 +1,47 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include +#include + +#include + +#include "update_engine/payload_consumer/extent_writer.h" + +namespace chromeos_update_engine { +class SnapshotExtentWriter : public chromeos_update_engine::ExtentWriter { + public: + explicit SnapshotExtentWriter(android::snapshot::ICowWriter* cow_writer); + ~SnapshotExtentWriter(); + // Returns true on success. + bool Init(FileDescriptorPtr fd, + const google::protobuf::RepeatedPtrField& extents, + uint32_t block_size) override; + // Returns true on success. + // This will construct a COW_REPLACE operation and forward it to CowWriter. It + // is important that caller does not perform SOURCE_COPY operation on this + // class, otherwise raw data will be stored. Caller should find ways to use + // COW_COPY whenever possible. + bool Write(const void* bytes, size_t count) override; + + private: + // It's a non-owning pointer, because PartitionWriter owns the CowWruter. This + // allows us to use a single instance of CowWriter for all operations applied + // to the same partition. 
+ [[maybe_unused]] android::snapshot::ICowWriter* cow_writer_; + [[maybe_unused]] google::protobuf::RepeatedPtrField extents_; + [[maybe_unused]] std::vector buffer_; +}; +} // namespace chromeos_update_engine diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc index ab4897fc..1578f29e 100644 --- a/payload_consumer/vabc_partition_writer.cc +++ b/payload_consumer/vabc_partition_writer.cc @@ -17,33 +17,74 @@ #include "update_engine/payload_consumer/vabc_partition_writer.h" #include +#include #include +#include "update_engine/common/cow_operation_convert.h" #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/extent_writer.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/partition_writer.h" +#include "update_engine/payload_consumer/snapshot_extent_writer.h" namespace chromeos_update_engine { bool VABCPartitionWriter::Init(const InstallPlan* install_plan, bool source_may_exist) { + TEST_AND_RETURN_FALSE(install_plan != nullptr); TEST_AND_RETURN_FALSE(PartitionWriter::Init(install_plan, source_may_exist)); + cow_writer_ = dynamic_control_->OpenCowWriter( + install_part_.name, install_part_.source_path, install_plan->is_resume); + TEST_AND_RETURN_FALSE(cow_writer_ != nullptr); - // TODO(zhangkelvin) Add code specific to VABC. E.x. Convert InstallOps to - // CowOps, perform all SOURCE_COPY upfront according to merge sequence. + // TODO(zhangkelvin) Emit a label before writing SOURCE_COPY. When resuming, + // use pref or CowWriter::GetLastLabel to determine if the SOURCE_COPY ops are + // written. No need to handle SOURCE_COPY operations when resuming. + + // ===== Resume case handling code goes here ==== + + // ============================================== + + // TODO(zhangkelvin) Rewrite this in C++20 coroutine once that's available. 
+ auto converted = ConvertToCowOperations(partition_update_.operations(), + partition_update_.merge_operations()); + std::vector buffer(block_size_); + for (const auto& cow_op : converted) { + switch (cow_op.op) { + case CowOperation::CowCopy: + TEST_AND_RETURN_FALSE( + cow_writer_->AddCopy(cow_op.dst_block, cow_op.src_block)); + break; + case CowOperation::CowReplace: + ssize_t bytes_read = 0; + TEST_AND_RETURN_FALSE(utils::PReadAll(source_fd_, + buffer.data(), + block_size_, + cow_op.src_block * block_size_, + &bytes_read)); + if (bytes_read <= 0 || static_cast(bytes_read) != block_size_) { + LOG(ERROR) << "source_fd->Read failed: " << bytes_read; + return false; + } + TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks( + cow_op.dst_block, buffer.data(), block_size_)); + break; + } + } return true; } std::unique_ptr VABCPartitionWriter::CreateBaseExtentWriter() { - // TODO(zhangkelvin) Return a SnapshotExtentWriter - return std::make_unique(); + return std::make_unique(cow_writer_.get()); } [[nodiscard]] bool VABCPartitionWriter::PerformZeroOrDiscardOperation( const InstallOperation& operation) { - // TODO(zhangkelvin) Create a COW_ZERO operation and send it to CowWriter - return PartitionWriter::PerformZeroOrDiscardOperation(operation); + for (const auto& extent : operation.dst_extents()) { + TEST_AND_RETURN_FALSE( + cow_writer_->AddZeroBlocks(extent.start_block(), extent.num_blocks())); + } + return true; } [[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation( @@ -53,6 +94,13 @@ std::unique_ptr VABCPartitionWriter::CreateBaseExtentWriter() { return true; } -VABCPartitionWriter::~VABCPartitionWriter() = default; +bool VABCPartitionWriter::Flush() { + // No need to do anything, as CowWriter automatically flushes every OP added. + return true; +} + +VABCPartitionWriter::~VABCPartitionWriter() { + cow_writer_->Finalize(); +} } // namespace chromeos_update_engine diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h index 034fb57a..d65ac4a5 100644 --- a/payload_consumer/vabc_partition_writer.h +++ b/payload_consumer/vabc_partition_writer.h @@ -19,7 +19,7 @@ #include -#include +#include #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/partition_writer.h" @@ -41,9 +41,10 @@ class VABCPartitionWriter final : public PartitionWriter { const InstallOperation& operation) override; [[nodiscard]] bool PerformSourceCopyOperation( const InstallOperation& operation, ErrorCode* error) override; + [[nodiscard]] bool Flush() override; private: - std::unique_ptr cow_writer_; + std::unique_ptr cow_writer_; }; } // namespace chromeos_update_engine From b31e1ac799a1f40bc43b8354257cae5d1e5970ef Mon Sep 17 00:00:00 2001 From: Parveen Kumar Date: Fri, 16 Oct 2020 15:30:09 -0700 Subject: [PATCH 426/624] Look for shflags in parent dir of brillo_update_payload's dir. Android host tools binaries are under $HOST_OUT/bin and libs are under $HOST_OUT/lib. So 'lib' dir is sibling of 'bin' dir and not under it. Bug: 168477594 Test: treehugger Change-Id: I67d49f243b06bae9c4d4e4afe2a6bb922a1ea31a --- scripts/brillo_update_payload | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload index 3bc87bd6..77d372c3 100755 --- a/scripts/brillo_update_payload +++ b/scripts/brillo_update_payload @@ -89,12 +89,14 @@ die() { exit 1 } -# Loads shflags. 
We first look at the default install location; then look for -# crosutils (chroot); finally check our own directory. +# Loads shflags. We first look at the default install location; then our own +# directory; finally the parent directory. load_shflags() { local my_dir="$(dirname "$(readlink -f "$0")")" local path - for path in /usr/share/misc "${my_dir}"/lib/shflags; do + for path in /usr/share/misc \ + "${my_dir}"/lib/shflags \ + "${my_dir}"/../lib/shflags; do if [[ -r "${path}/shflags" ]]; then . "${path}/shflags" || die "Could not load ${path}/shflags." return From b05e4e2705ea06a2f7772ec2ab2c5353a696226c Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 25 Sep 2020 16:16:19 -0400 Subject: [PATCH 427/624] Implement common library for converting InstallOps to Cow Ops Test: generate && serve an OTA Bug: 168554689 Change-Id: If9e87ca3e993372ebb6b24ed64e71b319630bb18 --- Android.bp | 1 + common/cow_operation_convert.cc | 47 ++++- common/cow_operation_convert.h | 1 + common/cow_operation_convert_unittest.cc | 220 +++++++++++++++++++++++ payload_generator/extent_utils.h | 39 ++++ 5 files changed, 306 insertions(+), 2 deletions(-) create mode 100644 common/cow_operation_convert_unittest.cc diff --git a/Android.bp b/Android.bp index a8fdf518..178b7daa 100644 --- a/Android.bp +++ b/Android.bp @@ -703,6 +703,7 @@ cc_test { "common/action_pipe_unittest.cc", "common/action_processor_unittest.cc", "common/action_unittest.cc", + "common/cow_operation_convert_unittest.cc", "common/cpu_limiter_unittest.cc", "common/fake_prefs.cc", "common/file_fetcher_unittest.cc", diff --git a/common/cow_operation_convert.cc b/common/cow_operation_convert.cc index a4eaba30..db17b5fd 100644 --- a/common/cow_operation_convert.cc +++ b/common/cow_operation_convert.cc @@ -16,15 +16,58 @@ #include "update_engine/common/cow_operation_convert.h" +#include + #include "update_engine/payload_generator/extent_ranges.h" +#include "update_engine/payload_generator/extent_utils.h" namespace chromeos_update_engine { + std::vector ConvertToCowOperations( const ::google::protobuf::RepeatedPtrField< ::chromeos_update_engine::InstallOperation>& operations, const ::google::protobuf::RepeatedPtrField& merge_operations) { - // TODO(zhangkelvin) Implement this. - return {}; + ExtentRanges merge_extents; + std::vector converted; + + // We want all CowCopy ops to be done first, before any COW_REPLACE happen. + // Therefore we add these ops in 2 separate loops. This is because during + // merge, a CowReplace might modify a block needed by CowCopy, so we always + // perform CowCopy first. + + // This loop handles CowCopy blocks within SOURCE_COPY, and the next loop + // converts the leftover blocks to CowReplace? + for (const auto& merge_op : merge_operations) { + merge_extents.AddExtent(merge_op.dst_extent()); + const auto& src_extent = merge_op.src_extent(); + const auto& dst_extent = merge_op.dst_extent(); + for (uint64_t i = 0; i < src_extent.num_blocks(); i++) { + converted.push_back({CowOperation::CowCopy, + src_extent.start_block() + i, + dst_extent.start_block() + i}); + } + } + // COW_REPLACE are added after COW_COPY, because replace might modify blocks + // needed by COW_COPY. Please don't merge this loop with the previous one. 
+ for (const auto& operation : operations) { + if (operation.type() != InstallOperation::SOURCE_COPY) { + continue; + } + const auto& src_extents = operation.src_extents(); + const auto& dst_extents = operation.dst_extents(); + BlockIterator it1{src_extents}; + BlockIterator it2{dst_extents}; + while (!it1.is_end() && !it2.is_end()) { + auto src_block = *it1; + auto dst_block = *it2; + if (!merge_extents.ContainsBlock(dst_block)) { + converted.push_back({CowOperation::CowReplace, src_block, dst_block}); + } + ++it1; + ++it2; + } + } + return converted; } } // namespace chromeos_update_engine diff --git a/common/cow_operation_convert.h b/common/cow_operation_convert.h index bca10ac2..c0543f7b 100644 --- a/common/cow_operation_convert.h +++ b/common/cow_operation_convert.h @@ -13,6 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // + #ifndef __COW_OPERATION_CONVERT_H #define __COW_OPERATION_CONVERT_H diff --git a/common/cow_operation_convert_unittest.cc b/common/cow_operation_convert_unittest.cc new file mode 100644 index 00000000..b70dcdfa --- /dev/null +++ b/common/cow_operation_convert_unittest.cc @@ -0,0 +1,220 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include + +#include + +#include "update_engine/common/cow_operation_convert.h" +#include "update_engine/payload_generator/extent_ranges.h" +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { +using OperationList = ::google::protobuf::RepeatedPtrField< + ::chromeos_update_engine::InstallOperation>; +using MergeOplist = ::google::protobuf::RepeatedPtrField< + ::chromeos_update_engine::CowMergeOperation>; + +std::ostream& operator<<(std::ostream& out, CowOperation::Type op) { + switch (op) { + case CowOperation::Type::CowCopy: + out << "CowCopy"; + break; + case CowOperation::Type::CowReplace: + out << "CowReplace"; + break; + default: + out << op; + break; + } + return out; +} + +std::ostream& operator<<(std::ostream& out, const CowOperation& c) { + out << "{" << c.op << ", " << c.src_block << ", " << c.dst_block << "}"; + return out; +} + +class CowOperationConvertTest : public testing::Test { + public: + void VerifyCowMergeOp(const std::vector& cow_ops) { + // Build a set of all extents covered by InstallOps. + ExtentRanges src_extent_set; + ExtentRanges dst_extent_set; + for (auto&& op : operations_) { + src_extent_set.AddRepeatedExtents(op.src_extents()); + dst_extent_set.AddRepeatedExtents(op.dst_extents()); + } + ExtentRanges modified_extents; + for (auto&& cow_op : cow_ops) { + if (cow_op.op == CowOperation::CowCopy) { + EXPECT_TRUE(src_extent_set.ContainsBlock(cow_op.src_block)); + // converted operations should be conflict free. 
+ EXPECT_FALSE(modified_extents.ContainsBlock(cow_op.src_block)) + << "SOURCE_COPY operation " << cow_op + << " read from a modified block"; + src_extent_set.SubtractExtent(ExtentForRange(cow_op.src_block, 1)); + } + EXPECT_TRUE(dst_extent_set.ContainsBlock(cow_op.dst_block)); + dst_extent_set.SubtractExtent(ExtentForRange(cow_op.dst_block, 1)); + modified_extents.AddBlock(cow_op.dst_block); + } + // The generated CowOps should cover all extents in InstallOps. + EXPECT_EQ(dst_extent_set.blocks(), 0UL); + // It's possible that src_extent_set is non-empty, because some operations + // will be converted to CowReplace, and we don't count the source extent for + // those. + } + OperationList operations_; + MergeOplist merge_operations_; +}; + +void AddOperation(OperationList* operations, + ::chromeos_update_engine::InstallOperation_Type op_type, + std::initializer_list> src_extents, + std::initializer_list> dst_extents) { + auto&& op = operations->Add(); + op->set_type(op_type); + for (const auto& extent : src_extents) { + *op->add_src_extents() = ExtentForRange(extent[0], extent[1]); + } + for (const auto& extent : dst_extents) { + *op->add_dst_extents() = ExtentForRange(extent[0], extent[1]); + } +} + +void AddMergeOperation(MergeOplist* operations, + ::chromeos_update_engine::CowMergeOperation_Type op_type, + std::array src_extent, + std::array dst_extent) { + auto&& op = operations->Add(); + op->set_type(op_type); + *op->mutable_src_extent() = ExtentForRange(src_extent[0], src_extent[1]); + *op->mutable_dst_extent() = ExtentForRange(dst_extent[0], dst_extent[1]); +} + +TEST_F(CowOperationConvertTest, NoConflict) { + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{30, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{20, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{0, 1}}, {{10, 1}}); + + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {30, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {20, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {0, 1}, {10, 1}); + + auto cow_ops = ConvertToCowOperations(operations_, merge_operations_); + ASSERT_EQ(cow_ops.size(), 3UL); + ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) { + return cow_op.op == CowOperation::CowCopy; + })); + VerifyCowMergeOp(cow_ops); +} + +TEST_F(CowOperationConvertTest, CowReplace) { + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{30, 1}}, {{0, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{30, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{20, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{0, 1}}, {{10, 1}}); + + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {30, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {20, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {0, 1}, {10, 1}); + + auto cow_ops = ConvertToCowOperations(operations_, merge_operations_); + ASSERT_EQ(cow_ops.size(), 4UL); + // Expect 3 COW_COPY and 1 COW_REPLACE + ASSERT_EQ(std::count_if(cow_ops.begin(), + cow_ops.end(), + [](auto&& cow_op) { + return cow_op.op == CowOperation::CowCopy; + }), + 3); + ASSERT_EQ(std::count_if(cow_ops.begin(), + cow_ops.end(), + [](auto&& cow_op) { + return cow_op.op == CowOperation::CowReplace; + }), + 1); + 
VerifyCowMergeOp(cow_ops); +} + +TEST_F(CowOperationConvertTest, ReOrderSourceCopy) { + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{30, 1}}, {{20, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{10, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{0, 1}}); + + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {0, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {10, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {30, 1}, {20, 1}); + + auto cow_ops = ConvertToCowOperations(operations_, merge_operations_); + ASSERT_EQ(cow_ops.size(), 3UL); + // Expect 3 COW_COPY + ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) { + return cow_op.op == CowOperation::CowCopy; + })); + VerifyCowMergeOp(cow_ops); +} + +TEST_F(CowOperationConvertTest, InterleavingSrcExtent) { + AddOperation(&operations_, + InstallOperation::SOURCE_COPY, + {{30, 5}, {35, 5}}, + {{20, 10}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{20, 1}}, {{10, 1}}); + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{10, 1}}, {{0, 1}}); + + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {10, 1}, {0, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {20, 1}, {10, 1}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {30, 5}, {20, 5}); + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {35, 5}, {25, 5}); + + auto cow_ops = ConvertToCowOperations(operations_, merge_operations_); + // Expect 4 COW_COPY + ASSERT_EQ(cow_ops.size(), 12UL); + ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) { + return cow_op.op == CowOperation::CowCopy; + })); + VerifyCowMergeOp(cow_ops); +} +} // namespace chromeos_update_engine diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h index 9763b1f1..f870b29a 100644 --- a/payload_generator/extent_utils.h +++ b/payload_generator/extent_utils.h @@ -20,6 +20,8 @@ #include #include +#include + #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/update_metadata.pb.h" @@ -83,6 +85,43 @@ std::vector ExtentsSublist(const std::vector& extents, bool operator==(const Extent& a, const Extent& b); +// TODO(zhangkelvin) This is ugly. Rewrite using C++20's coroutine once +// that's available. Unfortunately with C++17 this is the best I could do. + +// An iterator that takes a sequence of extents, and iterate over blocks +// inside this sequence of extents. 
+// Example usage: + +// BlockIterator it1{src_extents}; +// while(!it1.is_end()) { +// auto block = *it1; +// Do stuff with |block| +// } +struct BlockIterator { + explicit BlockIterator( + const google::protobuf::RepeatedPtrField& src_extents) + : src_extents_(src_extents) {} + + BlockIterator& operator++() { + CHECK_LT(cur_extent_, src_extents_.size()); + block_offset_++; + if (block_offset_ >= src_extents_[cur_extent_].num_blocks()) { + cur_extent_++; + block_offset_ = 0; + } + return *this; + } + + [[nodiscard]] bool is_end() { return cur_extent_ >= src_extents_.size(); } + [[nodiscard]] uint64_t operator*() { + return src_extents_[cur_extent_].start_block() + block_offset_; + } + + const google::protobuf::RepeatedPtrField& src_extents_; + int cur_extent_ = 0; + size_t block_offset_ = 0; +}; + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_ From b78e1bceba7a70d4ce38452547b123650d18484b Mon Sep 17 00:00:00 2001 From: Qijiang Fan Date: Tue, 27 Oct 2020 16:31:39 +0900 Subject: [PATCH 428/624] update_engine: use unversioned libchrome libraray. Libchrome is changing pkg-config and .so library name from xxxx-${libbase_ver}.{pc,so} to xxx.{pc,so}. Libchrome uprev rebuild will be handled by ebuild subslot change. And there's no need to install multiple libchrome. Keeping libbase_ver suffix will cause troubles on linking libraries when developers wants to test locally emerge a-single-package without build_packages, after a libchrome uprev. Especially in a case a package depends on another package(library), and both depends on libchrome. Thus, removing libbase_ver from suffix. BUG=chromium:920513 TEST=CQ Change-Id: I65cff884c1f852f51b2a7e2dfc258cac340b2747 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2500818 Reviewed-by: Amin Hassani Commit-Queue: Qijiang Fan Tested-by: Qijiang Fan --- BUILD.gn | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index b1719e20..8f065133 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -95,7 +95,7 @@ pkg_config("target_defaults") { # NOSORT pkg_deps = [ "libbrillo", - "libchrome-${libbase_ver}", + "libchrome", # system_api depends on protobuf (or protobuf-lite). It must appear # before protobuf here or the linker flags won't be in the right @@ -562,7 +562,7 @@ if (use.test) { ] pkg_deps = [ "libbrillo-test", - "libchrome-test-${libbase_ver}", + "libchrome-test", "libdebugd-client-test", "libpower_manager-client-test", "libsession_manager-client-test", @@ -586,7 +586,7 @@ if (use.fuzzer) { ] pkg_deps = [ "libbrillo-test", - "libchrome-test-${libbase_ver}", + "libchrome-test", ] deps = [ ":libupdate_engine", @@ -601,7 +601,7 @@ if (use.fuzzer) { ] pkg_deps = [ "libbrillo-test", - "libchrome-test-${libbase_ver}", + "libchrome-test", ] deps = [ ":libupdate_engine", From e8153631e85189ab578aa04b556801ee80d6d686 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 27 Oct 2020 15:11:28 -0700 Subject: [PATCH 429/624] update_engine: Make policy UpdateCheckParams processing easier Currently there are a ton of arguments from UpdateCheckParams that is passed around in the udpate_attampter.cc. With this CL UpdateCheckParams is directy passed and this simplies the logic. 
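For illustration, a call that previously spelled out every policy knob positionally (sketched from the unit tests updated in this CL; exact call sites vary):

  attempter_.Update("", "", "", "", "1234",
                    /*rollback_allowed=*/true,
                    /*rollback_data_save_requested=*/false,
                    /*rollback_allowed_milestones=*/4,
                    /*rollback_on_channel_downgrade=*/false,
                    /*obey_proxies=*/false,
                    /*interactive=*/false);

now collapses to a single aggregate, with any unset field falling back to the defaults declared on UpdateCheckParams:

  chromeos_update_manager::UpdateCheckParams params{
      .target_version_prefix = "1234",
      .rollback_allowed = true,
      .rollback_allowed_milestones = 4,
  };
  attempter_.Update(params);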
BUG=b:171829801 TEST=cros_workon_make --board reef --test update_enigne Change-Id: If454f6393fc6e28d41fa5d14d184f0db32e8bd19 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2504453 Commit-Queue: Amin Hassani Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- mock_update_attempter.h | 12 +- omaha_request_params.cc | 56 +++++- omaha_request_params.h | 3 +- omaha_request_params_unittest.cc | 30 +-- payload_state_unittest.cc | 6 +- real_system_state.cc | 2 +- update_attempter.cc | 102 ++-------- update_attempter.h | 32 +--- update_attempter_unittest.cc | 179 ++++-------------- update_manager/policy.h | 11 +- .../real_updater_provider_unittest.cc | 8 +- 11 files changed, 134 insertions(+), 307 deletions(-) diff --git a/mock_update_attempter.h b/mock_update_attempter.h index d502222a..96d93fd5 100644 --- a/mock_update_attempter.h +++ b/mock_update_attempter.h @@ -32,17 +32,7 @@ class MockUpdateAttempter : public UpdateAttempter { MOCK_METHOD(void, Update, - (const std::string& app_version, - const std::string& omaha_url, - const std::string& target_channel, - const std::string& lts_tag, - const std::string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool rollback_on_channel_downgrade, - bool obey_proxies, - bool interactive), + (const chromeos_update_manager::UpdateCheckParams& params), (override)); MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status)); diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 8a2e3dcb..79d19e8f 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -37,9 +37,11 @@ #include "update_engine/common/platform_constants.h" #include "update_engine/common/utils.h" #include "update_engine/system_state.h" +#include "update_engine/update_manager/policy.h" #define CALL_MEMBER_FN(object, member) ((object).*(member)) +using chromeos_update_manager::UpdateCheckParams; using std::string; namespace chromeos_update_engine { @@ -59,9 +61,9 @@ OmahaRequestParams::~OmahaRequestParams() { test::SetImagePropertiesRootPrefix(nullptr); } -bool OmahaRequestParams::Init(const string& in_app_version, - const string& in_update_url, - bool in_interactive) { +bool OmahaRequestParams::Init(const string& app_version, + const string& update_url, + const UpdateCheckParams& params) { LOG(INFO) << "Initializing parameters for this update attempt"; image_props_ = LoadImageProperties(system_state_); mutable_image_props_ = LoadMutableImageProperties(system_state_); @@ -77,15 +79,15 @@ bool OmahaRequestParams::Init(const string& in_app_version, os_platform_ = constants::kOmahaPlatformName; if (!image_props_.system_version.empty()) { - if (in_app_version == "ForcedUpdate") { - image_props_.system_version = in_app_version; + if (app_version == "ForcedUpdate") { + image_props_.system_version = app_version; } os_version_ = image_props_.system_version; } else { os_version_ = OmahaRequestParams::kOsVersion; } - if (!in_app_version.empty()) - image_props_.version = in_app_version; + if (!app_version.empty()) + image_props_.version = app_version; os_sp_ = image_props_.version + "_" + GetMachineType(); app_lang_ = "en-US"; @@ -115,17 +117,51 @@ bool OmahaRequestParams::Init(const string& in_app_version, delta_okay_ = false; } - if (in_update_url.empty()) + if (update_url.empty()) update_url_ = image_props_.omaha_url; else - update_url_ = in_update_url; + update_url_ = update_url; // Set the interactive flag accordingly. 
- interactive_ = in_interactive; + interactive_ = params.interactive; dlc_apps_params_.clear(); // Set false so it will do update by default. is_install_ = false; + + target_version_prefix_ = params.target_version_prefix; + + lts_tag_ = params.lts_tag; + + rollback_allowed_ = params.rollback_allowed; + + // Set whether saving data over rollback is requested. + rollback_data_save_requested_ = params.rollback_data_save_requested; + + // Set how many milestones of rollback are allowed. + rollback_allowed_milestones_ = params.rollback_allowed_milestones; + + // Set the target channel, if one was provided. + if (params.target_channel.empty()) { + LOG(INFO) << "No target channel mandated by policy."; + } else { + LOG(INFO) << "Setting target channel as mandated: " + << params.target_channel; + string error_message; + if (!SetTargetChannel(params.target_channel, + params.rollback_on_channel_downgrade, + &error_message)) { + LOG(ERROR) << "Setting the channel failed: " << error_message; + } + + // Since this is the beginning of a new attempt, update the download + // channel. The download channel won't be updated until the next attempt, + // even if target channel changes meanwhile, so that how we'll know if we + // should cancel the current download attempt if there's such a change in + // target channel. + UpdateDownloadChannel(); + } + return true; } diff --git a/omaha_request_params.h b/omaha_request_params.h index 1e9ab7d6..7e192620 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -30,6 +30,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/platform_constants.h" #include "update_engine/image_properties.h" +#include "update_engine/update_manager/policy.h" // This gathers local system information and prepares info used by the // Omaha request action. @@ -249,7 +250,7 @@ class OmahaRequestParams { // of the parameter. Returns true on success, false otherwise. bool Init(const std::string& in_app_version, const std::string& in_update_url, - bool in_interactive); + const chromeos_update_manager::UpdateCheckParams& params); // Permanently changes the release channel to |channel|. Performs a // powerwash, if required and allowed. diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc index 110fb2bd..140cad3c 100644 --- a/omaha_request_params_unittest.cc +++ b/omaha_request_params_unittest.cc @@ -75,36 +75,36 @@ string GetMachineType() { } // namespace TEST_F(OmahaRequestParamsTest, MissingChannelTest) { - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); // By default, if no channel is set, we should track the stable-channel. 
EXPECT_EQ("stable-channel", params_.target_channel()); } TEST_F(OmahaRequestParamsTest, ForceVersionTest) { - EXPECT_TRUE(params_.Init("ForcedVersion", "", false)); + EXPECT_TRUE(params_.Init("ForcedVersion", "", {})); EXPECT_EQ(string("ForcedVersion_") + GetMachineType(), params_.os_sp()); EXPECT_EQ("ForcedVersion", params_.app_version()); } TEST_F(OmahaRequestParamsTest, ForcedURLTest) { - EXPECT_TRUE(params_.Init("", "http://forced.google.com", false)); + EXPECT_TRUE(params_.Init("", "http://forced.google.com", {})); EXPECT_EQ("http://forced.google.com", params_.update_url()); } TEST_F(OmahaRequestParamsTest, MissingURLTest) { - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ(constants::kOmahaDefaultProductionURL, params_.update_url()); } TEST_F(OmahaRequestParamsTest, DeltaOKTest) { - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_TRUE(params_.delta_okay()); } TEST_F(OmahaRequestParamsTest, NoDeltasTest) { ASSERT_TRUE( WriteFileString(tempdir_.GetPath().Append(".nodelta").value(), "")); - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_FALSE(params_.delta_okay()); } @@ -112,12 +112,12 @@ TEST_F(OmahaRequestParamsTest, SetTargetChannelTest) { { OmahaRequestParams params(&fake_system_state_); params.set_root(tempdir_.GetPath().value()); - EXPECT_TRUE(params.Init("", "", false)); + EXPECT_TRUE(params.Init("", "", {})); EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr)); EXPECT_FALSE(params.mutable_image_props_.is_powerwash_allowed); } params_.set_root(tempdir_.GetPath().value()); - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ("canary-channel", params_.target_channel()); EXPECT_FALSE(params_.mutable_image_props_.is_powerwash_allowed); } @@ -126,12 +126,12 @@ TEST_F(OmahaRequestParamsTest, SetIsPowerwashAllowedTest) { { OmahaRequestParams params(&fake_system_state_); params.set_root(tempdir_.GetPath().value()); - EXPECT_TRUE(params.Init("", "", false)); + EXPECT_TRUE(params.Init("", "", {})); EXPECT_TRUE(params.SetTargetChannel("canary-channel", true, nullptr)); EXPECT_TRUE(params.mutable_image_props_.is_powerwash_allowed); } params_.set_root(tempdir_.GetPath().value()); - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ("canary-channel", params_.target_channel()); EXPECT_TRUE(params_.mutable_image_props_.is_powerwash_allowed); } @@ -141,7 +141,7 @@ TEST_F(OmahaRequestParamsTest, SetTargetChannelInvalidTest) { OmahaRequestParams params(&fake_system_state_); params.set_root(tempdir_.GetPath().value()); SetLockDown(true); - EXPECT_TRUE(params.Init("", "", false)); + EXPECT_TRUE(params.Init("", "", {})); params.image_props_.allow_arbitrary_channels = false; string error_message; EXPECT_FALSE( @@ -151,7 +151,7 @@ TEST_F(OmahaRequestParamsTest, SetTargetChannelInvalidTest) { EXPECT_FALSE(params.mutable_image_props_.is_powerwash_allowed); } params_.set_root(tempdir_.GetPath().value()); - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ("stable-channel", params_.target_channel()); EXPECT_FALSE(params_.mutable_image_props_.is_powerwash_allowed); } @@ -197,7 +197,7 @@ TEST_F(OmahaRequestParamsTest, SetTargetChannelWorks) { // When set to a valid value while a change is already pending, it should // succeed. 
- params_.Init("", "", false); + params_.Init("", "", {}); EXPECT_TRUE(params_.SetTargetChannel("beta-channel", true, nullptr)); // The target channel should reflect the change, but the download channel // should continue to retain the old value ... @@ -237,7 +237,7 @@ TEST_F(OmahaRequestParamsTest, ToMoreStableChannelFlagTest) { } TEST_F(OmahaRequestParamsTest, TargetChannelHintTest) { - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); const string kHint("foo-hint"); params_.set_lts_tag(kHint); EXPECT_EQ(kHint, params_.lts_tag()); @@ -266,7 +266,7 @@ TEST_F(OmahaRequestParamsTest, CollectECFWVersionsTest) { } TEST_F(OmahaRequestParamsTest, RequisitionIsSetTest) { - EXPECT_TRUE(params_.Init("", "", false)); + EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ("fake_requisition", params_.device_requisition()); } } // namespace chromeos_update_engine diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc index 8667548f..2d571c12 100644 --- a/payload_state_unittest.cc +++ b/payload_state_unittest.cc @@ -630,7 +630,7 @@ TEST(PayloadStateTest, NoBackoffInteractiveChecks) { PayloadState payload_state; FakeSystemState fake_system_state; OmahaRequestParams params(&fake_system_state); - params.Init("", "", true); // interactive = True. + params.Init("", "", {.interactive = true}); fake_system_state.set_request_params(¶ms); EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); @@ -653,7 +653,7 @@ TEST(PayloadStateTest, NoBackoffForP2PUpdates) { PayloadState payload_state; FakeSystemState fake_system_state; OmahaRequestParams params(&fake_system_state); - params.Init("", "", false); // interactive = False. + params.Init("", "", {}); fake_system_state.set_request_params(¶ms); EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); @@ -1019,7 +1019,7 @@ TEST(PayloadStateTest, RollbackVersion) { // Mock out the os version and make sure it's excluded correctly. string rollback_version = "2345.0.0"; OmahaRequestParams params(&fake_system_state); - params.Init(rollback_version, "", false); + params.Init(rollback_version, "", {}); fake_system_state.set_request_params(¶ms); EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); diff --git a/real_system_state.cc b/real_system_state.cc index 74a37f35..924271ed 100644 --- a/real_system_state.cc +++ b/real_system_state.cc @@ -138,7 +138,7 @@ bool RealSystemState::Initialize() { // will be re-initialized before every request using the actual request // options. This initialization here pre-loads current channel and version, so // the DBus service can access it. - if (!request_params_.Init("", "", false)) { + if (!request_params_.Init("", "", {})) { LOG(WARNING) << "Ignoring OmahaRequestParams initialization error. 
Some " "features might not work properly."; } diff --git a/update_attempter.cc b/update_attempter.cc index 14d5837d..38b0f828 100644 --- a/update_attempter.cc +++ b/update_attempter.cc @@ -244,17 +244,7 @@ void UpdateAttempter::ReportOSAge() { system_state_->metrics_reporter()->ReportDailyMetrics(age); } -void UpdateAttempter::Update(const string& app_version, - const string& omaha_url, - const string& target_channel, - const string& lts_tag, - const string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool rollback_on_channel_downgrade, - bool obey_proxies, - bool interactive) { +void UpdateAttempter::Update(const UpdateCheckParams& params) { // This is normally called frequently enough so it's appropriate to use as a // hook for reporting daily metrics. // TODO(garnold) This should be hooked to a separate (reliable and consistent) @@ -283,21 +273,11 @@ void UpdateAttempter::Update(const string& app_version, return; } - if (!CalculateUpdateParams(app_version, - omaha_url, - target_channel, - lts_tag, - target_version_prefix, - rollback_allowed, - rollback_data_save_requested, - rollback_allowed_milestones, - rollback_on_channel_downgrade, - obey_proxies, - interactive)) { + if (!CalculateUpdateParams(params)) { return; } - BuildUpdateActions(interactive); + BuildUpdateActions(params.interactive); SetStatusAndNotify(UpdateStatus::CHECKING_FOR_UPDATE); @@ -360,17 +340,7 @@ void UpdateAttempter::CalculateP2PParams(bool interactive) { payload_state->SetUsingP2PForSharing(use_p2p_for_sharing); } -bool UpdateAttempter::CalculateUpdateParams(const string& app_version, - const string& omaha_url, - const string& target_channel, - const string& lts_tag, - const string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool rollback_on_channel_downgrade, - bool obey_proxies, - bool interactive) { +bool UpdateAttempter::CalculateUpdateParams(const UpdateCheckParams& params) { http_response_code_ = 0; PayloadStateInterface* const payload_state = system_state_->payload_state(); @@ -381,30 +351,13 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, // policy is available again. UpdateRollbackHappened(); - // Update the target version prefix. - omaha_request_params_->set_target_version_prefix(target_version_prefix); - - // Update the LTS support. - omaha_request_params_->set_lts_tag(lts_tag); - - // Set whether rollback is allowed. - omaha_request_params_->set_rollback_allowed(rollback_allowed); - - // Set whether saving data over rollback is requested. - omaha_request_params_->set_rollback_data_save_requested( - rollback_data_save_requested); - - CalculateStagingParams(interactive); + CalculateStagingParams(params.interactive); // If staging_wait_time_ wasn't set, staging is off, use scattering instead. if (staging_wait_time_.InSeconds() == 0) { - CalculateScatteringParams(interactive); + CalculateScatteringParams(params.interactive); } - // Set how many milestones of rollback are allowed. - omaha_request_params_->set_rollback_allowed_milestones( - rollback_allowed_milestones); - - CalculateP2PParams(interactive); + CalculateP2PParams(params.interactive); if (payload_state->GetUsingP2PForDownloading() || payload_state->GetUsingP2PForSharing()) { // OK, p2p is to be used - start it and perform housekeeping. 
@@ -417,30 +370,12 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, } } - if (!omaha_request_params_->Init(app_version, omaha_url, interactive)) { + if (!omaha_request_params_->Init( + forced_app_version_, forced_omaha_url_, params)) { LOG(ERROR) << "Unable to initialize Omaha request params."; return false; } - // Set the target channel, if one was provided. - if (target_channel.empty()) { - LOG(INFO) << "No target channel mandated by policy."; - } else { - LOG(INFO) << "Setting target channel as mandated: " << target_channel; - string error_message; - if (!omaha_request_params_->SetTargetChannel( - target_channel, rollback_on_channel_downgrade, &error_message)) { - LOG(ERROR) << "Setting the channel failed: " << error_message; - } - - // Since this is the beginning of a new attempt, update the download - // channel. The download channel won't be updated until the next attempt, - // even if target channel changes meanwhile, so that how we'll know if we - // should cancel the current download attempt if there's such a change in - // target channel. - omaha_request_params_->UpdateDownloadChannel(); - } - // The function |CalculateDlcParams| makes use of the function |GetAppId| from // |OmahaRequestParams|, so to ensure that the return from |GetAppId| // doesn't change, no changes to the values |download_channel_|, @@ -448,8 +383,6 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, // |omaha_request_params_| shall be made below this line. CalculateDlcParams(); - omaha_request_params_->set_is_install(is_install_); - // Set Quick Fix Build token if policy is set and the device is enterprise // enrolled. string token; @@ -480,7 +413,7 @@ bool UpdateAttempter::CalculateUpdateParams(const string& app_version, << payload_state->GetUsingP2PForSharing(); obeying_proxies_ = true; - if (obey_proxies || proxy_manual_checks_ == 0) { + if (proxy_manual_checks_ == 0) { LOG(INFO) << "forced to obey proxies"; // If forced to obey proxies, every 20th request will not use proxies proxy_manual_checks_++; @@ -767,6 +700,7 @@ void UpdateAttempter::CalculateDlcParams() { dlc_apps_params[omaha_request_params_->GetDlcAppId(dlc_id)] = dlc_params; } omaha_request_params_->set_dlc_apps_params(dlc_apps_params); + omaha_request_params_->set_is_install(is_install_); } void UpdateAttempter::BuildUpdateActions(bool interactive) { @@ -881,7 +815,7 @@ bool UpdateAttempter::Rollback(bool powerwash) { processor_->set_delegate(this); // Initialize the default request params. - if (!omaha_request_params_->Init("", "", true)) { + if (!omaha_request_params_->Init("", "", {.interactive = true})) { LOG(ERROR) << "Unable to initialize Omaha request params."; return false; } @@ -1107,17 +1041,7 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, LOG(INFO) << "Update attempt flags in use = 0x" << std::hex << current_update_attempt_flags_; - Update(forced_app_version_, - forced_omaha_url_, - params.target_channel, - params.lts_tag, - params.target_version_prefix, - params.rollback_allowed, - params.rollback_data_save_requested, - params.rollback_allowed_milestones, - params.rollback_on_channel_downgrade, - /*obey_proxies=*/false, - params.interactive); + Update(params); // Always clear the forced app_version and omaha_url after an update attempt // so the next update uses the defaults. 
forced_app_version_.clear(); diff --git a/update_attempter.h b/update_attempter.h index 6c931509..3a1bef48 100644 --- a/update_attempter.h +++ b/update_attempter.h @@ -76,23 +76,8 @@ class UpdateAttempter : public ActionProcessorDelegate, virtual bool ScheduleUpdates(); // Checks for update and, if a newer version is available, attempts to update - // the system. Non-empty |in_app_version| or |in_update_url| prevents - // automatic detection of the parameter. |target_channel| denotes a - // policy-mandated channel we are updating to, if not empty. If |obey_proxies| - // is true, the update will likely respect Chrome's proxy setting. For - // security reasons, we may still not honor them. |interactive| should be true - // if this was called from the user (ie dbus). - virtual void Update(const std::string& app_version, - const std::string& omaha_url, - const std::string& target_channel, - const std::string& lts_tag, - const std::string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool rollback_on_channel_downgrade, - bool obey_proxies, - bool interactive); + // the system. + virtual void Update(const chromeos_update_manager::UpdateCheckParams& params); // ActionProcessorDelegate methods: void ProcessingDone(const ActionProcessor* processor, @@ -371,17 +356,8 @@ class UpdateAttempter : public ActionProcessorDelegate, // Helper method of Update() to calculate the update-related parameters // from various sources and set the appropriate state. Please refer to // Update() method for the meaning of the parameters. - bool CalculateUpdateParams(const std::string& app_version, - const std::string& omaha_url, - const std::string& target_channel, - const std::string& lts_tag, - const std::string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool rollback_on_channel_downgrade, - bool obey_proxies, - bool interactive); + bool CalculateUpdateParams( + const chromeos_update_manager::UpdateCheckParams& params); // Calculates all the scattering related parameters (such as waiting period, // which type of scattering is enabled, etc.) 
and also updates/deletes diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 60267f05..767bb824 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -172,30 +172,10 @@ class UpdateAttempterUnderTest : public UpdateAttempter { explicit UpdateAttempterUnderTest(SystemState* system_state) : UpdateAttempter(system_state, nullptr) {} - void Update(const std::string& app_version, - const std::string& omaha_url, - const std::string& target_channel, - const std::string& lts_tag, - const std::string& target_version_prefix, - bool rollback_allowed, - bool rollback_data_save_requested, - int rollback_allowed_milestones, - bool rollback_on_channel_downgrade, - bool obey_proxies, - bool interactive) override { + void Update(const UpdateCheckParams& params) override { update_called_ = true; if (do_update_) { - UpdateAttempter::Update(app_version, - omaha_url, - target_channel, - lts_tag, - target_version_prefix, - rollback_allowed, - rollback_data_save_requested, - rollback_allowed_milestones, - rollback_on_channel_downgrade, - obey_proxies, - interactive); + UpdateAttempter::Update(params); return; } LOG(INFO) << "[TEST] Update() disabled."; @@ -430,7 +410,7 @@ void UpdateAttempterTest::ScheduleQuitMainLoop() { void UpdateAttempterTest::SessionIdTestChange() { EXPECT_NE(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status()); const auto old_session_id = attempter_.session_id_; - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_NE(old_session_id, attempter_.session_id_); ScheduleQuitMainLoop(); } @@ -801,7 +781,7 @@ void UpdateAttempterTest::UpdateTestStart() { EXPECT_CALL(*processor_, StartProcessing()); } - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); loop_.PostTask(FROM_HERE, base::Bind(&UpdateAttempterTest::UpdateTestVerify, base::Unretained(this))); @@ -1001,7 +981,7 @@ void UpdateAttempterTest::P2PNotEnabledStart() { fake_system_state_.set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_FALSE(actual_using_p2p_for_downloading_); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1023,7 +1003,7 @@ void UpdateAttempterTest::P2PEnabledStartingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(false); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1046,7 +1026,7 @@ void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_FALSE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1068,7 +1048,7 @@ void UpdateAttempterTest::P2PEnabledStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); 
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_TRUE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1091,17 +1071,7 @@ void UpdateAttempterTest::P2PEnabledInteractiveStart() { mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(true); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()); - attempter_.Update("", - "", - "", - "", - "", - false, - false, - /*rollback_allowed_milestones=*/0, - false, - false, - /*interactive=*/true); + attempter_.Update({.interactive = true}); EXPECT_FALSE(actual_using_p2p_for_downloading()); EXPECT_TRUE(actual_using_p2p_for_sharing()); ScheduleQuitMainLoop(); @@ -1131,7 +1101,7 @@ void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); ScheduleQuitMainLoop(); @@ -1169,7 +1139,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); // Make sure the file still exists. @@ -1185,7 +1155,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { // However, if the count is already 0, it's not decremented. Test that. initial_value = 0; EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value)); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount)); EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value)); EXPECT_EQ(initial_value, new_value); @@ -1232,17 +1202,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { new policy::PolicyProvider(std::move(device_policy))); // Trigger an interactive check so we can test that scattering is disabled. - attempter_.Update("", - "", - "", - "", - "", - false, - false, - /*rollback_allowed_milestones=*/0, - false, - false, - /*interactive=*/true); + attempter_.Update({.interactive = true}); EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds()); // Make sure scattering is disabled for manual (i.e. user initiated) update @@ -1294,7 +1254,7 @@ void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); // Check that prefs have the correct values. 
int64_t update_count; EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count)); @@ -1351,17 +1311,7 @@ void UpdateAttempterTest::StagingOffIfInteractiveStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", - "", - "", - "", - "", - false, - false, - 0, - false, - false, - /* interactive = */ true); + attempter_.Update({.interactive = true}); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1381,17 +1331,7 @@ void UpdateAttempterTest::StagingOffIfOobeStart() { FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); - attempter_.Update("", - "", - "", - "", - "", - false, - false, - 0, - false, - false, - /* interactive = */ true); + attempter_.Update({.interactive = true}); CheckStagingOff(); ScheduleQuitMainLoop(); @@ -1719,64 +1659,38 @@ TEST_F(UpdateAttempterTest, UpdateAfterInstall) { } TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) { - attempter_.CalculateUpdateParams( - /*app_version=*/"", - /*omaha_url=*/"", - /*target_channel=*/"", - /*lts_tag=*/"", - /*target_version_prefix=*/"1234", - /*rollback_allowed=*/false, - /*rollback_data_save_requested=*/false, - /*rollback_allowed_milestones=*/4, - /*rollback_on_channel_downgrade=*/false, - /*obey_proxies=*/false, - /*interactive=*/false); + UpdateCheckParams params; + attempter_.CalculateUpdateParams({.target_version_prefix = "1234"}); EXPECT_EQ("1234", fake_system_state_.request_params()->target_version_prefix()); - attempter_.CalculateUpdateParams( - "", "", "", "", "", false, false, 4, false, false, false); + attempter_.CalculateUpdateParams({}); EXPECT_TRUE( fake_system_state_.request_params()->target_version_prefix().empty()); } TEST_F(UpdateAttempterTest, TargetChannelHintSetAndReset) { - attempter_.CalculateUpdateParams( - "", "", "", "hint", "", false, false, 4, false, false, false); + attempter_.CalculateUpdateParams({.lts_tag = "hint"}); EXPECT_EQ("hint", fake_system_state_.request_params()->lts_tag()); - attempter_.CalculateUpdateParams( - "", "", "", "", "", false, false, 4, false, false, false); + attempter_.CalculateUpdateParams({}); EXPECT_TRUE(fake_system_state_.request_params()->lts_tag().empty()); } TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { - attempter_.CalculateUpdateParams("", - "", - "", - "", - "1234", - /*rollback_allowed=*/true, - /*rollback_data_save_requested=*/false, - /*rollback_allowed_milestones=*/4, - /*rollback_on_channel_downgrade=*/false, - false, - false); + attempter_.CalculateUpdateParams({ + .target_version_prefix = "1234", + .rollback_allowed = true, + .rollback_allowed_milestones = 4, + }); EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed()); EXPECT_EQ(4, fake_system_state_.request_params()->rollback_allowed_milestones()); - attempter_.CalculateUpdateParams("", - "", - "", - "", - "1234", - /*rollback_allowed=*/false, - /*rollback_data_save_requested=*/false, - /*rollback_allowed_milestones=*/4, - /*rollback_on_channel_downgrade=*/false, - false, - false); + attempter_.CalculateUpdateParams({ + .target_version_prefix = "1234", + .rollback_allowed_milestones = 4, + }); EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed()); EXPECT_EQ(4, fake_system_state_.request_params()->rollback_allowed_milestones()); @@ -1786,17 +1700,9 @@ TEST_F(UpdateAttempterTest, ChannelDowngradeNoRollback) { base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); - 
attempter_.CalculateUpdateParams(/*app_version=*/"", - /*omaha_url=*/"", - /*target_channel=*/"stable-channel", - /*lts_tag=*/"", - /*target_version_prefix=*/"", - /*rollback_allowed=*/false, - /*rollback_data_save_requested=*/false, - /*rollback_allowed_milestones=*/4, - /*rollback_on_channel_downgrade=*/false, - /*obey_proxies=*/false, - /*interactive=*/false); + attempter_.CalculateUpdateParams({ + .target_channel = "stable-channel", + }); EXPECT_FALSE(fake_system_state_.request_params()->is_powerwash_allowed()); } @@ -1804,17 +1710,10 @@ TEST_F(UpdateAttempterTest, ChannelDowngradeRollback) { base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); - attempter_.CalculateUpdateParams(/*app_version=*/"", - /*omaha_url=*/"", - /*target_channel=*/"stable-channel", - /*lts_tag=*/"", - /*target_version_prefix=*/"", - /*rollback_allowed=*/false, - /*rollback_data_save_requested=*/false, - /*rollback_allowed_milestones=*/4, - /*rollback_on_channel_downgrade=*/true, - /*obey_proxies=*/false, - /*interactive=*/false); + attempter_.CalculateUpdateParams({ + .rollback_on_channel_downgrade = true, + .target_channel = "stable-channel", + }); EXPECT_TRUE(fake_system_state_.request_params()->is_powerwash_allowed()); } @@ -1932,7 +1831,7 @@ void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, SetRollbackHappened(false)) .Times(expected_reset ? 1 : 0); attempter_.policy_provider_ = std::move(mock_policy_provider); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); ScheduleQuitMainLoop(); } @@ -2273,7 +2172,7 @@ void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) { .WillOnce(Return(false)); attempter_.policy_provider_.reset( new policy::PolicyProvider(std::move(device_policy))); - attempter_.Update("", "", "", "", "", false, false, 0, false, false, false); + attempter_.Update({}); EXPECT_EQ(token, attempter_.omaha_request_params_->autoupdate_token()); ScheduleQuitMainLoop(); diff --git a/update_manager/policy.h b/update_manager/policy.h index 4b3bfc72..ad6994c6 100644 --- a/update_manager/policy.h +++ b/update_manager/policy.h @@ -43,21 +43,22 @@ std::string ToString(EvalStatus status); // Parameters of an update check. These parameters are determined by the // UpdateCheckAllowed policy. struct UpdateCheckParams { - bool updates_enabled; // Whether the auto-updates are enabled on this build. + // Whether the auto-updates are enabled on this build. + bool updates_enabled{true}; // Attributes pertaining to the case where update checks are allowed. // // A target version prefix, if imposed by policy; otherwise, an empty string. std::string target_version_prefix; // Specifies whether rollback images are allowed by device policy. - bool rollback_allowed; + bool rollback_allowed{false}; // Specifies if rollbacks should attempt to preserve some system state. - bool rollback_data_save_requested; + bool rollback_data_save_requested{false}; // Specifies the number of Chrome milestones rollback should be allowed, // starting from the stable version at any time. Value is -1 if unspecified // (e.g. no device policy is available yet), in this case no version // roll-forward should happen. - int rollback_allowed_milestones; + int rollback_allowed_milestones{0}; // Whether a rollback with data save should be initiated on channel // downgrade (e.g. beta to stable). 
bool rollback_on_channel_downgrade{false}; @@ -67,7 +68,7 @@ struct UpdateCheckParams { std::string lts_tag; // Whether the allowed update is interactive (user-initiated) or periodic. - bool interactive; + bool interactive{false}; }; // Input arguments to UpdateCanStart. diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc index f0804c44..06808b81 100644 --- a/update_manager/real_updater_provider_unittest.cc +++ b/update_manager/real_updater_provider_unittest.cc @@ -327,7 +327,7 @@ TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeFailNoValue) { TEST_F(UmRealUpdaterProviderTest, GetCurrChannelOkay) { const string kChannelName("foo-channel"); OmahaRequestParams request_params(&fake_sys_state_); - request_params.Init("", "", false); + request_params.Init("", "", {}); request_params.set_current_channel(kChannelName); fake_sys_state_.set_request_params(&request_params); UmTestUtils::ExpectVariableHasValue(kChannelName, @@ -336,7 +336,7 @@ TEST_F(UmRealUpdaterProviderTest, GetCurrChannelOkay) { TEST_F(UmRealUpdaterProviderTest, GetCurrChannelFailEmpty) { OmahaRequestParams request_params(&fake_sys_state_); - request_params.Init("", "", false); + request_params.Init("", "", {}); request_params.set_current_channel(""); fake_sys_state_.set_request_params(&request_params); UmTestUtils::ExpectVariableNotSet(provider_->var_curr_channel()); @@ -345,7 +345,7 @@ TEST_F(UmRealUpdaterProviderTest, GetCurrChannelFailEmpty) { TEST_F(UmRealUpdaterProviderTest, GetNewChannelOkay) { const string kChannelName("foo-channel"); OmahaRequestParams request_params(&fake_sys_state_); - request_params.Init("", "", false); + request_params.Init("", "", {}); request_params.set_target_channel(kChannelName); fake_sys_state_.set_request_params(&request_params); UmTestUtils::ExpectVariableHasValue(kChannelName, @@ -354,7 +354,7 @@ TEST_F(UmRealUpdaterProviderTest, GetNewChannelOkay) { TEST_F(UmRealUpdaterProviderTest, GetNewChannelFailEmpty) { OmahaRequestParams request_params(&fake_sys_state_); - request_params.Init("", "", false); + request_params.Init("", "", {}); request_params.set_target_channel(""); fake_sys_state_.set_request_params(&request_params); UmTestUtils::ExpectVariableNotSet(provider_->var_new_channel()); From 5d56c1eef7fd730a164c44a742ef2cb0afcfca8d Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 27 Oct 2020 15:44:33 -0700 Subject: [PATCH 430/624] update_engine: Remove ec_version and fw_version It seems like these values were set for devices like parrot that have been AUE'ed long time ago. The server side doesn't use this values and it doesn't seem we need them anywhere. So its better to not send them anymore since currently we're just sending empty values for them. 
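For reference, the EC value came from parsing one line of `mosys -k ec info` output; a condensed sketch of the helper deleted below (utils::ParseECVersion), kept here only to document the behaviour that goes away:

  // e.g. `vendor="sam" fw_version="00VFA616"`  ->  "00VFA616"
  std::string ParseECVersion(std::string input_line) {
    base::TrimWhitespaceASCII(input_line, base::TRIM_ALL, &input_line);
    std::vector<std::pair<std::string, std::string>> kv_pairs;
    if (base::SplitStringIntoKeyValuePairs(input_line, '=', ' ', &kv_pairs)) {
      for (const auto& kv_pair : kv_pairs) {
        if (kv_pair.first == "fw_version") {
          std::string fw_version;
          base::TrimString(kv_pair.second, "\"", &fw_version);
          return fw_version;
        }
      }
    }
    return "";  // No parsable fw_version key: report an empty version.
  }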
BUG=b:171829801 TEST=FEATURES=test emerge-reef update_engine Change-Id: I2d2cd5f9d079ce5c87fc8167e30f91d1ffe1c87a Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2504454 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Reviewed-by: Tianjie Xu Commit-Queue: Amin Hassani --- common/fake_hardware.h | 12 ------------ common/hardware_interface.h | 8 -------- common/mock_hardware.h | 7 ------- common/utils.cc | 21 --------------------- common/utils.h | 4 ---- common/utils_unittest.cc | 13 ------------- hardware_android.cc | 10 ---------- hardware_android.h | 2 -- hardware_chromeos.cc | 19 ------------------- hardware_chromeos.h | 2 -- omaha_request_action_unittest.cc | 4 ---- omaha_request_builder_xml.cc | 2 -- omaha_request_builder_xml_unittest.cc | 4 ---- omaha_request_params.cc | 12 ------------ omaha_request_params.h | 15 --------------- omaha_request_params_unittest.cc | 8 -------- 16 files changed, 143 deletions(-) diff --git a/common/fake_hardware.h b/common/fake_hardware.h index 82382ffd..00a212e3 100644 --- a/common/fake_hardware.h +++ b/common/fake_hardware.h @@ -76,10 +76,6 @@ class FakeHardware : public HardwareInterface { std::string GetHardwareClass() const override { return hardware_class_; } - std::string GetFirmwareVersion() const override { return firmware_version_; } - - std::string GetECVersion() const override { return ec_version_; } - std::string GetDeviceRequisition() const override { return device_requisition_; } @@ -176,12 +172,6 @@ class FakeHardware : public HardwareInterface { hardware_class_ = hardware_class; } - void SetFirmwareVersion(const std::string& firmware_version) { - firmware_version_ = firmware_version; - } - - void SetECVersion(const std::string& ec_version) { ec_version_ = ec_version; } - void SetDeviceRequisition(const std::string& requisition) { device_requisition_ = requisition; } @@ -233,8 +223,6 @@ class FakeHardware : public HardwareInterface { // Jan 20, 2007 base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)}; std::string hardware_class_{"Fake HWID BLAH-1234"}; - std::string firmware_version_{"Fake Firmware v1.0.1"}; - std::string ec_version_{"Fake EC v1.0a"}; std::string device_requisition_{"fake_requisition"}; int min_kernel_key_version_{kMinKernelKeyVersion}; int min_firmware_key_version_{kMinFirmwareKeyVersion}; diff --git a/common/hardware_interface.h b/common/hardware_interface.h index b37b0074..cad32fc5 100644 --- a/common/hardware_interface.h +++ b/common/hardware_interface.h @@ -64,14 +64,6 @@ class HardwareInterface { // Returns the HWID or an empty string on error. virtual std::string GetHardwareClass() const = 0; - // Returns the firmware version or an empty string if the system is - // not running chrome os firmware. - virtual std::string GetFirmwareVersion() const = 0; - - // Returns the ec version or an empty string if the system is not - // running a custom chrome os ec. - virtual std::string GetECVersion() const = 0; - // Returns the OEM device requisition or an empty string if the system does // not have a requisition, or if not running Chrome OS. 
virtual std::string GetDeviceRequisition() const = 0; diff --git a/common/mock_hardware.h b/common/mock_hardware.h index 84c0c5ba..071906b5 100644 --- a/common/mock_hardware.h +++ b/common/mock_hardware.h @@ -45,11 +45,6 @@ class MockHardware : public HardwareInterface { ON_CALL(*this, GetHardwareClass()) .WillByDefault( testing::Invoke(&fake_, &FakeHardware::GetHardwareClass)); - ON_CALL(*this, GetFirmwareVersion()) - .WillByDefault( - testing::Invoke(&fake_, &FakeHardware::GetFirmwareVersion)); - ON_CALL(*this, GetECVersion()) - .WillByDefault(testing::Invoke(&fake_, &FakeHardware::GetECVersion)); ON_CALL(*this, GetMinKernelKeyVersion()) .WillByDefault( testing::Invoke(&fake_, &FakeHardware::GetMinKernelKeyVersion)); @@ -90,8 +85,6 @@ class MockHardware : public HardwareInterface { MOCK_CONST_METHOD0(IsOOBEEnabled, bool()); MOCK_CONST_METHOD1(IsOOBEComplete, bool(base::Time* out_time_of_oobe)); MOCK_CONST_METHOD0(GetHardwareClass, std::string()); - MOCK_CONST_METHOD0(GetFirmwareVersion, std::string()); - MOCK_CONST_METHOD0(GetECVersion, std::string()); MOCK_CONST_METHOD0(GetMinKernelKeyVersion, int()); MOCK_CONST_METHOD0(GetMinFirmwareKeyVersion, int()); MOCK_CONST_METHOD0(GetMaxFirmwareKeyRollforward, int()); diff --git a/common/utils.cc b/common/utils.cc index 9e1e6c58..c8924b1e 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -112,27 +112,6 @@ bool GetTempName(const string& path, base::FilePath* template_path) { namespace utils { -string ParseECVersion(string input_line) { - base::TrimWhitespaceASCII(input_line, base::TRIM_ALL, &input_line); - - // At this point we want to convert the format key=value pair from mosys to - // a vector of key value pairs. - vector> kv_pairs; - if (base::SplitStringIntoKeyValuePairs(input_line, '=', ' ', &kv_pairs)) { - for (const pair& kv_pair : kv_pairs) { - // Finally match against the fw_verion which may have quotes. - if (kv_pair.first == "fw_version") { - string output; - // Trim any quotes. - base::TrimString(kv_pair.second, "\"", &output); - return output; - } - } - } - LOG(ERROR) << "Unable to parse fwid from ec info."; - return ""; -} - bool WriteFile(const char* path, const void* data, size_t data_len) { int fd = HANDLE_EINTR(open(path, O_WRONLY | O_CREAT | O_TRUNC, 0600)); TEST_AND_RETURN_FALSE_ERRNO(fd >= 0); diff --git a/common/utils.h b/common/utils.h index bcaed318..f364bfdb 100644 --- a/common/utils.h +++ b/common/utils.h @@ -53,10 +53,6 @@ std::string StringVectorToString(const std::vector& vec_str); std::string CalculateP2PFileId(const brillo::Blob& payload_hash, size_t payload_size); -// Parse the firmware version from one line of output from the -// "mosys" command. -std::string ParseECVersion(std::string input_line); - // Writes the data passed to path. The file at path will be overwritten if it // exists. Returns true on success, false otherwise. bool WriteFile(const char* path, const void* data, size_t data_len); diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc index d73b3da7..bc5c3b05 100644 --- a/common/utils_unittest.cc +++ b/common/utils_unittest.cc @@ -41,19 +41,6 @@ namespace chromeos_update_engine { class UtilsTest : public ::testing::Test {}; -TEST(UtilsTest, CanParseECVersion) { - // Should be able to parse and valid key value line. 
- EXPECT_EQ("12345", utils::ParseECVersion("fw_version=12345")); - EXPECT_EQ("123456", - utils::ParseECVersion("b=1231a fw_version=123456 a=fasd2")); - EXPECT_EQ("12345", utils::ParseECVersion("fw_version=12345")); - EXPECT_EQ("00VFA616", - utils::ParseECVersion("vendor=\"sam\" fw_version=\"00VFA616\"")); - - // For invalid entries, should return the empty string. - EXPECT_EQ("", utils::ParseECVersion("b=1231a fw_version a=fasd2")); -} - TEST(UtilsTest, WriteFileOpenFailure) { EXPECT_FALSE(utils::WriteFile("/this/doesn't/exist", "hello", 5)); } diff --git a/hardware_android.cc b/hardware_android.cc index 48945224..8d1fdfdd 100644 --- a/hardware_android.cc +++ b/hardware_android.cc @@ -49,8 +49,6 @@ namespace { // Android properties that identify the hardware and potentially non-updatable // parts of the bootloader (such as the bootloader version and the baseband // version). -const char kPropBootBootloader[] = "ro.boot.bootloader"; -const char kPropBootBaseband[] = "ro.boot.baseband"; const char kPropProductManufacturer[] = "ro.product.manufacturer"; const char kPropBootHardwareSKU[] = "ro.boot.hardware.sku"; const char kPropBootRevision[] = "ro.boot.revision"; @@ -128,14 +126,6 @@ string HardwareAndroid::GetHardwareClass() const { return manufacturer + ":" + sku + ":" + revision; } -string HardwareAndroid::GetFirmwareVersion() const { - return GetProperty(kPropBootBootloader, ""); -} - -string HardwareAndroid::GetECVersion() const { - return GetProperty(kPropBootBaseband, ""); -} - string HardwareAndroid::GetDeviceRequisition() const { LOG(WARNING) << "STUB: Getting requisition is not supported."; return ""; diff --git a/hardware_android.h b/hardware_android.h index b6704477..d7e39f3b 100644 --- a/hardware_android.h +++ b/hardware_android.h @@ -43,8 +43,6 @@ class HardwareAndroid : public HardwareInterface { bool IsOOBEEnabled() const override; bool IsOOBEComplete(base::Time* out_time_of_oobe) const override; std::string GetHardwareClass() const override; - std::string GetFirmwareVersion() const override; - std::string GetECVersion() const override; std::string GetDeviceRequisition() const override; int GetMinKernelKeyVersion() const override; int GetMinFirmwareKeyVersion() const override; diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc index cce5e842..dbb99dba 100644 --- a/hardware_chromeos.cc +++ b/hardware_chromeos.cc @@ -175,25 +175,6 @@ string HardwareChromeOS::GetHardwareClass() const { return ReadValueFromCrosSystem("hwid"); } -string HardwareChromeOS::GetFirmwareVersion() const { - return ReadValueFromCrosSystem("fwid"); -} - -string HardwareChromeOS::GetECVersion() const { - string input_line, error; - int exit_code = 0; - vector cmd = {"/usr/sbin/mosys", "-k", "ec", "info"}; - - if (!Subprocess::SynchronousExec(cmd, &exit_code, &input_line, &error) || - exit_code != 0) { - LOG(ERROR) << "Unable to read EC info from mosys with exit code: " - << exit_code << " and error: " << error; - return ""; - } - - return utils::ParseECVersion(input_line); -} - string HardwareChromeOS::GetDeviceRequisition() const { #if USE_CFM const char* kLocalStatePath = "/home/chronos/Local State"; diff --git a/hardware_chromeos.h b/hardware_chromeos.h index bbfe2739..9ee62f68 100644 --- a/hardware_chromeos.h +++ b/hardware_chromeos.h @@ -46,8 +46,6 @@ class HardwareChromeOS final : public HardwareInterface { bool IsOOBEEnabled() const override; bool IsOOBEComplete(base::Time* out_time_of_oobe) const override; std::string GetHardwareClass() const override; - std::string GetFirmwareVersion() const 
override; - std::string GetECVersion() const override; std::string GetDeviceRequisition() const override; int GetMinKernelKeyVersion() const override; int GetMinFirmwareKeyVersion() const override; diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index adb95dff..61e988bb 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -365,8 +365,6 @@ class OmahaRequestActionTest : public ::testing::Test { request_params_.set_current_channel("unittest"); request_params_.set_target_channel("unittest"); request_params_.set_hwid("OEM MODEL 09235 7471"); - request_params_.set_fw_version("ChromeOSFirmware.1.0"); - request_params_.set_ec_version("0X0A1"); request_params_.set_delta_okay(true); request_params_.set_interactive(false); request_params_.set_update_url("http://url"); @@ -1604,8 +1602,6 @@ TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) { string::npos); EXPECT_NE(post_str.find("hardware_class=\"OEM MODEL 09235 7471\""), string::npos); - EXPECT_NE(post_str.find("fw_version=\"ChromeOSFirmware.1.0\""), string::npos); - EXPECT_NE(post_str.find("ec_version=\"0X0A1\""), string::npos); // No tag should be sent if we didn't reboot to an update. EXPECT_EQ(post_str.find("app_lang(), "en-US") + "\" " + - "fw_version=\"" + XmlEncodeWithDefault(params_->fw_version()) + "\" " + - "ec_version=\"" + XmlEncodeWithDefault(params_->ec_version()) + "\" " + requisition_arg) + ">\n" + diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc index a8044205..042d9919 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/omaha_request_builder_xml_unittest.cc @@ -108,8 +108,6 @@ TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) { // in fact present in the . const string app = omaha_request.GetApp(dlc_app_data); EXPECT_NE(string::npos, app.find("lang=")); - EXPECT_NE(string::npos, app.find("fw_version=")); - EXPECT_NE(string::npos, app.find("ec_version=")); EXPECT_NE(string::npos, app.find("requisition=")); } @@ -132,8 +130,6 @@ TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { // fact not present in the . 
const string app = omaha_request.GetApp(dlc_app_data); EXPECT_EQ(string::npos, app.find("lang=")); - EXPECT_EQ(string::npos, app.find("fw_version=")); - EXPECT_EQ(string::npos, app.find("ec_version=")); EXPECT_EQ(string::npos, app.find("requisition=")); } diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 79d19e8f..5a487207 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -92,10 +92,6 @@ bool OmahaRequestParams::Init(const string& app_version, os_sp_ = image_props_.version + "_" + GetMachineType(); app_lang_ = "en-US"; hwid_ = system_state_->hardware()->GetHardwareClass(); - if (CollectECFWVersions()) { - fw_version_ = system_state_->hardware()->GetFirmwareVersion(); - ec_version_ = system_state_->hardware()->GetECVersion(); - } device_requisition_ = system_state_->hardware()->GetDeviceRequisition(); if (image_props_.current_channel == mutable_image_props_.target_channel) { @@ -170,14 +166,6 @@ bool OmahaRequestParams::IsUpdateUrlOfficial() const { update_url_ == image_props_.omaha_url); } -bool OmahaRequestParams::CollectECFWVersions() const { - return base::StartsWith( - hwid_, string("PARROT"), base::CompareCase::SENSITIVE) || - base::StartsWith( - hwid_, string("SPRING"), base::CompareCase::SENSITIVE) || - base::StartsWith(hwid_, string("SNOW"), base::CompareCase::SENSITIVE); -} - bool OmahaRequestParams::SetTargetChannel(const string& new_target_channel, bool is_powerwash_allowed, string* error_message) { diff --git a/omaha_request_params.h b/omaha_request_params.h index 7e192620..ed3cc80e 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -104,8 +104,6 @@ class OmahaRequestParams { } inline std::string app_lang() const { return app_lang_; } inline std::string hwid() const { return hwid_; } - inline std::string fw_version() const { return fw_version_; } - inline std::string ec_version() const { return ec_version_; } inline std::string device_requisition() const { return device_requisition_; } inline void set_app_version(const std::string& version) { @@ -294,12 +292,6 @@ class OmahaRequestParams { } void set_app_lang(const std::string& app_lang) { app_lang_ = app_lang; } void set_hwid(const std::string& hwid) { hwid_ = hwid; } - void set_fw_version(const std::string& fw_version) { - fw_version_ = fw_version; - } - void set_ec_version(const std::string& ec_version) { - ec_version_ = ec_version; - } void set_is_powerwash_allowed(bool powerwash_allowed) { mutable_image_props_.is_powerwash_allowed = powerwash_allowed; } @@ -313,7 +305,6 @@ class OmahaRequestParams { private: FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest); - FRIEND_TEST(OmahaRequestParamsTest, CollectECFWVersionsTest); FRIEND_TEST(OmahaRequestParamsTest, IsValidChannelTest); FRIEND_TEST(OmahaRequestParamsTest, SetIsPowerwashAllowedTest); FRIEND_TEST(OmahaRequestParamsTest, SetTargetChannelInvalidTest); @@ -336,10 +327,6 @@ class OmahaRequestParams { // i.e. index(target_channel) > index(current_channel). bool ToMoreStableChannel() const; - // Returns True if we should store the fw/ec versions based on our hwid_. - // Compares hwid to a set of prefixes in the allowlist. - bool CollectECFWVersions() const; - // Gets the machine type (e.g. "i686"). std::string GetMachineType() const; @@ -380,8 +367,6 @@ class OmahaRequestParams { std::string lts_tag_; std::string hwid_; // Hardware Qualification ID of the client - std::string fw_version_; // Chrome OS Firmware Version. - std::string ec_version_; // Chrome OS EC Version. 
// TODO(b:133324571) tracks removal of this field once it is no longer // needed in AU requests. Remove by October 1st 2019. std::string device_requisition_; // Chrome OS Requisition type. diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc index 140cad3c..fcf80625 100644 --- a/omaha_request_params_unittest.cc +++ b/omaha_request_params_unittest.cc @@ -257,14 +257,6 @@ TEST_F(OmahaRequestParamsTest, ShouldPowerwashTest) { EXPECT_TRUE(params_.ShouldPowerwash()); } -TEST_F(OmahaRequestParamsTest, CollectECFWVersionsTest) { - params_.hwid_ = string("STUMPY ALEX 12345"); - EXPECT_FALSE(params_.CollectECFWVersions()); - - params_.hwid_ = string("SNOW 12345"); - EXPECT_TRUE(params_.CollectECFWVersions()); -} - TEST_F(OmahaRequestParamsTest, RequisitionIsSetTest) { EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ("fake_requisition", params_.device_requisition()); From ed03b44c7f9136d6491d7c238f1ab7b66c12456c Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 26 Oct 2020 17:21:29 -0700 Subject: [PATCH 431/624] update_engine: Fix leaking unit tests Some of the unit tests have been leaking temp files because they don't properly unlink them. In this CL, we rearranged the ScopedTempFile class and moved it into utils.h (instead of a testing-only location) so it can be used everywhere and more efficiently. We also added the ability to open a file descriptor for the temp file, so users don't have to keep a separate object for the file descriptor. BUG=b:162766400 TEST=cros_workon_make --board reef --test; Then looked at the /build/reef/tmp directory and no files were leaked. Change-Id: Id64a2923d30f27628120497fdefe16bf65fa3fb0 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2500772 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- common/hash_calculator_unittest.cc | 4 +- common/http_fetcher_unittest.cc | 2 +- common/test_utils.h | 16 -- common/utils.h | 36 ++++ common/utils_unittest.cc | 12 +- dynamic_partition_control_android_unittest.cc | 1 - omaha_response_handler_action_unittest.cc | 2 +- .../bzip_extent_writer_unittest.cc | 2 +- .../cached_file_descriptor_unittest.cc | 2 +- .../delta_performer_integration_test.cc | 176 ++++++++---------- payload_consumer/delta_performer_unittest.cc | 18 +- payload_consumer/download_action_unittest.cc | 2 - payload_consumer/extent_reader_unittest.cc | 2 +- payload_consumer/extent_writer_unittest.cc | 2 +- .../file_descriptor_utils_unittest.cc | 8 +- payload_consumer/file_writer_unittest.cc | 5 +- .../filesystem_verifier_action_unittest.cc | 6 +- .../verity_writer_android_unittest.cc | 2 +- payload_generator/ab_generator_unittest.cc | 14 +- .../blob_file_writer_unittest.cc | 23 +-- payload_generator/block_mapping_unittest.cc | 4 +- .../boot_img_filesystem_unittest.cc | 2 +- payload_generator/delta_diff_generator.cc | 15 +- payload_generator/delta_diff_utils.cc | 8 +- .../delta_diff_utils_unittest.cc | 32 ++-- payload_generator/ext2_filesystem_unittest.cc | 2 +- .../full_update_generator_unittest.cc | 21 +-- .../mapfile_filesystem_unittest.cc | 4 +- payload_generator/payload_file.cc | 10 +- payload_generator/payload_file_unittest.cc | 4 +- ...load_generation_config_android_unittest.cc | 3 +- .../payload_properties_unittest.cc | 24 +-- payload_generator/payload_signer_unittest.cc | 4 +- payload_generator/squashfs_filesystem.cc | 11 +- 34 files changed, 215 insertions(+), 264 deletions(-) diff --git a/common/hash_calculator_unittest.cc
b/common/hash_calculator_unittest.cc index e8f73d5c..fe7d543d 100644 --- a/common/hash_calculator_unittest.cc +++ b/common/hash_calculator_unittest.cc @@ -104,7 +104,7 @@ TEST_F(HashCalculatorTest, BigTest) { } TEST_F(HashCalculatorTest, UpdateFileSimpleTest) { - test_utils::ScopedTempFile data_file("data.XXXXXX"); + ScopedTempFile data_file("data.XXXXXX"); ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi")); for (const int length : {-1, 2, 10}) { @@ -126,7 +126,7 @@ TEST_F(HashCalculatorTest, UpdateFileSimpleTest) { } TEST_F(HashCalculatorTest, RawHashOfFileSimpleTest) { - test_utils::ScopedTempFile data_file("data.XXXXXX"); + ScopedTempFile data_file("data.XXXXXX"); ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi")); for (const int length : {-1, 2, 10}) { diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc index 1ead6813..99ea99bd 100644 --- a/common/http_fetcher_unittest.cc +++ b/common/http_fetcher_unittest.cc @@ -369,7 +369,7 @@ class FileFetcherTest : public AnyHttpFetcherTest { HttpServer* CreateServer() override { return new NullHttpServer; } private: - test_utils::ScopedTempFile temp_file_{"ue_file_fetcher.XXXXXX"}; + ScopedTempFile temp_file_{"ue_file_fetcher.XXXXXX"}; }; class MultiRangeHttpFetcherOverFileFetcherTest : public FileFetcherTest { diff --git a/common/test_utils.h b/common/test_utils.h index 63ea7492..bb5a6789 100644 --- a/common/test_utils.h +++ b/common/test_utils.h @@ -138,22 +138,6 @@ class ScopedLoopbackDeviceBinder { DISALLOW_COPY_AND_ASSIGN(ScopedLoopbackDeviceBinder); }; -class ScopedTempFile { - public: - ScopedTempFile() : ScopedTempFile("update_engine_test_temp_file.XXXXXX") {} - - explicit ScopedTempFile(const std::string& pattern) { - EXPECT_TRUE(utils::MakeTempFile(pattern, &path_, nullptr)); - unlinker_.reset(new ScopedPathUnlinker(path_)); - } - - const std::string& path() const { return path_; } - - private: - std::string path_; - std::unique_ptr unlinker_; -}; - class ScopedLoopMounter { public: explicit ScopedLoopMounter(const std::string& file_path, diff --git a/common/utils.h b/common/utils.h index f364bfdb..05a92be8 100644 --- a/common/utils.h +++ b/common/utils.h @@ -370,6 +370,42 @@ class ScopedPathUnlinker { DISALLOW_COPY_AND_ASSIGN(ScopedPathUnlinker); }; +class ScopedTempFile { + public: + ScopedTempFile() : ScopedTempFile("update_engine_temp.XXXXXX") {} + + // If |open_fd| is true, a writable file descriptor will be opened for this + // file. + explicit ScopedTempFile(const std::string& pattern, bool open_fd = false) { + CHECK(utils::MakeTempFile(pattern, &path_, open_fd ? &fd_ : nullptr)); + unlinker_.reset(new ScopedPathUnlinker(path_)); + if (open_fd) { + CHECK_GE(fd_, 0); + fd_closer_.reset(new ScopedFdCloser(&fd_)); + } + } + virtual ~ScopedTempFile() = default; + + const std::string& path() const { return path_; } + int fd() const { + CHECK(fd_closer_); + return fd_; + } + void CloseFd() { + CHECK(fd_closer_); + fd_closer_.reset(); + } + + private: + std::string path_; + std::unique_ptr unlinker_; + + int fd_{-1}; + std::unique_ptr fd_closer_; + + DISALLOW_COPY_AND_ASSIGN(ScopedTempFile); +}; + // A little object to call ActionComplete on the ActionProcessor when // it's destructed. 
class ScopedActionCompleter { diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc index bc5c3b05..20c6b849 100644 --- a/common/utils_unittest.cc +++ b/common/utils_unittest.cc @@ -46,7 +46,7 @@ TEST(UtilsTest, WriteFileOpenFailure) { } TEST(UtilsTest, WriteFileReadFile) { - test_utils::ScopedTempFile file; + ScopedTempFile file; EXPECT_TRUE(utils::WriteFile(file.path().c_str(), "hello", 5)); brillo::Blob readback; @@ -60,7 +60,7 @@ TEST(UtilsTest, ReadFileFailure) { } TEST(UtilsTest, ReadFileChunk) { - test_utils::ScopedTempFile file; + ScopedTempFile file; brillo::Blob data; const size_t kSize = 1024 * 1024; for (size_t i = 0; i < kSize; i++) { @@ -149,7 +149,7 @@ TEST(UtilsTest, FuzzIntTest) { namespace { void GetFileFormatTester(const string& expected, const vector& contents) { - test_utils::ScopedTempFile file; + ScopedTempFile file; ASSERT_TRUE(utils::WriteFile(file.path().c_str(), reinterpret_cast(contents.data()), contents.size())); @@ -378,7 +378,7 @@ TEST(UtilsTest, RunAsRootUnmountFilesystemFailureTest) { } TEST(UtilsTest, RunAsRootUnmountFilesystemBusyFailureTest) { - test_utils::ScopedTempFile tmp_image("img.XXXXXX"); + ScopedTempFile tmp_image("img.XXXXXX"); EXPECT_TRUE(base::CopyFile( test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"), @@ -418,7 +418,7 @@ TEST(UtilsTest, IsMountpointTest) { EXPECT_TRUE(mnt_dir.CreateUniqueTempDir()); EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value())); - test_utils::ScopedTempFile file; + ScopedTempFile file; EXPECT_FALSE(utils::IsMountpoint(file.path())); } @@ -460,7 +460,7 @@ TEST(UtilsTest, ParseDottedVersion) { } TEST(UtilsTest, GetFilePathTest) { - test_utils::ScopedTempFile file; + ScopedTempFile file; int fd = HANDLE_EINTR(open(file.path().c_str(), O_RDONLY)); EXPECT_GE(fd, 0); EXPECT_EQ(file.path(), utils::GetFilePath(fd)); diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc index 223e177d..c1e0dafc 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/dynamic_partition_control_android_unittest.cc @@ -34,7 +34,6 @@ using android::dm::DmDeviceState; using android::snapshot::MockSnapshotManager; using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder; -using chromeos_update_engine::test_utils::ScopedTempFile; using std::string; using testing::_; using testing::AnyNumber; diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc index 4e421b03..9613e8de 100644 --- a/omaha_response_handler_action_unittest.cc +++ b/omaha_response_handler_action_unittest.cc @@ -176,7 +176,7 @@ bool OmahaResponseHandlerActionTest::DoTest(const OmahaResponse& in, } TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { - test_utils::ScopedTempFile test_deadline_file( + ScopedTempFile test_deadline_file( "omaha_response_handler_action_unittest-XXXXXX"); { OmahaResponse in; diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc index 125e1e5e..b5870401 100644 --- a/payload_consumer/bzip_extent_writer_unittest.cc +++ b/payload_consumer/bzip_extent_writer_unittest.cc @@ -49,7 +49,7 @@ class BzipExtentWriterTest : public ::testing::Test { void TearDown() override { fd_->Close(); } FileDescriptorPtr fd_; - test_utils::ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"}; + ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"}; }; TEST_F(BzipExtentWriterTest, SimpleTest) { diff --git a/payload_consumer/cached_file_descriptor_unittest.cc 
b/payload_consumer/cached_file_descriptor_unittest.cc index d2965fc0..b64420a8 100644 --- a/payload_consumer/cached_file_descriptor_unittest.cc +++ b/payload_consumer/cached_file_descriptor_unittest.cc @@ -73,7 +73,7 @@ class CachedFileDescriptorTest : public ::testing::Test { protected: FileDescriptorPtr fd_{new EintrSafeFileDescriptor}; - test_utils::ScopedTempFile temp_file_{"CachedFileDescriptor-file.XXXXXX"}; + ScopedTempFile temp_file_{"CachedFileDescriptor-file.XXXXXX"}; int value_{1}; FileDescriptorPtr cfd_; }; diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 6857018f..74ddd27d 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -52,7 +52,9 @@ namespace chromeos_update_engine { +using std::list; using std::string; +using std::unique_ptr; using std::vector; using test_utils::GetBuildArtifactsPath; using test_utils::kRandomString; @@ -76,22 +78,24 @@ static const uint8_t kNewData[] = {'T', 'h', 'i', 's', ' ', 'i', 's', ' ', namespace { struct DeltaState { - string a_img; - string b_img; - string result_img; + unique_ptr a_img; + unique_ptr b_img; + unique_ptr result_img; size_t image_size; - string delta_path; + unique_ptr delta_file; + // The in-memory copy of delta file. + brillo::Blob delta; uint64_t metadata_size; uint32_t metadata_signature_size; - string old_kernel; + unique_ptr old_kernel; brillo::Blob old_kernel_data; - string new_kernel; + unique_ptr new_kernel; brillo::Blob new_kernel_data; - string result_kernel; + unique_ptr result_kernel; brillo::Blob result_kernel_data; size_t kernel_size; @@ -99,9 +103,6 @@ struct DeltaState { // the DeltaPerformer. InstallPlan install_plan; - // The in-memory copy of delta file. - brillo::Blob delta; - // Mock and fake instances used by the delta performer. FakeBootControl fake_boot_control_; FakeHardware fake_hardware_; @@ -155,7 +156,7 @@ class DeltaPerformerIntegrationTest : public ::testing::Test { EXPECT_EQ(expected, performer.ValidateManifest()); } void AddPartition(DeltaArchiveManifest* manifest, - std::string name, + string name, int timestamp) { auto& partition = *manifest->add_partitions(); partition.set_version(std::to_string(timestamp)); @@ -259,8 +260,7 @@ static void SignGeneratedShellPayloadWithKeys( } string signature_size_string = base::JoinString(signature_size_strings, ":"); - test_utils::ScopedTempFile hash_file("hash.XXXXXX"), - metadata_hash_file("hash.XXXXXX"); + ScopedTempFile hash_file("hash.XXXXXX"), metadata_hash_file("hash.XXXXXX"); string delta_generator_path = GetBuildArtifactsPath("delta_generator"); ASSERT_EQ(0, System(base::StringPrintf( @@ -273,29 +273,27 @@ static void SignGeneratedShellPayloadWithKeys( metadata_hash_file.path().c_str()))); // Sign the hash with all private keys. 
- vector sig_files, metadata_sig_files; + list sig_files, metadata_sig_files; vector sig_file_paths, metadata_sig_file_paths; for (const auto& key_path : private_key_paths) { brillo::Blob hash, signature; ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash)); ASSERT_TRUE(PayloadSigner::SignHash(hash, key_path, &signature)); - test_utils::ScopedTempFile sig_file("signature.XXXXXX"); - ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature)); - sig_file_paths.push_back(sig_file.path()); - sig_files.push_back(std::move(sig_file)); + sig_files.emplace_back("signature.XXXXXX"); + ASSERT_TRUE( + test_utils::WriteFileVector(sig_files.back().path(), signature)); + sig_file_paths.push_back(sig_files.back().path()); brillo::Blob metadata_hash, metadata_signature; ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash)); ASSERT_TRUE( PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature)); - test_utils::ScopedTempFile metadata_sig_file("signature.XXXXXX"); - ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(), + metadata_sig_files.emplace_back("metadata_signature.XXXXXX"); + ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_files.back().path(), metadata_signature)); - - metadata_sig_file_paths.push_back(metadata_sig_file.path()); - metadata_sig_files.push_back(std::move(metadata_sig_file)); + metadata_sig_file_paths.push_back(metadata_sig_files.back().path()); } string sig_files_string = base::JoinString(sig_file_paths, ":"); string metadata_sig_files_string = @@ -377,7 +375,7 @@ static void SignGeneratedShellPayload(SignatureTest signature_test, GetBuildArtifactsPath(kUnittestPrivateKey2Path)); } - std::string public_key; + string public_key; if (signature_test == kSignatureGeneratedShellRotateCl2) { public_key = GetBuildArtifactsPath(kUnittestPublicKey2Path); } else if (signature_test == kSignatureGeneratedShellECKey) { @@ -397,24 +395,23 @@ static void GenerateDeltaFile(bool full_kernel, SignatureTest signature_test, DeltaState* state, uint32_t minor_version) { - EXPECT_TRUE(utils::MakeTempFile("a_img.XXXXXX", &state->a_img, nullptr)); - EXPECT_TRUE(utils::MakeTempFile("b_img.XXXXXX", &state->b_img, nullptr)); + state->a_img.reset(new ScopedTempFile("a_img.XXXXXX")); + state->b_img.reset(new ScopedTempFile("b_img.XXXXXX")); // result_img is used in minor version 2. Instead of applying the update // in-place on A, we apply it to a new image, result_img. - EXPECT_TRUE( - utils::MakeTempFile("result_img.XXXXXX", &state->result_img, nullptr)); + state->result_img.reset(new ScopedTempFile("result_img.XXXXXX")); EXPECT_TRUE( base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"), - base::FilePath(state->a_img))); + base::FilePath(state->a_img->path()))); - state->image_size = utils::FileSize(state->a_img); + state->image_size = utils::FileSize(state->a_img->path()); // Make some changes to the A image. { string a_mnt; - ScopedLoopMounter b_mounter(state->a_img, &a_mnt, 0); + ScopedLoopMounter b_mounter(state->a_img->path(), &a_mnt, 0); brillo::Blob hardtocompress; while (hardtocompress.size() < 3 * kBlockSize) { @@ -451,17 +448,18 @@ static void GenerateDeltaFile(bool full_kernel, // Create a result image with image_size bytes of garbage. 
brillo::Blob ones(state->image_size, 0xff); - EXPECT_TRUE( - utils::WriteFile(state->result_img.c_str(), ones.data(), ones.size())); - EXPECT_EQ(utils::FileSize(state->a_img), utils::FileSize(state->result_img)); + EXPECT_TRUE(utils::WriteFile( + state->result_img->path().c_str(), ones.data(), ones.size())); + EXPECT_EQ(utils::FileSize(state->a_img->path()), + utils::FileSize(state->result_img->path())); EXPECT_TRUE( base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"), - base::FilePath(state->b_img))); + base::FilePath(state->b_img->path()))); { // Make some changes to the B image. string b_mnt; - ScopedLoopMounter b_mounter(state->b_img, &b_mnt, 0); + ScopedLoopMounter b_mounter(state->b_img->path(), &b_mnt, 0); base::FilePath mnt_path(b_mnt); EXPECT_TRUE(base::CopyFile(mnt_path.Append("regular-small"), @@ -509,18 +507,9 @@ static void GenerateDeltaFile(bool full_kernel, hardtocompress.size())); } - string old_kernel; - EXPECT_TRUE( - utils::MakeTempFile("old_kernel.XXXXXX", &state->old_kernel, nullptr)); - - string new_kernel; - EXPECT_TRUE( - utils::MakeTempFile("new_kernel.XXXXXX", &state->new_kernel, nullptr)); - - string result_kernel; - EXPECT_TRUE(utils::MakeTempFile( - "result_kernel.XXXXXX", &state->result_kernel, nullptr)); - + state->old_kernel.reset(new ScopedTempFile("old_kernel.XXXXXX")); + state->new_kernel.reset(new ScopedTempFile("new_kernel.XXXXXX")); + state->result_kernel.reset(new ScopedTempFile("result_kernel.XXXXXX")); state->kernel_size = kDefaultKernelSize; state->old_kernel_data.resize(kDefaultKernelSize); state->new_kernel_data.resize(state->old_kernel_data.size()); @@ -534,18 +523,17 @@ static void GenerateDeltaFile(bool full_kernel, std::begin(kNewData), std::end(kNewData), state->new_kernel_data.begin()); // Write kernels to disk - EXPECT_TRUE(utils::WriteFile(state->old_kernel.c_str(), + EXPECT_TRUE(utils::WriteFile(state->old_kernel->path().c_str(), state->old_kernel_data.data(), state->old_kernel_data.size())); - EXPECT_TRUE(utils::WriteFile(state->new_kernel.c_str(), + EXPECT_TRUE(utils::WriteFile(state->new_kernel->path().c_str(), state->new_kernel_data.data(), state->new_kernel_data.size())); - EXPECT_TRUE(utils::WriteFile(state->result_kernel.c_str(), + EXPECT_TRUE(utils::WriteFile(state->result_kernel->path().c_str(), state->result_kernel_data.data(), state->result_kernel_data.size())); - EXPECT_TRUE(utils::MakeTempFile("delta.XXXXXX", &state->delta_path, nullptr)); - LOG(INFO) << "delta path: " << state->delta_path; + state->delta_file.reset(new ScopedTempFile("delta.XXXXXX")); { const string private_key = signature_test == kSignatureGenerator @@ -561,9 +549,10 @@ static void GenerateDeltaFile(bool full_kernel, if (!full_rootfs) { payload_config.source.partitions.emplace_back(kPartitionNameRoot); payload_config.source.partitions.emplace_back(kPartitionNameKernel); - payload_config.source.partitions.front().path = state->a_img; + payload_config.source.partitions.front().path = state->a_img->path(); if (!full_kernel) - payload_config.source.partitions.back().path = state->old_kernel; + payload_config.source.partitions.back().path = + state->old_kernel->path(); EXPECT_TRUE(payload_config.source.LoadImageSize()); for (PartitionConfig& part : payload_config.source.partitions) EXPECT_TRUE(part.OpenFilesystem()); @@ -573,28 +562,30 @@ static void GenerateDeltaFile(bool full_kernel, payload_config.hard_chunk_size = 1024 * 1024; } payload_config.target.partitions.emplace_back(kPartitionNameRoot); - 
payload_config.target.partitions.back().path = state->b_img; + payload_config.target.partitions.back().path = state->b_img->path(); payload_config.target.partitions.emplace_back(kPartitionNameKernel); - payload_config.target.partitions.back().path = state->new_kernel; + payload_config.target.partitions.back().path = state->new_kernel->path(); EXPECT_TRUE(payload_config.target.LoadImageSize()); for (PartitionConfig& part : payload_config.target.partitions) EXPECT_TRUE(part.OpenFilesystem()); EXPECT_TRUE(payload_config.Validate()); - EXPECT_TRUE(GenerateUpdatePayloadFile( - payload_config, state->delta_path, private_key, &state->metadata_size)); + EXPECT_TRUE(GenerateUpdatePayloadFile(payload_config, + state->delta_file->path(), + private_key, + &state->metadata_size)); } // Extend the "partitions" holding the file system a bit. EXPECT_EQ(0, - HANDLE_EINTR(truncate(state->a_img.c_str(), + HANDLE_EINTR(truncate(state->a_img->path().c_str(), state->image_size + 1024 * 1024))); EXPECT_EQ(static_cast(state->image_size + 1024 * 1024), - utils::FileSize(state->a_img)); + utils::FileSize(state->a_img->path())); EXPECT_EQ(0, - HANDLE_EINTR(truncate(state->b_img.c_str(), + HANDLE_EINTR(truncate(state->b_img->path().c_str(), state->image_size + 1024 * 1024))); EXPECT_EQ(static_cast(state->image_size + 1024 * 1024), - utils::FileSize(state->b_img)); + utils::FileSize(state->b_img->path())); if (signature_test == kSignatureGeneratedPlaceholder || signature_test == kSignatureGeneratedPlaceholderMismatch) { @@ -603,13 +594,13 @@ static void GenerateDeltaFile(bool full_kernel, GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size)); LOG(INFO) << "Inserting placeholder signature."; ASSERT_TRUE(InsertSignaturePlaceholder( - signature_size, state->delta_path, &state->metadata_size)); + signature_size, state->delta_file->path(), &state->metadata_size)); if (signature_test == kSignatureGeneratedPlaceholderMismatch) { signature_size -= 1; LOG(INFO) << "Inserting mismatched placeholder signature."; ASSERT_FALSE(InsertSignaturePlaceholder( - signature_size, state->delta_path, &state->metadata_size)); + signature_size, state->delta_file->path(), &state->metadata_size)); return; } } @@ -621,13 +612,13 @@ static void GenerateDeltaFile(bool full_kernel, // reflect the new size after adding the signature operation to the // manifest. LOG(INFO) << "Signing payload."; - SignGeneratedPayload(state->delta_path, &state->metadata_size); + SignGeneratedPayload(state->delta_file->path(), &state->metadata_size); } else if (signature_test == kSignatureGeneratedShell || signature_test == kSignatureGeneratedShellECKey || signature_test == kSignatureGeneratedShellBadKey || signature_test == kSignatureGeneratedShellRotateCl1 || signature_test == kSignatureGeneratedShellRotateCl2) { - SignGeneratedShellPayload(signature_test, state->delta_path); + SignGeneratedShellPayload(signature_test, state->delta_file->path()); } } @@ -641,7 +632,7 @@ static void ApplyDeltaFile(bool full_kernel, uint32_t minor_version) { // Check the metadata. 
{ - EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta)); + EXPECT_TRUE(utils::ReadFile(state->delta_file->path(), &state->delta)); PayloadMetadata payload_metadata; EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta)); state->metadata_size = payload_metadata.GetMetadataSize(); @@ -794,9 +785,10 @@ static void ApplyDeltaFile(bool full_kernel, (*performer)->set_public_key_path(public_key_path); (*performer)->set_update_certificates_path(""); - EXPECT_EQ(static_cast(state->image_size), - HashCalculator::RawHashOfFile( - state->a_img, state->image_size, &root_part.source_hash)); + EXPECT_EQ( + static_cast(state->image_size), + HashCalculator::RawHashOfFile( + state->a_img->path(), state->image_size, &root_part.source_hash)); EXPECT_TRUE(HashCalculator::RawHashOfData(state->old_kernel_data, &kernel_part.source_hash)); @@ -804,13 +796,15 @@ static void ApplyDeltaFile(bool full_kernel, install_plan->partitions.clear(); state->fake_boot_control_.SetPartitionDevice( - kPartitionNameRoot, install_plan->source_slot, state->a_img); - state->fake_boot_control_.SetPartitionDevice( - kPartitionNameKernel, install_plan->source_slot, state->old_kernel); + kPartitionNameRoot, install_plan->source_slot, state->a_img->path()); + state->fake_boot_control_.SetPartitionDevice(kPartitionNameKernel, + install_plan->source_slot, + state->old_kernel->path()); state->fake_boot_control_.SetPartitionDevice( - kPartitionNameRoot, install_plan->target_slot, state->result_img); - state->fake_boot_control_.SetPartitionDevice( - kPartitionNameKernel, install_plan->target_slot, state->result_kernel); + kPartitionNameRoot, install_plan->target_slot, state->result_img->path()); + state->fake_boot_control_.SetPartitionDevice(kPartitionNameKernel, + install_plan->target_slot, + state->result_kernel->path()); ErrorCode expected_error, actual_error; bool continue_writing; @@ -889,12 +883,15 @@ void VerifyPayloadResult(DeltaPerformer* performer, return; } + CompareFilesByBlock(state->result_kernel->path(), + state->new_kernel->path(), + state->kernel_size); CompareFilesByBlock( - state->result_kernel, state->new_kernel, state->kernel_size); - CompareFilesByBlock(state->result_img, state->b_img, state->image_size); + state->result_img->path(), state->b_img->path(), state->image_size); brillo::Blob updated_kernel_partition; - EXPECT_TRUE(utils::ReadFile(state->result_kernel, &updated_kernel_partition)); + EXPECT_TRUE( + utils::ReadFile(state->result_kernel->path(), &updated_kernel_partition)); ASSERT_GE(updated_kernel_partition.size(), base::size(kNewData)); EXPECT_TRUE(std::equal(std::begin(kNewData), std::end(kNewData), @@ -913,9 +910,10 @@ void VerifyPayloadResult(DeltaPerformer* performer, EXPECT_EQ(state->image_size, partitions[0].target_size); brillo::Blob expected_new_rootfs_hash; - EXPECT_EQ(static_cast(state->image_size), - HashCalculator::RawHashOfFile( - state->b_img, state->image_size, &expected_new_rootfs_hash)); + EXPECT_EQ( + static_cast(state->image_size), + HashCalculator::RawHashOfFile( + state->b_img->path(), state->image_size, &expected_new_rootfs_hash)); EXPECT_EQ(expected_new_rootfs_hash, partitions[0].target_hash); } @@ -953,13 +951,6 @@ void DoSmallImageTest(bool full_kernel, &state, minor_version); - ScopedPathUnlinker a_img_unlinker(state.a_img); - ScopedPathUnlinker b_img_unlinker(state.b_img); - ScopedPathUnlinker new_img_unlinker(state.result_img); - ScopedPathUnlinker delta_unlinker(state.delta_path); - ScopedPathUnlinker old_kernel_unlinker(state.old_kernel); - ScopedPathUnlinker 
new_kernel_unlinker(state.new_kernel); - ScopedPathUnlinker result_kernel_unlinker(state.result_kernel); ApplyDeltaFile(full_kernel, full_rootfs, signature_test, @@ -977,11 +968,6 @@ void DoOperationHashMismatchTest(OperationHashTest op_hash_test, DeltaState state; uint64_t minor_version = kFullPayloadMinorVersion; GenerateDeltaFile(true, true, -1, kSignatureGenerated, &state, minor_version); - ScopedPathUnlinker a_img_unlinker(state.a_img); - ScopedPathUnlinker b_img_unlinker(state.b_img); - ScopedPathUnlinker delta_unlinker(state.delta_path); - ScopedPathUnlinker old_kernel_unlinker(state.old_kernel); - ScopedPathUnlinker new_kernel_unlinker(state.new_kernel); DeltaPerformer* performer = nullptr; ApplyDeltaFile(true, true, diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 449201ce..65b9dac9 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -202,7 +202,7 @@ class DeltaPerformerTest : public ::testing::Test { uint64_t major_version, uint32_t minor_version, PartitionConfig* old_part = nullptr) { - test_utils::ScopedTempFile blob_file("Blob-XXXXXX"); + ScopedTempFile blob_file("Blob-XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data)); PayloadGenerationConfig config; @@ -236,7 +236,7 @@ class DeltaPerformerTest : public ::testing::Test { new_part.size = 0; payload.AddPartition(*old_part, new_part, {}, {}); - test_utils::ScopedTempFile payload_file("Payload-XXXXXX"); + ScopedTempFile payload_file("Payload-XXXXXX"); string private_key = sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : ""; EXPECT_TRUE(payload.WritePayload(payload_file.path(), @@ -287,7 +287,7 @@ class DeltaPerformerTest : public ::testing::Test { const string& source_path, const brillo::Blob& target_data, bool expect_success) { - test_utils::ScopedTempFile new_part("Partition-XXXXXX"); + ScopedTempFile new_part("Partition-XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data)); payload_.size = payload_data.size(); @@ -591,7 +591,7 @@ TEST_F(DeltaPerformerTest, SourceCopyOperationTest) { EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash)); aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size()); - test_utils::ScopedTempFile source("Source-XXXXXX"); + ScopedTempFile source("Source-XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data)); PartitionConfig old_part(kPartitionNameRoot); @@ -619,7 +619,7 @@ TEST_F(DeltaPerformerTest, PuffdiffOperationTest) { EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash)); aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size()); - test_utils::ScopedTempFile source("Source-XXXXXX"); + ScopedTempFile source("Source-XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src)); PartitionConfig old_part(kPartitionNameRoot); @@ -647,7 +647,7 @@ TEST_F(DeltaPerformerTest, SourceHashMismatchTest) { EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash)); aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size()); - test_utils::ScopedTempFile source("Source-XXXXXX"); + ScopedTempFile source("Source-XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data)); PartitionConfig old_part(kPartitionNameRoot); @@ -664,7 +664,7 @@ TEST_F(DeltaPerformerTest, SourceHashMismatchTest) { // since the source partition doesn't match the operation hash. 
TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) { constexpr size_t kCopyOperationSize = 4 * 4096; - test_utils::ScopedTempFile source("Source-XXXXXX"); + ScopedTempFile source("Source-XXXXXX"); // Write invalid data to the source image, which doesn't match the expected // hash. brillo::Blob invalid_data(kCopyOperationSize, 0x55); @@ -692,7 +692,7 @@ TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) { // file descriptor when the size of the error corrected one is too small. TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) { constexpr size_t kCopyOperationSize = 4 * 4096; - test_utils::ScopedTempFile source("Source-XXXXXX"); + ScopedTempFile source("Source-XXXXXX"); // Setup the source path with the right expected data. brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize); EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data)); @@ -720,7 +720,7 @@ TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) { TEST_F(DeltaPerformerTest, ChooseSourceFDTest) { constexpr size_t kSourceSize = 4 * 4096; - test_utils::ScopedTempFile source("Source-XXXXXX"); + ScopedTempFile source("Source-XXXXXX"); // Write invalid data to the source image, which doesn't match the expected // hash. brillo::Blob invalid_data(kSourceSize, 0x55); diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc index e6ca219b..9daa7914 100644 --- a/payload_consumer/download_action_unittest.cc +++ b/payload_consumer/download_action_unittest.cc @@ -51,7 +51,6 @@ using base::ReadFileToString; using base::WriteFile; using std::string; using std::unique_ptr; -using test_utils::ScopedTempFile; using testing::_; using testing::AtLeast; using testing::InSequence; @@ -133,7 +132,6 @@ void TestWithData(const brillo::Blob& data, loop.SetAsCurrent(); FakeSystemState fake_system_state; - // TODO(adlr): see if we need a different file for build bots ScopedTempFile output_temp_file; TestDirectFileWriter writer; EXPECT_EQ( diff --git a/payload_consumer/extent_reader_unittest.cc b/payload_consumer/extent_reader_unittest.cc index b7059bc9..686f14d8 100644 --- a/payload_consumer/extent_reader_unittest.cc +++ b/payload_consumer/extent_reader_unittest.cc @@ -72,7 +72,7 @@ class ExtentReaderTest : public ::testing::Test { } FileDescriptorPtr fd_; - test_utils::ScopedTempFile temp_file_{"ExtentReaderTest-file.XXXXXX"}; + ScopedTempFile temp_file_{"ExtentReaderTest-file.XXXXXX"}; brillo::Blob sample_; }; diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc index aef856bf..afebb1a9 100644 --- a/payload_consumer/extent_writer_unittest.cc +++ b/payload_consumer/extent_writer_unittest.cc @@ -59,7 +59,7 @@ class ExtentWriterTest : public ::testing::Test { void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size); FileDescriptorPtr fd_; - test_utils::ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"}; + ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"}; }; TEST_F(ExtentWriterTest, SimpleTest) { diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc index 48e610f1..478893d7 100644 --- a/payload_consumer/file_descriptor_utils_unittest.cc +++ b/payload_consumer/file_descriptor_utils_unittest.cc @@ -52,14 +52,13 @@ RepeatedPtrField CreateExtentList( class FileDescriptorUtilsTest : public ::testing::Test { protected: void SetUp() override { - 
EXPECT_TRUE(utils::MakeTempFile("fd_tgt.XXXXXX", &tgt_path_, nullptr)); - EXPECT_TRUE(target_->Open(tgt_path_.c_str(), O_RDWR)); + EXPECT_TRUE(target_->Open(tgt_file_.path().c_str(), O_RDWR)); } // Check that the |target_| file contains |expected_contents|. void ExpectTarget(const std::string& expected_contents) { std::string target_contents; - EXPECT_TRUE(utils::ReadFile(tgt_path_, &target_contents)); + EXPECT_TRUE(utils::ReadFile(tgt_file_.path(), &target_contents)); EXPECT_EQ(expected_contents.size(), target_contents.size()); if (target_contents != expected_contents) { ADD_FAILURE() << "Contents don't match."; @@ -70,8 +69,7 @@ class FileDescriptorUtilsTest : public ::testing::Test { } } - // Path to the target temporary file. - std::string tgt_path_; + ScopedTempFile tgt_file_{"fd_tgt.XXXXXX"}; // Source and target file descriptor used for testing the tools. FakeFileDescriptor* fake_source_{new FakeFileDescriptor()}; diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc index 59cfe2b9..3b959f30 100644 --- a/payload_consumer/file_writer_unittest.cc +++ b/payload_consumer/file_writer_unittest.cc @@ -35,8 +35,7 @@ namespace chromeos_update_engine { class FileWriterTest : public ::testing::Test {}; TEST(FileWriterTest, SimpleTest) { - // Create a uniquely named file for testing. - test_utils::ScopedTempFile file("FileWriterTest-XXXXXX"); + ScopedTempFile file("FileWriterTest-XXXXXX"); DirectFileWriter file_writer; EXPECT_EQ(0, file_writer.Open(file.path().c_str(), @@ -60,7 +59,7 @@ TEST(FileWriterTest, ErrorTest) { TEST(FileWriterTest, WriteErrorTest) { // Create a uniquely named file for testing. - test_utils::ScopedTempFile file("FileWriterTest-XXXXXX"); + ScopedTempFile file("FileWriterTest-XXXXXX"); DirectFileWriter file_writer; EXPECT_EQ(0, file_writer.Open(file.path().c_str(), diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc index 2971849c..2c29b44f 100644 --- a/payload_consumer/filesystem_verifier_action_unittest.cc +++ b/payload_consumer/filesystem_verifier_action_unittest.cc @@ -92,7 +92,7 @@ class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate { bool FilesystemVerifierActionTest::DoTest(bool terminate_early, bool hash_fail) { - test_utils::ScopedTempFile a_loop_file("a_loop_file.XXXXXX"); + ScopedTempFile a_loop_file("a_loop_file.XXXXXX"); // Make random data for a. 
const size_t kLoopFileSize = 10 * 1024 * 1024 + 512; @@ -278,7 +278,7 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootTerminateEarlyTest) { #ifdef __ANDROID__ TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) { - test_utils::ScopedTempFile part_file("part_file.XXXXXX"); + ScopedTempFile part_file("part_file.XXXXXX"); constexpr size_t filesystem_size = 200 * 4096; constexpr size_t part_size = 256 * 4096; brillo::Blob part_data(filesystem_size, 0x1); @@ -340,7 +340,7 @@ TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) { #endif // __ANDROID__ TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) { - test_utils::ScopedTempFile part_file("part_file.XXXXXX"); + ScopedTempFile part_file("part_file.XXXXXX"); constexpr size_t filesystem_size = 200 * 4096; constexpr size_t part_size = 256 * 4096; brillo::Blob part_data(part_size); diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc index f943ce8e..ec22ffbe 100644 --- a/payload_consumer/verity_writer_android_unittest.cc +++ b/payload_consumer/verity_writer_android_unittest.cc @@ -39,7 +39,7 @@ class VerityWriterAndroidTest : public ::testing::Test { VerityWriterAndroid verity_writer_; InstallPlan::Partition partition_; - test_utils::ScopedTempFile temp_file_; + ScopedTempFile temp_file_; }; TEST_F(VerityWriterAndroidTest, SimpleTest) { diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc index 7a952841..84eeb77b 100644 --- a/payload_generator/ab_generator_unittest.cc +++ b/payload_generator/ab_generator_unittest.cc @@ -70,8 +70,7 @@ void TestSplitReplaceOrReplaceXzOperation(InstallOperation::Type orig_type, part_data.push_back(dis(gen)); } ASSERT_EQ(part_size, part_data.size()); - test_utils::ScopedTempFile part_file( - "SplitReplaceOrReplaceXzTest_part.XXXXXX"); + ScopedTempFile part_file("SplitReplaceOrReplaceXzTest_part.XXXXXX"); ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data)); // Create original operation and blob data. @@ -107,8 +106,7 @@ void TestSplitReplaceOrReplaceXzOperation(InstallOperation::Type orig_type, aop.name = "SplitTestOp"; // Create the data file. - test_utils::ScopedTempFile data_file( - "SplitReplaceOrReplaceXzTest_data.XXXXXX"); + ScopedTempFile data_file("SplitReplaceOrReplaceXzTest_data.XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob)); int data_fd = open(data_file.path().c_str(), O_RDWR, 000); EXPECT_GE(data_fd, 0); @@ -220,8 +218,7 @@ void TestMergeReplaceOrReplaceXzOperations(InstallOperation::Type orig_type, part_data.push_back(dis(gen)); } ASSERT_EQ(part_size, part_data.size()); - test_utils::ScopedTempFile part_file( - "MergeReplaceOrReplaceXzTest_part.XXXXXX"); + ScopedTempFile part_file("MergeReplaceOrReplaceXzTest_part.XXXXXX"); ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data)); // Create original operations and blob data. @@ -271,8 +268,7 @@ void TestMergeReplaceOrReplaceXzOperations(InstallOperation::Type orig_type, aops.push_back(second_aop); // Create the data file. 
- test_utils::ScopedTempFile data_file( - "MergeReplaceOrReplaceXzTest_data.XXXXXX"); + ScopedTempFile data_file("MergeReplaceOrReplaceXzTest_data.XXXXXX"); EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data)); int data_fd = open(data_file.path().c_str(), O_RDWR, 000); EXPECT_GE(data_fd, 0); @@ -561,7 +557,7 @@ TEST_F(ABGeneratorTest, AddSourceHashTest) { second_aop.op = second_op; aops.push_back(second_aop); - test_utils::ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX"); + ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX"); brillo::Blob src_data(kBlockSize); test_utils::FillWithData(&src_data); ASSERT_TRUE(test_utils::WriteFileVector(src_part_file.path(), src_data)); diff --git a/payload_generator/blob_file_writer_unittest.cc b/payload_generator/blob_file_writer_unittest.cc index 487bc737..f4dcafb4 100644 --- a/payload_generator/blob_file_writer_unittest.cc +++ b/payload_generator/blob_file_writer_unittest.cc @@ -31,24 +31,21 @@ namespace chromeos_update_engine { class BlobFileWriterTest : public ::testing::Test {}; TEST(BlobFileWriterTest, SimpleTest) { - string blob_path; - int blob_fd; - EXPECT_TRUE( - utils::MakeTempFile("BlobFileWriterTest.XXXXXX", &blob_path, &blob_fd)); + ScopedTempFile blob_file("BlobFileWriterTest.XXXXXX", true); off_t blob_file_size = 0; - BlobFileWriter blob_file(blob_fd, &blob_file_size); + BlobFileWriter blob_file_writer(blob_file.fd(), &blob_file_size); - off_t blob_size = 1024; - brillo::Blob blob(blob_size); + const off_t kBlobSize = 1024; + brillo::Blob blob(kBlobSize); FillWithData(&blob); - EXPECT_EQ(0, blob_file.StoreBlob(blob)); - EXPECT_EQ(blob_size, blob_file.StoreBlob(blob)); + EXPECT_EQ(0, blob_file_writer.StoreBlob(blob)); + EXPECT_EQ(kBlobSize, blob_file_writer.StoreBlob(blob)); - brillo::Blob stored_blob(blob_size); + brillo::Blob stored_blob(kBlobSize); ssize_t bytes_read; - ASSERT_TRUE( - utils::PReadAll(blob_fd, stored_blob.data(), blob_size, 0, &bytes_read)); - EXPECT_EQ(bytes_read, blob_size); + ASSERT_TRUE(utils::PReadAll( + blob_file.fd(), stored_blob.data(), kBlobSize, 0, &bytes_read)); + EXPECT_EQ(bytes_read, kBlobSize); EXPECT_EQ(blob, stored_blob); } diff --git a/payload_generator/block_mapping_unittest.cc b/payload_generator/block_mapping_unittest.cc index 9b9b4f16..017548a9 100644 --- a/payload_generator/block_mapping_unittest.cc +++ b/payload_generator/block_mapping_unittest.cc @@ -36,8 +36,8 @@ namespace chromeos_update_engine { class BlockMappingTest : public ::testing::Test { protected: // Old new partition files used in testing. - test_utils::ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"}; - test_utils::ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"}; + ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"}; + ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"}; size_t block_size_{1024}; BlockMapping bm_{block_size_}; // BlockMapping under test. 
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc index 0b115e02..7805156f 100644 --- a/payload_generator/boot_img_filesystem_unittest.cc +++ b/payload_generator/boot_img_filesystem_unittest.cc @@ -63,7 +63,7 @@ class BootImgFilesystemTest : public ::testing::Test { return boot_img; } - test_utils::ScopedTempFile boot_file_; + ScopedTempFile boot_file_; }; TEST_F(BootImgFilesystemTest, SimpleTest) { diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc index c2b35ee8..ff8b0dae 100644 --- a/payload_generator/delta_diff_generator.cc +++ b/payload_generator/delta_diff_generator.cc @@ -119,18 +119,10 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, PayloadFile payload; TEST_AND_RETURN_FALSE(payload.Init(config)); - const string kTempFileTemplate("CrAU_temp_data.XXXXXX"); - string temp_file_path; - int data_file_fd; - TEST_AND_RETURN_FALSE( - utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &data_file_fd)); - ScopedPathUnlinker temp_file_unlinker(temp_file_path); - TEST_AND_RETURN_FALSE(data_file_fd >= 0); - + ScopedTempFile data_file("CrAU_temp_data.XXXXXX", true); { off_t data_file_size = 0; - ScopedFdCloser data_file_fd_closer(&data_file_fd); - BlobFileWriter blob_file(data_file_fd, &data_file_size); + BlobFileWriter blob_file(data_file.fd(), &data_file_size); if (config.is_delta) { TEST_AND_RETURN_FALSE(config.source.partitions.size() == config.target.partitions.size()); @@ -190,11 +182,12 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, std::move(all_merge_sequences[i]))); } } + data_file.CloseFd(); LOG(INFO) << "Writing payload file..."; // Write payload file to disk. TEST_AND_RETURN_FALSE(payload.WritePayload( - output_path, temp_file_path, private_key_path, metadata_size)); + output_path, data_file.path(), private_key_path, metadata_size)); LOG(INFO) << "All done. Successfully created delta file with " << "metadata size = " << *metadata_size; diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc index 220c7ae1..3c025e10 100644 --- a/payload_generator/delta_diff_utils.cc +++ b/payload_generator/delta_diff_utils.cc @@ -822,17 +822,13 @@ bool ReadExtentsToDiff(const string& old_part, // Only Puffdiff if both files have at least one deflate left. if (!src_deflates.empty() && !dst_deflates.empty()) { brillo::Blob puffdiff_delta; - string temp_file_path; - TEST_AND_RETURN_FALSE(utils::MakeTempFile( - "puffdiff-delta.XXXXXX", &temp_file_path, nullptr)); - ScopedPathUnlinker temp_file_unlinker(temp_file_path); - + ScopedTempFile temp_file("puffdiff-delta.XXXXXX"); // Perform PuffDiff operation. TEST_AND_RETURN_FALSE(puffin::PuffDiff(old_data, new_data, src_deflates, dst_deflates, - temp_file_path, + temp_file.path(), &puffdiff_delta)); TEST_AND_RETURN_FALSE(puffdiff_delta.size() > 0); if (IsDiffOperationBetter(operation, diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc index 0857f9c1..f2db1bdc 100644 --- a/payload_generator/delta_diff_utils_unittest.cc +++ b/payload_generator/delta_diff_utils_unittest.cc @@ -69,13 +69,12 @@ bool WriteExtents(const string& part_path, // Create a fake filesystem of the given |size| and initialize the partition // holding it in the PartitionConfig |part|. 
void CreatePartition(PartitionConfig* part, - const string& pattern, + ScopedTempFile* part_file, uint64_t block_size, off_t size) { - int fd = -1; - ASSERT_TRUE(utils::MakeTempFile(pattern.c_str(), &part->path, &fd)); - ASSERT_EQ(0, ftruncate(fd, size)); - ASSERT_EQ(0, close(fd)); + part->path = part_file->path(); + ASSERT_EQ(0, ftruncate(part_file->fd(), size)); + part_file->CloseFd(); part->fs_interface.reset(new FakeFilesystem(block_size, size / block_size)); part->size = size; } @@ -112,30 +111,20 @@ class DeltaDiffUtilsTest : public ::testing::Test { void SetUp() override { CreatePartition(&old_part_, - "DeltaDiffUtilsTest-old_part-XXXXXX", + &old_part_file_, block_size_, block_size_ * kDefaultBlockCount); CreatePartition(&new_part_, - "DeltaDiffUtilsTest-old_part-XXXXXX", + &new_part_file_, block_size_, block_size_ * kDefaultBlockCount); - ASSERT_TRUE(utils::MakeTempFile( - "DeltaDiffUtilsTest-blob-XXXXXX", &blob_path_, &blob_fd_)); - } - - void TearDown() override { - unlink(old_part_.path.c_str()); - unlink(new_part_.path.c_str()); - if (blob_fd_ != -1) - close(blob_fd_); - unlink(blob_path_.c_str()); } // Helper function to call DeltaMovedAndZeroBlocks() using this class' data // members. This simply avoids repeating all the arguments that never change. bool RunDeltaMovedAndZeroBlocks(ssize_t chunk_blocks, uint32_t minor_version) { - BlobFileWriter blob_file(blob_fd_, &blob_size_); + BlobFileWriter blob_file(tmp_blob_file_.fd(), &blob_size_); PayloadVersion version(kBrilloMajorPayloadVersion, minor_version); ExtentRanges old_zero_blocks; return diff_utils::DeltaMovedAndZeroBlocks(&aops_, @@ -155,10 +144,11 @@ class DeltaDiffUtilsTest : public ::testing::Test { // with PartitionConfig old_part_{"part"}; PartitionConfig new_part_{"part"}; + ScopedTempFile old_part_file_{"DeltaDiffUtilsTest-old_part-XXXXXX", true}; + ScopedTempFile new_part_file_{"DeltaDiffUtilsTest-new_part-XXXXXX", true}; // The file holding the output blob from the various diff utils functions. 
- string blob_path_; - int blob_fd_{-1}; + ScopedTempFile tmp_blob_file_{"DeltaDiffUtilsTest-blob-XXXXXX", true}; off_t blob_size_{0}; size_t block_size_{kBlockSize}; @@ -173,7 +163,7 @@ TEST_F(DeltaDiffUtilsTest, SkipVerityExtentsTest) { new_part_.verity.hash_tree_extent = ExtentForRange(20, 30); new_part_.verity.fec_extent = ExtentForRange(40, 50); - BlobFileWriter blob_file(blob_fd_, &blob_size_); + BlobFileWriter blob_file(tmp_blob_file_.fd(), &blob_size_); EXPECT_TRUE(diff_utils::DeltaReadPartition( &aops_, old_part_, diff --git a/payload_generator/ext2_filesystem_unittest.cc b/payload_generator/ext2_filesystem_unittest.cc index 54600e90..88e15383 100644 --- a/payload_generator/ext2_filesystem_unittest.cc +++ b/payload_generator/ext2_filesystem_unittest.cc @@ -62,7 +62,7 @@ void ExpectBlocksInRange(const vector& extents, uint64_t total_blocks) { class Ext2FilesystemTest : public ::testing::Test {}; TEST_F(Ext2FilesystemTest, InvalidFilesystem) { - test_utils::ScopedTempFile fs_filename_{"Ext2FilesystemTest-XXXXXX"}; + ScopedTempFile fs_filename_{"Ext2FilesystemTest-XXXXXX"}; ASSERT_EQ(0, truncate(fs_filename_.path().c_str(), kDefaultFilesystemSize)); unique_ptr fs = Ext2Filesystem::CreateFromFile(fs_filename_.path()); diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc index 5f39e8bc..d3b3491d 100644 --- a/payload_generator/full_update_generator_unittest.cc +++ b/payload_generator/full_update_generator_unittest.cc @@ -41,11 +41,9 @@ class FullUpdateGeneratorTest : public ::testing::Test { config_.block_size = 4096; new_part_conf.path = part_file_.path(); - EXPECT_TRUE(utils::MakeTempFile( - "FullUpdateTest_blobs.XXXXXX", &out_blobs_path_, &out_blobs_fd_)); - blob_file_.reset(new BlobFileWriter(out_blobs_fd_, &out_blobs_length_)); - out_blobs_unlinker_.reset(new ScopedPathUnlinker(out_blobs_path_)); + blob_file_writer_.reset( + new BlobFileWriter(blob_file_.fd(), &out_blobs_length_)); } PayloadGenerationConfig config_; @@ -54,14 +52,11 @@ class FullUpdateGeneratorTest : public ::testing::Test { vector aops; // Output file holding the payload blobs. - string out_blobs_path_; - int out_blobs_fd_{-1}; off_t out_blobs_length_{0}; - ScopedFdCloser out_blobs_fd_closer_{&out_blobs_fd_}; - test_utils::ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"}; + ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"}; - std::unique_ptr blob_file_; - std::unique_ptr out_blobs_unlinker_; + ScopedTempFile blob_file_{"FullUpdateTest_blobs.XXXXXX", true}; + std::unique_ptr blob_file_writer_; // FullUpdateGenerator under test. FullUpdateGenerator generator_; @@ -77,7 +72,7 @@ TEST_F(FullUpdateGeneratorTest, RunTest) { EXPECT_TRUE(generator_.GenerateOperations(config_, new_part_conf, // this is ignored new_part_conf, - blob_file_.get(), + blob_file_writer_.get(), &aops)); int64_t new_part_chunks = new_part_conf.size / config_.hard_chunk_size; EXPECT_EQ(new_part_chunks, static_cast(aops.size())); @@ -108,7 +103,7 @@ TEST_F(FullUpdateGeneratorTest, ChunkSizeTooBig) { EXPECT_TRUE(generator_.GenerateOperations(config_, new_part_conf, // this is ignored new_part_conf, - blob_file_.get(), + blob_file_writer_.get(), &aops)); // new_part has one chunk and a half. 
EXPECT_EQ(2U, aops.size()); @@ -129,7 +124,7 @@ TEST_F(FullUpdateGeneratorTest, ImageSizeTooSmall) { EXPECT_TRUE(generator_.GenerateOperations(config_, new_part_conf, // this is ignored new_part_conf, - blob_file_.get(), + blob_file_writer_.get(), &aops)); // new_part has less than one chunk. diff --git a/payload_generator/mapfile_filesystem_unittest.cc b/payload_generator/mapfile_filesystem_unittest.cc index 36ae3bfa..57b672b5 100644 --- a/payload_generator/mapfile_filesystem_unittest.cc +++ b/payload_generator/mapfile_filesystem_unittest.cc @@ -55,8 +55,8 @@ void ExpectBlocksInRange(const vector& extents, uint64_t total_blocks) { class MapfileFilesystemTest : public ::testing::Test { protected: - test_utils::ScopedTempFile temp_file_{"mapfile_file.XXXXXX"}; - test_utils::ScopedTempFile temp_mapfile_{"mapfile_mapfile.XXXXXX"}; + ScopedTempFile temp_file_{"mapfile_file.XXXXXX"}; + ScopedTempFile temp_mapfile_{"mapfile_mapfile.XXXXXX"}; }; TEST_F(MapfileFilesystemTest, EmptyFilesystem) { diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index 2688e9af..74423d12 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -103,11 +103,9 @@ bool PayloadFile::WritePayload(const string& payload_file, const string& private_key_path, uint64_t* metadata_size_out) { // Reorder the data blobs with the manifest_. - string ordered_blobs_path; - TEST_AND_RETURN_FALSE(utils::MakeTempFile( - "CrAU_temp_data.ordered.XXXXXX", &ordered_blobs_path, nullptr)); - ScopedPathUnlinker ordered_blobs_unlinker(ordered_blobs_path); - TEST_AND_RETURN_FALSE(ReorderDataBlobs(data_blobs_path, ordered_blobs_path)); + ScopedTempFile ordered_blobs_file("CrAU_temp_data.ordered.XXXXXX"); + TEST_AND_RETURN_FALSE( + ReorderDataBlobs(data_blobs_path, ordered_blobs_file.path())); // Check that install op blobs are in order. uint64_t next_blob_offset = 0; @@ -231,7 +229,7 @@ bool PayloadFile::WritePayload(const string& payload_file, // Append the data blobs. 
LOG(INFO) << "Writing final delta file data blobs..."; - int blobs_fd = open(ordered_blobs_path.c_str(), O_RDONLY, 0); + int blobs_fd = open(ordered_blobs_file.path().c_str(), O_RDONLY, 0); ScopedFdCloser blobs_fd_closer(&blobs_fd); TEST_AND_RETURN_FALSE(blobs_fd >= 0); for (;;) { diff --git a/payload_generator/payload_file_unittest.cc b/payload_generator/payload_file_unittest.cc index 45faebb9..1fd36f52 100644 --- a/payload_generator/payload_file_unittest.cc +++ b/payload_generator/payload_file_unittest.cc @@ -36,7 +36,7 @@ class PayloadFileTest : public ::testing::Test { }; TEST_F(PayloadFileTest, ReorderBlobsTest) { - test_utils::ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX"); + ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX"); // The operations have three blob and one gap (the whitespace): // Rootfs operation 1: [8, 3] bcd @@ -45,7 +45,7 @@ TEST_F(PayloadFileTest, ReorderBlobsTest) { string orig_data = "kernel abcd"; EXPECT_TRUE(test_utils::WriteFileString(orig_blobs.path(), orig_data)); - test_utils::ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX"); + ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX"); payload_.part_vec_.resize(2); diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc index 44eaf55e..e87b034f 100644 --- a/payload_generator/payload_generation_config_android_unittest.cc +++ b/payload_generator/payload_generation_config_android_unittest.cc @@ -138,8 +138,7 @@ class PayloadGenerationConfigAndroidTest : public ::testing::Test { } ImageConfig image_config_; - test_utils::ScopedTempFile temp_file_{ - "PayloadGenerationConfigAndroidTest.XXXXXX"}; + ScopedTempFile temp_file_{"PayloadGenerationConfigAndroidTest.XXXXXX"}; }; TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigSimpleTest) { diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc index 19bc2f8b..ed936ff7 100644 --- a/payload_generator/payload_properties_unittest.cc +++ b/payload_generator/payload_properties_unittest.cc @@ -40,7 +40,6 @@ #include "update_engine/payload_generator/payload_file.h" #include "update_engine/payload_generator/payload_generation_config.h" -using chromeos_update_engine::test_utils::ScopedTempFile; using std::string; using std::unique_ptr; using std::vector; @@ -60,14 +59,6 @@ class PayloadPropertiesTest : public ::testing::Test { PayloadFile payload; EXPECT_TRUE(payload.Init(config)); - const string kTempFileTemplate = "temp_data.XXXXXX"; - int data_file_fd; - string temp_file_path; - EXPECT_TRUE( - utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &data_file_fd)); - ScopedPathUnlinker temp_file_unlinker(temp_file_path); - EXPECT_LE(0, data_file_fd); - const auto SetupPartitionConfig = [](PartitionConfig* config, const string& path, size_t size) { config->path = path; @@ -77,8 +68,8 @@ class PayloadPropertiesTest : public ::testing::Test { string zeros(size, '\0'); EXPECT_TRUE(utils::WriteFile(path, zeros.c_str(), zeros.size())); }; - ScopedTempFile old_part_file; - ScopedTempFile new_part_file; + ScopedTempFile old_part_file("old_part.XXXXXX"); + ScopedTempFile new_part_file("new_part.XXXXXX"); PartitionConfig old_part(kPartitionNameRoot); PartitionConfig new_part(kPartitionNameRoot); SetupPartitionConfig(&old_part, old_part_file.path(), 0); @@ -91,7 +82,8 @@ class PayloadPropertiesTest : public ::testing::Test { vector aops; off_t data_file_size = 0; - BlobFileWriter blob_file_writer(data_file_fd, 
&data_file_size); + ScopedTempFile data_file("temp_data.XXXXXX", true); + BlobFileWriter blob_file_writer(data_file.fd(), &data_file_size); // Generate the operations using the strategy we selected above. EXPECT_TRUE(strategy->GenerateOperations( config, old_part, new_part, &blob_file_writer, &aops)); @@ -100,10 +92,10 @@ class PayloadPropertiesTest : public ::testing::Test { uint64_t metadata_size; EXPECT_TRUE(payload.WritePayload( - payload_file.path(), temp_file_path, "", &metadata_size)); + payload_file_.path(), data_file.path(), "", &metadata_size)); } - ScopedTempFile payload_file; + ScopedTempFile payload_file_{"payload_file.XXXXXX"}; }; // Validate the hash of file exists within the output. @@ -119,7 +111,7 @@ TEST_F(PayloadPropertiesTest, GetPropertiesAsJsonTestHash) { "}"; string json; EXPECT_TRUE( - PayloadProperties(payload_file.path()).GetPropertiesAsJson(&json)); + PayloadProperties(payload_file_.path()).GetPropertiesAsJson(&json)); EXPECT_EQ(kJsonProperties, json) << "JSON contents:\n" << json; } @@ -131,7 +123,7 @@ TEST_F(PayloadPropertiesTest, GetPropertiesAsKeyValueTestHash) { "METADATA_HASH=aEKYyzJt2E8Gz8fzB+gmekN5mriotZCSq6R+kDfdeV4=\n" "METADATA_SIZE=165\n"; string key_value; - EXPECT_TRUE(PayloadProperties{payload_file.path()}.GetPropertiesAsKeyValue( + EXPECT_TRUE(PayloadProperties{payload_file_.path()}.GetPropertiesAsKeyValue( &key_value)); EXPECT_EQ(kKeyValueProperties, key_value) << "Key Value contents:\n" << key_value; diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc index fe629977..2a0b394c 100644 --- a/payload_generator/payload_signer_unittest.cc +++ b/payload_generator/payload_signer_unittest.cc @@ -167,7 +167,7 @@ TEST_F(PayloadSignerTest, VerifySignatureTest) { } TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) { - test_utils::ScopedTempFile payload_file("payload.XXXXXX"); + ScopedTempFile payload_file("payload.XXXXXX"); PayloadGenerationConfig config; config.version.major = kBrilloMajorPayloadVersion; PayloadFile payload; @@ -194,7 +194,7 @@ TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) { } TEST_F(PayloadSignerTest, VerifySignedPayloadTest) { - test_utils::ScopedTempFile payload_file("payload.XXXXXX"); + ScopedTempFile payload_file("payload.XXXXXX"); PayloadGenerationConfig config; config.version.major = kBrilloMajorPayloadVersion; PayloadFile payload; diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc index 6152d7d6..a41e2830 100644 --- a/payload_generator/squashfs_filesystem.cc +++ b/payload_generator/squashfs_filesystem.cc @@ -72,15 +72,10 @@ bool CheckHeader(const SquashfsFilesystem::SquashfsHeader& header) { } bool GetFileMapContent(const string& sqfs_path, string* map) { - // Create a tmp file - string map_file; - TEST_AND_RETURN_FALSE( - utils::MakeTempFile("squashfs_file_map.XXXXXX", &map_file, nullptr)); - ScopedPathUnlinker map_unlinker(map_file); - + ScopedTempFile map_file("squashfs_file_map.XXXXXX"); // Run unsquashfs to get the system file map. 
// unsquashfs -m - vector cmd = {"unsquashfs", "-m", map_file, sqfs_path}; + vector cmd = {"unsquashfs", "-m", map_file.path(), sqfs_path}; string stdout, stderr; int exit_code; if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) || @@ -89,7 +84,7 @@ bool GetFileMapContent(const string& sqfs_path, string* map) { << stdout << " and stderr content: " << stderr; return false; } - TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map)); + TEST_AND_RETURN_FALSE(utils::ReadFile(map_file.path(), map)); return true; } From 2fe84323164a95ce83c7936ef8f6c14483f38049 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 28 Oct 2020 21:43:23 +0000 Subject: [PATCH 432/624] Revert "update_engine: Remove case conditions for deprecated operations" This reverts commit f4d1196edca97a666e20d16a334aa67f47bbcf0a. Reason for revert: Original change's description: > update_engine: Remove case conditions for deprecated operations > > MOVE and BSDIFF were related to minor version 1 and major version 1 > which both are deprecated so we should remove these so the builders > don't complain (with warning) about these values. > > BUG=None > TEST=unittests pass > > Change-Id: I7ccc2c18d2dfc8e80b7c5d560988762a4c4cbdc3 > Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2265160 > Tested-by: Amin Hassani > Reviewed-by: Jae Hoon Kim > Commit-Queue: Amin Hassani TBR=ahassani@chromium.org,kimjae@chromium.org # Not skipping CQ checks because original CL landed > 1 day ago. Bug: None Change-Id: I9de3451f1607301e2b273a815e116754e2fa1e25 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2506472 Reviewed-by: Amin Hassani Tested-by: Amin Hassani Commit-Queue: Amin Hassani --- payload_consumer/payload_constants.cc | 4 ++++ payload_generator/payload_generation_config.cc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc index 663ab81f..d62a0ec6 100644 --- a/payload_consumer/payload_constants.cc +++ b/payload_consumer/payload_constants.cc @@ -66,6 +66,10 @@ const char* InstallOperationTypeName(InstallOperation::Type op_type) { return "PUFFDIFF"; case InstallOperation::BROTLI_BSDIFF: return "BROTLI_BSDIFF"; + + case InstallOperation::BSDIFF: + case InstallOperation::MOVE: + NOTREACHED(); } return ""; } diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc index 7f853d8a..ef2f2401 100644 --- a/payload_generator/payload_generation_config.cc +++ b/payload_generator/payload_generation_config.cc @@ -258,6 +258,10 @@ bool PayloadVersion::OperationAllowed(InstallOperation::Type operation) const { case InstallOperation::PUFFDIFF: return minor >= kPuffdiffMinorPayloadVersion; + + case InstallOperation::MOVE: + case InstallOperation::BSDIFF: + NOTREACHED(); } return false; } From 9754f17341a37ca2b2191c7c452c193f823fe5d2 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 25 Sep 2020 15:22:35 -0400 Subject: [PATCH 433/624] Implement SnapshotExtentWriter Part of the effort to integrate update_engine with VABC. SnapshotExtentWriter is the proxy between update_engine and VABC. 
Test: treehugger Bug: 168554689 Change-Id: Ib5afe8dac80658d3155f3fe9bcc42e9c3c78d6cb --- Android.bp | 1 + payload_consumer/snapshot_extent_writer.cc | 67 ++++++- payload_consumer/snapshot_extent_writer.h | 14 +- .../snapshot_extent_writer_unittest.cc | 180 ++++++++++++++++++ 4 files changed, 255 insertions(+), 7 deletions(-) create mode 100644 payload_consumer/snapshot_extent_writer_unittest.cc diff --git a/Android.bp b/Android.bp index 178b7daa..29e60256 100644 --- a/Android.bp +++ b/Android.bp @@ -728,6 +728,7 @@ cc_test { "payload_consumer/download_action_android_unittest.cc", "payload_consumer/extent_reader_unittest.cc", "payload_consumer/extent_writer_unittest.cc", + "payload_consumer/snapshot_extent_writer_unittest.cc", "payload_consumer/fake_file_descriptor.cc", "payload_consumer/file_descriptor_utils_unittest.cc", "payload_consumer/file_writer_unittest.cc", diff --git a/payload_consumer/snapshot_extent_writer.cc b/payload_consumer/snapshot_extent_writer.cc index 882d1f7f..5693c9b0 100644 --- a/payload_consumer/snapshot_extent_writer.cc +++ b/payload_consumer/snapshot_extent_writer.cc @@ -13,6 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // + #include "update_engine/payload_consumer/snapshot_extent_writer.h" #include @@ -23,6 +24,7 @@ #include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { + SnapshotExtentWriter::SnapshotExtentWriter( android::snapshot::ICowWriter* cow_writer) : cow_writer_(cow_writer) { @@ -30,25 +32,82 @@ SnapshotExtentWriter::SnapshotExtentWriter( } SnapshotExtentWriter::~SnapshotExtentWriter() { - CHECK(buffer_.empty()); + CHECK(buffer_.empty()) << buffer_.size(); } bool SnapshotExtentWriter::Init( FileDescriptorPtr /*fd*/, const google::protobuf::RepeatedPtrField& extents, - uint32_t /*block_size*/) { - // TODO(zhangkelvin) Implement this + uint32_t block_size) { + extents_ = extents; + cur_extent_idx_ = 0; + buffer_.clear(); + buffer_.reserve(block_size); + block_size_ = block_size; return true; } +size_t SnapshotExtentWriter::ConsumeWithBuffer(const uint8_t* data, + size_t count) { + CHECK_LT(cur_extent_idx_, static_cast(extents_.size())); + const auto& cur_extent = extents_[cur_extent_idx_]; + const auto cur_extent_size = cur_extent.num_blocks() * block_size_; + + if (buffer_.empty() && count >= cur_extent_size) { + TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks( + cur_extent.start_block(), data, cur_extent_size)); + if (!next_extent()) { + CHECK_EQ(count, cur_extent_size) + << "Exhausted all blocks, but still have " << count - cur_extent_size + << " bytes left"; + } + return cur_extent_size; + } + CHECK_LT(buffer_.size(), cur_extent_size) + << "Data left in buffer should never be >= cur_extent_size, otherwise " + "we should have send that data to CowWriter. Buffer size: " + << buffer_.size() << " current extent size: " << cur_extent_size; + size_t bytes_to_copy = + std::min(count, cur_extent_size - buffer_.size()); + CHECK_GT(bytes_to_copy, 0U); + + buffer_.insert(buffer_.end(), data, data + bytes_to_copy); + CHECK_LE(buffer_.size(), cur_extent_size); + + if (buffer_.size() == cur_extent_size) { + TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks( + cur_extent.start_block(), buffer_.data(), buffer_.size())); + buffer_.clear(); + if (!next_extent()) { + CHECK_EQ(count, bytes_to_copy) << "Exhausted all blocks, but still have " + << count - bytes_to_copy << " bytes left"; + } + } + return bytes_to_copy; +} + // Returns true on success. 
// This will construct a COW_REPLACE operation and forward it to CowWriter. It // is important that caller does not perform SOURCE_COPY operation on this // class, otherwise raw data will be stored. Caller should find ways to use // COW_COPY whenever possible. bool SnapshotExtentWriter::Write(const void* bytes, size_t count) { - // TODO(zhangkelvin) Implement this + if (count == 0) { + return true; + } + CHECK_NE(extents_.size(), 0); + + auto data = static_cast(bytes); + while (count > 0) { + auto bytes_written = ConsumeWithBuffer(data, count); + data += bytes_written; + count -= bytes_written; + } return true; } +bool SnapshotExtentWriter::next_extent() { + cur_extent_idx_++; + return cur_extent_idx_ < static_cast(extents_.size()); +} } // namespace chromeos_update_engine diff --git a/payload_consumer/snapshot_extent_writer.h b/payload_consumer/snapshot_extent_writer.h index 43a83173..fb4b548a 100644 --- a/payload_consumer/snapshot_extent_writer.h +++ b/payload_consumer/snapshot_extent_writer.h @@ -13,14 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. // + #include #include #include #include "update_engine/payload_consumer/extent_writer.h" +#include "update_engine/update_metadata.pb.h" namespace chromeos_update_engine { + class SnapshotExtentWriter : public chromeos_update_engine::ExtentWriter { public: explicit SnapshotExtentWriter(android::snapshot::ICowWriter* cow_writer); @@ -37,11 +40,16 @@ class SnapshotExtentWriter : public chromeos_update_engine::ExtentWriter { bool Write(const void* bytes, size_t count) override; private: + bool next_extent(); + size_t ConsumeWithBuffer(const uint8_t* bytes, size_t count); // It's a non-owning pointer, because PartitionWriter owns the CowWruter. This // allows us to use a single instance of CowWriter for all operations applied // to the same partition. - [[maybe_unused]] android::snapshot::ICowWriter* cow_writer_; - [[maybe_unused]] google::protobuf::RepeatedPtrField extents_; - [[maybe_unused]] std::vector buffer_; + android::snapshot::ICowWriter* cow_writer_; + google::protobuf::RepeatedPtrField extents_; + size_t cur_extent_idx_; + std::vector buffer_; + size_t block_size_; }; + } // namespace chromeos_update_engine diff --git a/payload_consumer/snapshot_extent_writer_unittest.cc b/payload_consumer/snapshot_extent_writer_unittest.cc new file mode 100644 index 00000000..0e22482b --- /dev/null +++ b/payload_consumer/snapshot_extent_writer_unittest.cc @@ -0,0 +1,180 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "update_engine/payload_consumer/snapshot_extent_writer.h" +#include "update_engine/payload_generator/delta_diff_generator.h" +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { + +class FakeCowWriter : public android::snapshot::ICowWriter { + public: + struct CowOp { + enum { COW_COPY, COW_REPLACE, COW_ZERO } type; + std::vector data; + union { + size_t source_block; + size_t num_blocks; + }; + }; + using ICowWriter::ICowWriter; + ~FakeCowWriter() = default; + + bool EmitCopy(uint64_t new_block, uint64_t old_block) override { + operations_[new_block] = {.type = CowOp::COW_COPY, + .source_block = static_cast(old_block)}; + return true; + } + bool EmitRawBlocks(uint64_t new_block_start, + const void* data, + size_t size) override { + auto&& op = operations_[new_block_start]; + const auto uint8_ptr = static_cast(data); + op.data.insert(op.data.end(), uint8_ptr, uint8_ptr + size); + return true; + } + bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override { + operations_[new_block_start] = {.type = CowOp::COW_ZERO}; + return true; + } + bool Finalize() override { + finalize_called_ = true; + return true; + } + + bool EmitLabel(uint64_t label) { + label_count_++; + return true; + } + + // Return number of bytes the cow image occupies on disk. + uint64_t GetCowSize() override { + return std::accumulate( + operations_.begin(), operations_.end(), 0, [](auto&& acc, auto&& op) { + return acc + op.second.data.size(); + }); + } + bool Contains(size_t block) { + return operations_.find(block) != operations_.end(); + } + bool finalize_called_ = true; + size_t label_count_ = 0; + std::map operations_; +}; + +class SnapshotExtentWriterTest : public ::testing::Test { + public: + void SetUp() override {} + + protected: + android::snapshot::CowOptions options_ = { + .block_size = static_cast(kBlockSize)}; + FakeCowWriter cow_writer_{options_}; + SnapshotExtentWriter writer_{&cow_writer_}; +}; + +void AddExtent(google::protobuf::RepeatedPtrField* extents, + size_t start_block, + size_t num_blocks) { + auto&& extent = extents->Add(); + extent->set_start_block(start_block); + extent->set_num_blocks(num_blocks); +} + +TEST_F(SnapshotExtentWriterTest, BufferWrites) { + google::protobuf::RepeatedPtrField extents; + AddExtent(&extents, 123, 1); + writer_.Init(nullptr, extents, kBlockSize); + + std::vector buf(kBlockSize, 0); + buf[123] = 231; + buf[231] = 123; + buf[buf.size() - 1] = 255; + + writer_.Write(buf.data(), kBlockSize - 1); + ASSERT_TRUE(cow_writer_.operations_.empty()) + << "Haven't send data of a complete block yet, CowWriter should not be " + "invoked."; + writer_.Write(buf.data() + kBlockSize - 1, 1); + ASSERT_TRUE(cow_writer_.Contains(123)) + << "Once a block of data is sent to SnapshotExtentWriter, it should " + "forward data to cow_writer."; + ASSERT_EQ(cow_writer_.operations_.size(), 1U); + ASSERT_EQ(buf, cow_writer_.operations_[123].data); +} + +TEST_F(SnapshotExtentWriterTest, NonBufferedWrites) { + google::protobuf::RepeatedPtrField extents; + AddExtent(&extents, 123, 1); + AddExtent(&extents, 125, 1); + writer_.Init(nullptr, extents, kBlockSize); + + std::vector buf(kBlockSize * 2, 0); + buf[123] = 231; + buf[231] = 123; + buf[buf.size() - 1] = 255; + + writer_.Write(buf.data(), buf.size()); + ASSERT_TRUE(cow_writer_.Contains(123)); + ASSERT_TRUE(cow_writer_.Contains(125)); + + ASSERT_EQ(cow_writer_.operations_.size(), 2U); + auto 
actual_data = cow_writer_.operations_[123].data; + actual_data.insert(actual_data.end(), + cow_writer_.operations_[125].data.begin(), + cow_writer_.operations_[125].data.end()); + ASSERT_EQ(buf, actual_data); +} + +TEST_F(SnapshotExtentWriterTest, WriteAcrossBlockBoundary) { + google::protobuf::RepeatedPtrField extents; + AddExtent(&extents, 123, 1); + AddExtent(&extents, 125, 2); + writer_.Init(nullptr, extents, kBlockSize); + + std::vector buf(kBlockSize * 3); + std::memset(buf.data(), 0, buf.size()); + buf[123] = 231; + buf[231] = 123; + buf[buf.size() - 1] = 255; + buf[kBlockSize - 1] = 254; + + writer_.Write(buf.data(), kBlockSize - 1); + ASSERT_TRUE(cow_writer_.operations_.empty()) + << "Haven't send data of a complete block yet, CowWriter should not be " + "invoked."; + writer_.Write(buf.data() + kBlockSize - 1, 1 + kBlockSize * 2); + ASSERT_TRUE(cow_writer_.Contains(123)); + ASSERT_TRUE(cow_writer_.Contains(125)); + + ASSERT_EQ(cow_writer_.operations_.size(), 2U); + auto actual_data = cow_writer_.operations_[123].data; + actual_data.insert(actual_data.end(), + cow_writer_.operations_[125].data.begin(), + cow_writer_.operations_[125].data.end()); + ASSERT_EQ(buf, actual_data); +} +} // namespace chromeos_update_engine From 9d87d6d80b82872f8f0d15279d74e64105f1a6f2 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 23 Oct 2020 17:03:59 -0400 Subject: [PATCH 434/624] Add [un]mapPartitions APIs to dynamic partition control During postinstall stage, update_engine needs all target partitions(system_b, vendor_b) to be mounted. We add 2 APIs to dynamic partition control, 1 for mounting all partitions, and UnmapAllPartitions for unmap all partitions. Test: Treehugger Bug: 168554689 Change-Id: I1047851f6cdae7f39f50bd1d50e6d3bfc0d9a7f5 --- common/dynamic_partition_control_interface.h | 5 ++++ common/dynamic_partition_control_stub.cc | 8 ++++++ common/dynamic_partition_control_stub.h | 3 ++ dynamic_partition_control_android.cc | 30 ++++++++++++++------ dynamic_partition_control_android.h | 7 ++++- mock_dynamic_partition_control.h | 2 ++ 6 files changed, 45 insertions(+), 10 deletions(-) diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h index 530b0af4..c8a2274c 100644 --- a/common/dynamic_partition_control_interface.h +++ b/common/dynamic_partition_control_interface.h @@ -154,6 +154,11 @@ class DynamicPartitionControlInterface { const std::string& unsuffixed_partition_name, const std::optional&, bool is_append = false) = 0; + + // Create virtual block devices for all partitions. + virtual bool MapAllPartitions() = 0; + // Unmap virtual block devices for all partitions. 
+ virtual bool UnmapAllPartitions() = 0; }; } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc index 64ab201d..cfc9e2ee 100644 --- a/common/dynamic_partition_control_stub.cc +++ b/common/dynamic_partition_control_stub.cc @@ -96,4 +96,12 @@ DynamicPartitionControlStub::OpenCowWriter( return nullptr; } +bool DynamicPartitionControlStub::MapAllPartitions() { + return false; +} + +bool DynamicPartitionControlStub::UnmapAllPartitions() { + return false; +} + } // namespace chromeos_update_engine diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h index a939cfbb..1fc8a350 100644 --- a/common/dynamic_partition_control_stub.h +++ b/common/dynamic_partition_control_stub.h @@ -62,6 +62,9 @@ class DynamicPartitionControlStub : public DynamicPartitionControlInterface { const std::string& unsuffixed_partition_name, const std::optional&, bool is_append) override; + + bool MapAllPartitions() override; + bool UnmapAllPartitions() override; }; } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc index 06e57453..7c331f0a 100644 --- a/dynamic_partition_control_android.cc +++ b/dynamic_partition_control_android.cc @@ -273,9 +273,9 @@ bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper( return true; } -void DynamicPartitionControlAndroid::UnmapAllPartitions() { +bool DynamicPartitionControlAndroid::UnmapAllPartitions() { if (mapped_devices_.empty()) { - return; + return false; } // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence // a copy is needed for the loop. @@ -284,6 +284,7 @@ void DynamicPartitionControlAndroid::UnmapAllPartitions() { for (const auto& partition_name : mapped) { ignore_result(UnmapPartitionOnDeviceMapper(partition_name)); } + return true; } void DynamicPartitionControlAndroid::Cleanup() { @@ -1240,16 +1241,12 @@ DynamicPartitionControlAndroid::OpenCowWriter( bool is_append) { auto suffix = SlotSuffixForSlotNumber(target_slot_); - std::string device_dir_str; - if (!GetDeviceDir(&device_dir_str)) { - LOG(ERROR) << "Failed to get device dir!"; + auto super_device = GetSuperDevice(); + if (!super_device.has_value()) { return nullptr; } - base::FilePath device_dir(device_dir_str); - auto super_device = - device_dir.Append(GetSuperPartitionName(target_slot_)).value(); CreateLogicalPartitionParams params = { - .block_device = super_device, + .block_device = super_device->value(), .metadata_slot = target_slot_, .partition_name = partition_name + suffix, .force_writable = true, @@ -1259,4 +1256,19 @@ DynamicPartitionControlAndroid::OpenCowWriter( return snapshot_->OpenSnapshotWriter(params, std::move(source_path)); } +std::optional DynamicPartitionControlAndroid::GetSuperDevice() { + std::string device_dir_str; + if (!GetDeviceDir(&device_dir_str)) { + LOG(ERROR) << "Failed to get device dir!"; + return {}; + } + base::FilePath device_dir(device_dir_str); + auto super_device = device_dir.Append(GetSuperPartitionName(target_slot_)); + return super_device; +} + +bool DynamicPartitionControlAndroid::MapAllPartitions() { + return snapshot_->MapAllSnapshots(); +} + } // namespace chromeos_update_engine diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h index 9bffb59b..4f3964b2 100644 --- a/dynamic_partition_control_android.h +++ b/dynamic_partition_control_android.h @@ -90,6 +90,8 @@ class DynamicPartitionControlAndroid : public 
DynamicPartitionControlInterface { const std::optional& source_path, bool is_append) override; + bool UnmapAllPartitions() override; + protected: // These functions are exposed for testing. @@ -201,11 +203,14 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { const DeltaArchiveManifest& manifest, bool delete_source); + bool MapAllPartitions() override; + private: friend class DynamicPartitionControlAndroidTest; friend class SnapshotPartitionTestP; - void UnmapAllPartitions(); + std::optional GetSuperDevice(); + bool MapPartitionInternal(const std::string& super_device, const std::string& target_partition_name, uint32_t slot, diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h index 5144cbbc..d5853cab 100644 --- a/mock_dynamic_partition_control.h +++ b/mock_dynamic_partition_control.h @@ -90,6 +90,8 @@ class MockDynamicPartitionControlAndroid const std::optional& source_path, bool is_append), (override)); + MOCK_METHOD(bool, MapAllPartitions, (), (override)); + MOCK_METHOD(bool, UnmapAllPartitions, (), (override)); void set_fake_mapped_devices(const std::set& fake) override { DynamicPartitionControlAndroid::set_fake_mapped_devices(fake); From 8b1e0dcbca7bbcae0f84124f2898e4f3b7dab895 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 26 Oct 2020 12:27:53 -0400 Subject: [PATCH 435/624] Mount all partitions before running postinstall For Virtual AB Compression, we need to mount snapshotted partitions manually. Test: treehugger Change-Id: I15af8375e393ce2539d2075b467e8dbe94bec247 --- common/error_code.h | 1 + common/error_code_utils.cc | 2 ++ metrics_utils.cc | 2 ++ payload_consumer/postinstall_runner_action.cc | 14 ++++++++++++++ 4 files changed, 19 insertions(+) diff --git a/common/error_code.h b/common/error_code.h index 7d9cfff8..a8898886 100644 --- a/common/error_code.h +++ b/common/error_code.h @@ -86,6 +86,7 @@ enum class ErrorCode : int { kNotEnoughSpace = 60, kDeviceCorrupted = 61, kPackageExcludedFromUpdate = 62, + kPostInstallMountError = 63, // VERY IMPORTANT! When adding new error codes: // diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc index cda4c7ec..421544af 100644 --- a/common/error_code_utils.cc +++ b/common/error_code_utils.cc @@ -173,6 +173,8 @@ string ErrorCodeToString(ErrorCode code) { return "ErrorCode::kDeviceCorrupted"; case ErrorCode::kPackageExcludedFromUpdate: return "ErrorCode::kPackageExcludedFromUpdate"; + case ErrorCode::kPostInstallMountError: + return "ErrorCode::kPostInstallMountError"; // Don't add a default case to let the compiler warn about newly added // error codes which should be added here. 
}
diff --git a/metrics_utils.cc b/metrics_utils.cc index 2211a67b..a947a7e4 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -95,6 +95,7 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code) { case ErrorCode::kPostinstallRunnerError: case ErrorCode::kPostinstallBootedFromFirmwareB: case ErrorCode::kPostinstallFirmwareRONotUpdatable: + case ErrorCode::kPostInstallMountError: return metrics::AttemptResult::kPostInstallFailed; case ErrorCode::kUserCanceled: @@ -189,6 +190,7 @@ metrics::DownloadErrorCode GetDownloadErrorCode(ErrorCode code) { case ErrorCode::kOmahaResponseHandlerError: case ErrorCode::kFilesystemCopierError: case ErrorCode::kPostinstallRunnerError: + case ErrorCode::kPostInstallMountError: case ErrorCode::kPayloadMismatchedType: case ErrorCode::kInstallDeviceOpenError: case ErrorCode::kKernelDeviceOpenError: diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc index 91c3a640..d51241f2 100644 --- a/payload_consumer/postinstall_runner_action.cc +++ b/payload_consumer/postinstall_runner_action.cc @@ -56,8 +56,22 @@ using std::vector; void PostinstallRunnerAction::PerformAction() { CHECK(HasInputObject()); + CHECK(boot_control_); install_plan_ = GetInputObject(); + auto dynamic_control = boot_control_->GetDynamicPartitionControl(); + CHECK(dynamic_control); + + // Mount snapshot partitions for Virtual AB Compression Compression. + if (dynamic_control->GetVirtualAbCompressionFeatureFlag().IsEnabled()) { + // Before calling MapAllPartitions to map snapshot devices, all CowWriters + // must be closed, and MapAllPartitions() should be called. + dynamic_control->UnmapAllPartitions(); + if (!dynamic_control->MapAllPartitions()) { + return CompletePostinstall(ErrorCode::kPostInstallMountError); + } + } + // We always powerwash when rolling back, however policy can determine // if this is a full/normal powerwash, or a special rollback powerwash // that retains a small amount of system state such as enrollment and From 24e1d72aa2a1024417121b0d91bf9a6f3b883a96 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 28 Oct 2020 17:47:58 -0700 Subject: [PATCH 436/624] update_engine: remove system_id and system_version These two variables and their use cases were introduced for Android, but Android eventually abandoned them for another solution, so they are now stale. Also removed the implementation of ImageProperties for Android, as it doesn't seem Android is actually using it.
BUG=b:171829801 TEST=unittests Change-Id: Ic793cfa5031d69b5390acefb1b8cd75291708890 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2505885 Reviewed-by: Jae Hoon Kim Reviewed-by: Tianjie Xu Reviewed-by: Amin Hassani Tested-by: Amin Hassani Commit-Queue: Amin Hassani --- image_properties.h | 4 - image_properties_android.cc | 246 ---------------------- image_properties_android_unittest.cc | 123 ----------- omaha_request_action.cc | 5 - omaha_request_action_unittest.cc | 29 --- omaha_request_builder_xml.cc | 10 - omaha_request_params.cc | 9 +- omaha_request_params.h | 7 - omaha_response.h | 1 - omaha_response_handler_action.cc | 1 - omaha_response_handler_action_unittest.cc | 2 - payload_consumer/install_plan.cc | 5 - payload_consumer/install_plan.h | 2 - update_attempter_unittest.cc | 1 - 14 files changed, 1 insertion(+), 444 deletions(-) delete mode 100644 image_properties_android.cc delete mode 100644 image_properties_android_unittest.cc diff --git a/image_properties.h b/image_properties.h index 49fe82fc..0887ca8a 100644 --- a/image_properties.h +++ b/image_properties.h @@ -33,13 +33,9 @@ struct ImageProperties { std::string product_id; // The canary-channel product id. std::string canary_product_id; - // The system id for the Android Things SoM, empty for Chrome OS. - std::string system_id; // The product version of this image. std::string version; - // The system version of this image. - std::string system_version; // The version of all product components in key values pairs. std::string product_components; diff --git a/image_properties_android.cc b/image_properties_android.cc deleted file mode 100644 index 2d418b3d..00000000 --- a/image_properties_android.cc +++ /dev/null @@ -1,246 +0,0 @@ -// -// Copyright (C) 2015 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/image_properties.h" - -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include "update_engine/common/boot_control_interface.h" -#include "update_engine/common/constants.h" -#include "update_engine/common/platform_constants.h" -#include "update_engine/common/prefs_interface.h" -#include "update_engine/common/utils.h" -#include "update_engine/system_state.h" - -using android::base::GetProperty; -using std::string; - -namespace chromeos_update_engine { - -namespace { - -// Build time properties name used in Android Things. -const char kProductId[] = "product_id"; -const char kProductVersion[] = "product_version"; -const char kSystemId[] = "system_id"; -const char kSystemVersion[] = "system_version"; - -// The path to the product_components file which stores the version of each -// components in OEM partition. -const char kProductComponentsPath[] = "/oem/os-release.d/product_components"; - -// Prefs used to store the powerwash settings. -const char kPrefsImgPropPowerwashAllowed[] = "img-prop-powerwash-allowed"; - -// System properties that identifies the "board". 
-const char kPropProductName[] = "ro.product.name"; -const char kPropBuildFingerprint[] = "ro.build.fingerprint"; -const char kPropBuildType[] = "ro.build.type"; - -// Default channel from factory.prop -const char kPropDefaultChannel[] = "ro.update.default_channel"; - -// A prefix added to the path, used for testing. -const char* root_prefix = nullptr; - -string GetStringWithDefault(const brillo::OsReleaseReader& osrelease, - const string& key, - const string& default_value) { - string result; - if (osrelease.GetString(key, &result)) - return result; - LOG(INFO) << "Cannot load ImageProperty " << key << ", using default value " - << default_value; - return default_value; -} - -// Open misc partition for read or write and output the fd in |out_fd|. -bool OpenMisc(bool write, int* out_fd) { - string misc_device; - int flags = write ? O_WRONLY | O_SYNC : O_RDONLY; - if (root_prefix) { - // Use a file for unittest and create one if doesn't exist. - misc_device = base::FilePath(root_prefix).Append("misc").value(); - if (write) - flags |= O_CREAT; - } else { - string err; - misc_device = get_bootloader_message_blk_device(&err); - if (misc_device.empty()) { - LOG(ERROR) << "Unable to get misc block device: " << err; - return false; - } - } - - int fd = HANDLE_EINTR(open(misc_device.c_str(), flags, 0600)); - if (fd < 0) { - PLOG(ERROR) << "Opening misc failed"; - return false; - } - *out_fd = fd; - return true; -} - -// The offset and size of the channel field in misc partition. -constexpr size_t kChannelOffset = - BOOTLOADER_MESSAGE_OFFSET_IN_MISC + - offsetof(bootloader_message_ab, update_channel); -constexpr size_t kChannelSize = sizeof(bootloader_message_ab::update_channel); - -// Read channel from misc partition to |out_channel|, return false if unable to -// read misc or no channel is set in misc. -bool ReadChannelFromMisc(string* out_channel) { - int fd; - TEST_AND_RETURN_FALSE(OpenMisc(false, &fd)); - ScopedFdCloser fd_closer(&fd); - char channel[kChannelSize] = {0}; - ssize_t bytes_read = 0; - if (!utils::PReadAll( - fd, channel, kChannelSize - 1, kChannelOffset, &bytes_read) || - bytes_read != kChannelSize - 1) { - PLOG(ERROR) << "Reading update channel from misc failed"; - return false; - } - if (channel[0] == '\0') { - LOG(INFO) << "No channel set in misc."; - return false; - } - if (!base::EndsWith(channel, "-channel", base::CompareCase::SENSITIVE)) { - LOG(ERROR) << "Channel " << channel << " doesn't end with -channel."; - return false; - } - out_channel->assign(channel); - return true; -} - -// Write |in_channel| to misc partition, return false if failed to write. 
-bool WriteChannelToMisc(const string& in_channel) { - int fd; - TEST_AND_RETURN_FALSE(OpenMisc(true, &fd)); - ScopedFdCloser fd_closer(&fd); - if (in_channel.size() >= kChannelSize) { - LOG(ERROR) << "Channel name is too long: " << in_channel - << ", the maximum length is " << kChannelSize - 1; - return false; - } - char channel[kChannelSize] = {0}; - memcpy(channel, in_channel.data(), in_channel.size()); - if (!utils::PWriteAll(fd, channel, kChannelSize, kChannelOffset)) { - PLOG(ERROR) << "Writing update channel to misc failed"; - return false; - } - return true; -} - -string GetTargetChannel() { - string channel; - if (!ReadChannelFromMisc(&channel)) - channel = GetProperty(kPropDefaultChannel, "stable-channel"); - return channel; -} -} // namespace - -namespace test { -void SetImagePropertiesRootPrefix(const char* test_root_prefix) { - root_prefix = test_root_prefix; -} -} // namespace test - -ImageProperties LoadImageProperties(SystemState* system_state) { - ImageProperties result; - - brillo::OsReleaseReader osrelease; - if (root_prefix) - osrelease.LoadTestingOnly(base::FilePath(root_prefix)); - else - osrelease.Load(); - result.product_id = - GetStringWithDefault(osrelease, kProductId, "invalid-product"); - result.system_id = GetStringWithDefault( - osrelease, kSystemId, "developer-boards:brillo-starter-board"); - // Update the system id to match the prefix of product id for testing. - string prefix, not_used, system_id; - if (brillo::string_utils::SplitAtFirst( - result.product_id, ":", &prefix, ¬_used, false) && - brillo::string_utils::SplitAtFirst( - result.system_id, ":", ¬_used, &system_id, false)) { - result.system_id = prefix + ":" + system_id; - } - result.canary_product_id = result.product_id; - result.version = GetStringWithDefault(osrelease, kProductVersion, "0.0.0.0"); - result.system_version = - GetStringWithDefault(osrelease, kSystemVersion, "0.0.0.0"); - // Can't read it with OsReleaseReader because it has multiple lines. - utils::ReadFile(kProductComponentsPath, &result.product_components); - - result.board = GetProperty(kPropProductName, "brillo"); - result.build_fingerprint = GetProperty(kPropBuildFingerprint, "none"); - result.build_type = GetProperty(kPropBuildType, ""); - - // Android doesn't have channel information in system image, we try to read - // the channel of current slot from prefs and then fallback to use the - // persisted target channel as current channel. - string current_channel_key = - kPrefsChannelOnSlotPrefix + - std::to_string(system_state->boot_control()->GetCurrentSlot()); - string current_channel; - if (!system_state->prefs()->Exists(current_channel_key) || - !system_state->prefs()->GetString(current_channel_key, ¤t_channel)) - current_channel = GetTargetChannel(); - result.current_channel = current_channel; - result.allow_arbitrary_channels = true; - - // Brillo only supports the official omaha URL. 
- result.omaha_url = constants::kOmahaDefaultProductionURL; - - return result; -} - -MutableImageProperties LoadMutableImageProperties(SystemState* system_state) { - MutableImageProperties result; - result.target_channel = GetTargetChannel(); - if (!system_state->prefs()->GetBoolean(kPrefsImgPropPowerwashAllowed, - &result.is_powerwash_allowed)) { - result.is_powerwash_allowed = false; - } - return result; -} - -bool StoreMutableImageProperties(SystemState* system_state, - const MutableImageProperties& properties) { - bool ret = true; - if (!WriteChannelToMisc(properties.target_channel)) - ret = false; - if (!system_state->prefs()->SetBoolean(kPrefsImgPropPowerwashAllowed, - properties.is_powerwash_allowed)) - ret = false; - return ret; -} - -void LogImageProperties() { - // TODO(*): Implement this. -} - -} // namespace chromeos_update_engine diff --git a/image_properties_android_unittest.cc b/image_properties_android_unittest.cc deleted file mode 100644 index 607284a0..00000000 --- a/image_properties_android_unittest.cc +++ /dev/null @@ -1,123 +0,0 @@ -// -// Copyright (C) 2017 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/image_properties.h" - -#include - -#include -#include -#include - -#include "update_engine/common/constants.h" -#include "update_engine/common/fake_prefs.h" -#include "update_engine/common/test_utils.h" -#include "update_engine/fake_system_state.h" - -using chromeos_update_engine::test_utils::WriteFileString; -using std::string; - -namespace chromeos_update_engine { - -class ImagePropertiesTest : public ::testing::Test { - protected: - void SetUp() override { - // Create a uniquely named test directory. 
- ASSERT_TRUE(tempdir_.CreateUniqueTempDir()); - osrelease_dir_ = tempdir_.GetPath().Append("etc/os-release.d"); - EXPECT_TRUE(base::CreateDirectory(osrelease_dir_)); - test::SetImagePropertiesRootPrefix(tempdir_.GetPath().value().c_str()); - } - - void WriteOsRelease(const string& key, const string& value) { - ASSERT_TRUE(WriteFileString(osrelease_dir_.Append(key).value(), value)); - } - - void WriteChannel(const string& channel) { - string misc(2080, '\0'); - misc += channel; - misc.resize(4096); - ASSERT_TRUE( - WriteFileString(tempdir_.GetPath().Append("misc").value(), misc)); - } - - FakeSystemState fake_system_state_; - - base::ScopedTempDir tempdir_; - base::FilePath osrelease_dir_; -}; - -TEST_F(ImagePropertiesTest, SimpleTest) { - WriteOsRelease("product_id", "abc"); - WriteOsRelease("system_id", "def"); - WriteOsRelease("product_version", "1.2.3.4"); - WriteOsRelease("system_version", "5.6.7.8"); - ImageProperties props = LoadImageProperties(&fake_system_state_); - EXPECT_EQ("abc", props.product_id); - EXPECT_EQ("def", props.system_id); - EXPECT_EQ("1.2.3.4", props.version); - EXPECT_EQ("5.6.7.8", props.system_version); - EXPECT_EQ("stable-channel", props.current_channel); - EXPECT_EQ(constants::kOmahaDefaultProductionURL, props.omaha_url); -} - -TEST_F(ImagePropertiesTest, IDPrefixTest) { - WriteOsRelease("product_id", "abc:def"); - WriteOsRelease("system_id", "foo:bar"); - ImageProperties props = LoadImageProperties(&fake_system_state_); - EXPECT_EQ("abc:def", props.product_id); - EXPECT_EQ("abc:bar", props.system_id); -} - -TEST_F(ImagePropertiesTest, IDInvalidPrefixTest) { - WriteOsRelease("product_id", "def"); - WriteOsRelease("system_id", "foo:bar"); - ImageProperties props = LoadImageProperties(&fake_system_state_); - EXPECT_EQ("def", props.product_id); - EXPECT_EQ("foo:bar", props.system_id); - - WriteOsRelease("product_id", "abc:def"); - WriteOsRelease("system_id", "bar"); - props = LoadImageProperties(&fake_system_state_); - EXPECT_EQ("abc:def", props.product_id); - EXPECT_EQ("bar", props.system_id); -} - -TEST_F(ImagePropertiesTest, LoadChannelTest) { - WriteChannel("unittest-channel"); - ImageProperties props = LoadImageProperties(&fake_system_state_); - EXPECT_EQ("unittest-channel", props.current_channel); -} - -TEST_F(ImagePropertiesTest, DefaultStableChannelTest) { - WriteChannel(""); - ImageProperties props = LoadImageProperties(&fake_system_state_); - EXPECT_EQ("stable-channel", props.current_channel); -} - -TEST_F(ImagePropertiesTest, StoreLoadMutableChannelTest) { - FakePrefs prefs; - fake_system_state_.set_prefs(&prefs); - WriteChannel("previous-channel"); - MutableImageProperties props; - props.target_channel = "new-channel"; - EXPECT_TRUE(StoreMutableImageProperties(&fake_system_state_, props)); - MutableImageProperties loaded_props = - LoadMutableImageProperties(&fake_system_state_); - EXPECT_EQ(props.target_channel, loaded_props.target_channel); -} - -} // namespace chromeos_update_engine diff --git a/omaha_request_action.cc b/omaha_request_action.cc index 161cf43c..4d236152 100644 --- a/omaha_request_action.cc +++ b/omaha_request_action.cc @@ -865,11 +865,6 @@ bool OmahaRequestAction::ParseParams(OmahaParserData* parser_data, if (app.id == params_->GetAppId()) { // this is the app (potentially the only app) output_object->version = app.manifest_version; - } else if (!params_->system_app_id().empty() && - app.id == params_->system_app_id()) { - // this is the system app (this check is intentionally skipped if there is - // no system_app_id set) - 
output_object->system_version = app.manifest_version; } else if (params_->is_install() && app.manifest_version != params_->app_version()) { LOG(WARNING) << "An app has a different version (" << app.manifest_version diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc index 61e988bb..43b20c1d 100644 --- a/omaha_request_action_unittest.cc +++ b/omaha_request_action_unittest.cc @@ -648,7 +648,6 @@ TEST_F(OmahaRequestActionTest, ValidUpdateTest) { EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); - EXPECT_EQ("", response.system_version); EXPECT_EQ(fake_update_response_.GetPayloadUrl(), response.packages[0].payload_urls[0]); EXPECT_EQ(fake_update_response_.more_info_url, response.more_info_url); @@ -711,32 +710,6 @@ TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) { EXPECT_EQ(false, response.packages[1].is_delta); } -TEST_F(OmahaRequestActionTest, MultiAppAndSystemUpdateTest) { - fake_update_response_.multi_app = true; - // Trigger the lining up of the app and system versions. - request_params_.set_system_app_id(fake_update_response_.app_id2); - tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); - - ASSERT_TRUE(TestUpdateCheck()); - - EXPECT_TRUE(response.update_exists); - EXPECT_EQ(fake_update_response_.version, response.version); - EXPECT_EQ(fake_update_response_.version2, response.system_version); - EXPECT_EQ(fake_update_response_.GetPayloadUrl(), - response.packages[0].payload_urls[0]); - EXPECT_EQ(fake_update_response_.codebase2 + "package3", - response.packages[1].payload_urls[0]); - EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); - EXPECT_EQ(fake_update_response_.size, response.packages[0].size); - EXPECT_EQ(11u, response.packages[0].metadata_size); - EXPECT_EQ(true, response.packages[0].is_delta); - ASSERT_EQ(2u, response.packages.size()); - EXPECT_EQ(string("hash3"), response.packages[1].hash); - EXPECT_EQ(333u, response.packages[1].size); - EXPECT_EQ(33u, response.packages[1].metadata_size); - EXPECT_EQ(false, response.packages[1].is_delta); -} - TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) { fake_update_response_.multi_app = true; fake_update_response_.multi_app_self_update = true; @@ -746,7 +719,6 @@ TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) { EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); - EXPECT_EQ("", response.system_version); EXPECT_EQ(fake_update_response_.GetPayloadUrl(), response.packages[0].payload_urls[0]); EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); @@ -768,7 +740,6 @@ TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) { EXPECT_TRUE(response.update_exists); EXPECT_EQ(fake_update_response_.version, response.version); - EXPECT_EQ("", response.system_version); EXPECT_EQ(fake_update_response_.GetPayloadUrl(), response.packages[0].payload_urls[0]); EXPECT_EQ(fake_update_response_.codebase + "package2", diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc index 6660afb4..c8758ab9 100644 --- a/omaha_request_builder_xml.cc +++ b/omaha_request_builder_xml.cc @@ -422,16 +422,6 @@ string OmahaRequestBuilderXml::GetApps() const { .app_params = {.active_counting_type = OmahaRequestParams::kDayBased, .send_ping = include_ping_}}; app_xml += GetApp(product_app); - if (!params_->system_app_id().empty()) { - OmahaAppData system_app = { - .id = params_->system_app_id(), - .version = params_->system_version(), - .skip_update = false, - .is_dlc = 
false, - .app_params = {.active_counting_type = OmahaRequestParams::kDayBased, - .send_ping = include_ping_}}; - app_xml += GetApp(system_app); - } for (const auto& it : params_->dlc_apps_params()) { OmahaAppData dlc_app_data = { .id = it.first, diff --git a/omaha_request_params.cc b/omaha_request_params.cc index 5a487207..ce6fd27d 100644 --- a/omaha_request_params.cc +++ b/omaha_request_params.cc @@ -78,14 +78,7 @@ bool OmahaRequestParams::Init(const string& app_version, LOG(INFO) << "Running from channel " << image_props_.current_channel; os_platform_ = constants::kOmahaPlatformName; - if (!image_props_.system_version.empty()) { - if (app_version == "ForcedUpdate") { - image_props_.system_version = app_version; - } - os_version_ = image_props_.system_version; - } else { - os_version_ = OmahaRequestParams::kOsVersion; - } + os_version_ = OmahaRequestParams::kOsVersion; if (!app_version.empty()) image_props_.version = app_version; diff --git a/omaha_request_params.h b/omaha_request_params.h index ed3cc80e..1bf7ae7a 100644 --- a/omaha_request_params.h +++ b/omaha_request_params.h @@ -94,10 +94,6 @@ class OmahaRequestParams { inline std::string canary_app_id() const { return image_props_.canary_product_id; } - inline std::string system_app_id() const { return image_props_.system_id; } - inline void set_system_app_id(const std::string& system_app_id) { - image_props_.system_id = system_app_id; - } inline void set_app_id(const std::string& app_id) { image_props_.product_id = app_id; image_props_.canary_product_id = app_id; @@ -110,9 +106,6 @@ class OmahaRequestParams { image_props_.version = version; } inline std::string app_version() const { return image_props_.version; } - inline std::string system_version() const { - return image_props_.system_version; - } inline std::string product_components() const { return image_props_.product_components; } diff --git a/omaha_response.h b/omaha_response.h index 77f90831..f50c14eb 100644 --- a/omaha_response.h +++ b/omaha_response.h @@ -38,7 +38,6 @@ struct OmahaResponse { // These are only valid if update_exists is true: std::string version; - std::string system_version; struct Package { // The ordered list of URLs in the Omaha response. Each item is a complete diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc index 92e0a72c..67de64b5 100644 --- a/omaha_response_handler_action.cc +++ b/omaha_response_handler_action.cc @@ -75,7 +75,6 @@ void OmahaResponseHandlerAction::PerformAction() { // |OmahaRequestAction| and keep the enforcement of exclusions for updates. 
install_plan_.download_url = current_url; install_plan_.version = response.version; - install_plan_.system_version = response.system_version; OmahaRequestParams* const params = system_state_->request_params(); PayloadStateInterface* const payload_state = system_state_->payload_state(); diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc index 9613e8de..530c4af0 100644 --- a/omaha_response_handler_action_unittest.cc +++ b/omaha_response_handler_action_unittest.cc @@ -919,7 +919,6 @@ TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) { OmahaResponse in; in.update_exists = true; in.version = "a.b.c.d"; - in.system_version = "b.c.d.e"; in.packages.push_back({.payload_urls = {"http://package/1"}, .size = 1, .hash = kPayloadHashHex}); @@ -936,7 +935,6 @@ TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) { EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash); EXPECT_EQ(in.version, install_plan.version); - EXPECT_EQ(in.system_version, install_plan.system_version); } TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) { diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc index c7ef7b20..4a37836f 100644 --- a/payload_consumer/install_plan.cc +++ b/payload_consumer/install_plan.cc @@ -82,11 +82,6 @@ void InstallPlan::Dump() const { } string version_str = base::StringPrintf(", version: %s", version.c_str()); - if (!system_version.empty()) { - version_str += - base::StringPrintf(", system_version: %s", system_version.c_str()); - } - string url_str = download_url; if (base::StartsWith( url_str, "fd://", base::CompareCase::INSENSITIVE_ASCII)) { diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index 5534fb33..ee1a72b7 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -54,8 +54,6 @@ struct InstallPlan { bool is_resume{false}; std::string download_url; // url to download from std::string version; // version we are installing. - // system version, if present and separate from version - std::string system_version; struct Payload { std::vector payload_urls; // URLs to download the payload diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc index 767bb824..8935bebb 100644 --- a/update_attempter_unittest.cc +++ b/update_attempter_unittest.cc @@ -1722,7 +1722,6 @@ TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { // but the update is being deferred by the Policy. OmahaResponseHandlerAction response_action(&fake_system_state_); response_action.install_plan_.version = "a.b.c.d"; - response_action.install_plan_.system_version = "b.c.d.e"; response_action.install_plan_.payloads.push_back( {.size = 1234ULL, .type = InstallPayloadType::kFull}); // Inform the UpdateAttempter that the OmahaResponseHandlerAction has From 4c42a43b8552ef1ecaf8af641faff997665da1ef Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Mon, 2 Nov 2020 18:19:18 -0800 Subject: [PATCH 437/624] Fix hang on host apply payload When applying some payload binaries, delta_generator might hang because before the loop runs, StartProcessing already finishes, thus BreakLoop does nothing. 
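The ordering problem described in this commit message is generic to single-threaded message loops: if the work finishes (and asks the loop to quit) before the loop has started running, the quit request is lost and the later Run() blocks forever. The following is a minimal, self-contained sketch of that failure mode and of the fix the diff below applies (posting the start call as a task so it executes inside the running loop); MiniLoop, PostTask, and BreakLoop here are illustrative stand-ins, not the real brillo/base message-loop API used by update_engine.

```cpp
// Minimal illustration (not the update_engine code) of the hang: work that
// completes before the loop runs requests a quit that nothing can honor.
#include <deque>
#include <functional>
#include <iostream>

class MiniLoop {
 public:
  void PostTask(std::function<void()> task) { tasks_.push_back(std::move(task)); }
  void BreakLoop() { quit_ = true; }  // Has no lasting effect before Run() starts.
  void Run() {
    quit_ = false;  // A quit requested before Run() is effectively lost.
    while (!quit_) {
      if (tasks_.empty()) continue;  // A real loop would block here forever.
      auto task = std::move(tasks_.front());
      tasks_.pop_front();
      task();
    }
  }

 private:
  std::deque<std::function<void()>> tasks_;
  bool quit_ = false;
};

int main() {
  MiniLoop loop;
  auto start_processing = [&loop] {
    // Pretend the whole payload is applied synchronously, then signal done.
    std::cout << "processing done\n";
    loop.BreakLoop();
  };

  // Buggy ordering: calling start_processing() directly here would invoke
  // BreakLoop() before Run(), and Run() would then wait forever.
  // Fixed ordering (what the patch does): enqueue the start as a task so it
  // runs inside Run(), where BreakLoop() actually stops the loop.
  loop.PostTask(start_processing);
  loop.Run();
  return 0;
}
```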
Test: extract GKI from APEX finishes and does not hang Bug: 171519321 Change-Id: Iad9c12402d23ce2e34f15db553445ea134f2a36d --- payload_generator/generate_delta_main.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 1944847e..2e32f1bf 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -234,7 +235,9 @@ bool ApplyPayload(const string& payload_file, processor.EnqueueAction(std::move(install_plan_action)); processor.EnqueueAction(std::move(download_action)); processor.EnqueueAction(std::move(filesystem_verifier_action)); - processor.StartProcessing(); + loop.PostTask(FROM_HERE, + base::Bind(&ActionProcessor::StartProcessing, + base::Unretained(&processor))); loop.Run(); CHECK_EQ(delegate.code_, ErrorCode::kSuccess); LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full") From ec7bc11c3103d9310568a391271be52baef273a7 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 29 Oct 2020 16:47:58 -0700 Subject: [PATCH 438/624] update_engine: Make the cros vs. aosp boundary clear It's time to make the boundary between Chrome OS and Android code clearer. This CL moves all CrOS-only code into the "cros" directory and does the same for Android (in the "aosp" directory). This way we can easily tell which code is used in which project and keep the code cleaner and more maintainable. One big remaining problem is the download_action* files. The DownloadAction class does a lot of things that Chrome OS needs and depends on a lot of Chrome OS code, but Android also uses this Action in a way that circumvents the Chrome OS parts. For example, Android checks whether SystemState is nullptr to skip certain behavior. This is really fragile and needs to change. The Android team probably has to implement its own DownloadAction of some sort rather than reusing the Chrome OS one in such a fragile way. Removed a few Android files that were not used anywhere. Fixed some clang-format and lint issues in order to pass preupload. 
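To make the fragility called out above concrete, here is a purely hypothetical sketch of the kind of nullptr-guard pattern the message describes; SharedDownloadAction and this SystemState usage are illustrative stand-ins and are not taken from the real DownloadAction code. The problem with the pattern is that "am I running on Android?" is inferred from a null pointer, so every Chrome OS-specific step needs its own guard and the Android behavior is defined only implicitly by which guards happen to exist.

```cpp
#include <iostream>

// Stand-in for the Chrome OS-wide services object; purely illustrative.
struct SystemState { /* prefs, payload state, metrics, ... */ };

class SharedDownloadAction {
 public:
  explicit SharedDownloadAction(SystemState* system_state)
      : system_state_(system_state) {}

  void PerformAction() {
    // Fragile: platform-specific behavior hinges on a null check rather than
    // an explicit interface, so the two platforms are coupled implicitly.
    if (system_state_ != nullptr) {
      std::cout << "Chrome OS-only bookkeeping (metrics, payload state)\n";
    }
    std::cout << "common download work\n";
  }

 private:
  SystemState* system_state_;  // nullptr on the Android path in this sketch.
};

int main() {
  SystemState cros_state;
  SharedDownloadAction cros_action(&cros_state);  // Chrome OS path.
  SharedDownloadAction android_action(nullptr);   // Android path.
  cros_action.PerformAction();
  android_action.PerformAction();
  return 0;
}
```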
BUG=b:171829801 TEST=cros_workon_make --board reef --test update_engine Change-Id: I3fff1d4a100a065a5c1484a845241b5521614d9f Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2508965 Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Jae Hoon Kim Reviewed-by: Tianjie Xu Reviewed-by: Kelvin Zhang Commit-Queue: Amin Hassani --- Android.bp | 53 ++++---- BUILD.gn | 106 ++++++++-------- .../binder_service_android.cc | 4 +- .../binder_service_android.h | 10 +- .../binder_service_android_common.h | 6 +- .../binder_service_stable_android.cc | 4 +- .../binder_service_stable_android.h | 10 +- .../boot_control_android.cc | 4 +- .../boot_control_android.h | 8 +- .../cleanup_previous_update_action.cc | 2 +- .../cleanup_previous_update_action.h | 6 +- daemon_android.cc => aosp/daemon_android.cc | 4 +- daemon_android.h => aosp/daemon_android.h | 14 +-- .../daemon_state_android.cc | 4 +- .../daemon_state_android.h | 14 +-- .../dynamic_partition_control_android.cc | 6 +- .../dynamic_partition_control_android.h | 6 +- ...amic_partition_control_android_unittest.cc | 19 ++- .../dynamic_partition_test_utils.h | 6 +- .../dynamic_partition_utils.cc | 2 +- .../dynamic_partition_utils.h | 6 +- .../hardware_android.cc | 2 +- hardware_android.h => aosp/hardware_android.h | 6 +- logging_android.cc => aosp/logging_android.cc | 0 .../metrics_reporter_android.cc | 2 +- .../metrics_reporter_android.h | 10 +- .../mock_dynamic_partition_control.h | 2 +- .../network_selector_android.cc | 6 +- .../network_selector_android.h | 8 +- .../platform_constants_android.cc | 0 .../service_delegate_android_interface.h | 6 +- sideload_main.cc => aosp/sideload_main.cc | 4 +- .../update_attempter_android.cc | 12 +- .../update_attempter_android.h | 18 +-- .../update_attempter_android_unittest.cc | 6 +- .../update_engine_client_android.cc | 0 .../cleanup_previous_update_action_delegate.h | 6 +- .../connection_utils.cc | 2 +- .../connection_utils.h | 6 +- daemon_base.h => common/daemon_base.h | 6 +- .../daemon_state_interface.h | 8 +- .../download_action.h | 8 +- common/http_fetcher.h | 2 +- logging.h => common/logging.h | 0 .../metrics_constants.h | 6 +- .../metrics_reporter_interface.h | 10 +- .../metrics_reporter_stub.cc | 2 +- .../metrics_reporter_stub.h | 10 +- .../mock_download_action.h | 8 +- common/mock_excluder.h | 6 +- .../mock_metrics_reporter.h | 8 +- .../mock_service_observer.h | 8 +- .../network_selector.h | 8 +- .../network_selector_interface.h | 6 +- .../network_selector_stub.cc | 6 +- .../network_selector_stub.h | 8 +- .../service_observer_interface.h | 6 +- system_state.h => common/system_state.h | 6 +- connection_manager_android.cc | 43 ------- connection_manager_android.h | 44 ------- .../boot_control_chromeos.cc | 2 +- .../boot_control_chromeos.h | 6 +- .../boot_control_chromeos_unittest.cc | 2 +- .../chrome_browser_proxy_resolver.cc | 4 +- .../chrome_browser_proxy_resolver.h | 6 +- common_service.cc => cros/common_service.cc | 14 +-- common_service.h => cros/common_service.h | 8 +- .../common_service_unittest.cc | 6 +- .../connection_manager.cc | 10 +- .../connection_manager.h | 10 +- .../connection_manager_interface.h | 8 +- .../connection_manager_unittest.cc | 6 +- daemon_chromeos.cc => cros/daemon_chromeos.cc | 4 +- daemon_chromeos.h => cros/daemon_chromeos.h | 12 +- dbus_connection.cc => cros/dbus_connection.cc | 2 +- dbus_connection.h => cros/dbus_connection.h | 6 +- dbus_service.cc => cros/dbus_service.cc | 4 +- dbus_service.h => cros/dbus_service.h | 12 +- 
dbus_test_utils.h => cros/dbus_test_utils.h | 6 +- .../dlcservice_chromeos.cc | 4 +- .../dlcservice_chromeos.h | 6 +- .../excluder_chromeos.cc | 4 +- .../excluder_chromeos.h | 6 +- .../excluder_chromeos_unittest.cc | 2 +- fake_p2p_manager.h => cros/fake_p2p_manager.h | 8 +- .../fake_p2p_manager_configuration.h | 8 +- .../fake_shill_proxy.cc | 2 +- fake_shill_proxy.h => cros/fake_shill_proxy.h | 8 +- .../fake_system_state.cc | 2 +- .../fake_system_state.h | 22 ++-- .../hardware_chromeos.cc | 6 +- .../hardware_chromeos.h | 6 +- .../hardware_chromeos_unittest.cc | 2 +- image_properties.h => cros/image_properties.h | 6 +- .../image_properties_chromeos.cc | 4 +- .../image_properties_chromeos_unittest.cc | 4 +- logging.cc => cros/logging.cc | 2 +- .../metrics_reporter_omaha.cc | 68 ++++++++--- .../metrics_reporter_omaha.h | 38 +++++- .../metrics_reporter_omaha_unittest.cc | 115 +++++++++++++++++- .../mock_connection_manager.h | 8 +- .../mock_omaha_request_params.h | 8 +- mock_p2p_manager.h => cros/mock_p2p_manager.h | 8 +- .../mock_payload_state.h | 10 +- .../mock_power_manager.h | 8 +- .../mock_update_attempter.h | 8 +- .../omaha_request_action.cc | 16 +-- .../omaha_request_action.h | 12 +- .../omaha_request_action_fuzzer.cc | 4 +- .../omaha_request_action_unittest.cc | 16 +-- .../omaha_request_builder_xml.cc | 4 +- .../omaha_request_builder_xml.h | 12 +- .../omaha_request_builder_xml_unittest.cc | 4 +- .../omaha_request_params.cc | 4 +- .../omaha_request_params.h | 10 +- .../omaha_request_params_unittest.cc | 4 +- omaha_response.h => cros/omaha_response.h | 6 +- .../omaha_response_handler_action.cc | 8 +- .../omaha_response_handler_action.h | 10 +- .../omaha_response_handler_action_unittest.cc | 6 +- omaha_utils.cc => cros/omaha_utils.cc | 2 +- omaha_utils.h => cros/omaha_utils.h | 6 +- .../omaha_utils_unittest.cc | 2 +- p2p_manager.cc => cros/p2p_manager.cc | 2 +- p2p_manager.h => cros/p2p_manager.h | 6 +- .../p2p_manager_unittest.cc | 4 +- payload_state.cc => cros/payload_state.cc | 12 +- payload_state.h => cros/payload_state.h | 10 +- .../payload_state_interface.h | 8 +- .../payload_state_unittest.cc | 8 +- .../platform_constants_chromeos.cc | 0 .../power_manager_chromeos.cc | 4 +- .../power_manager_chromeos.h | 8 +- .../power_manager_interface.h | 6 +- .../real_system_state.cc | 8 +- .../real_system_state.h | 24 ++-- .../requisition_util.cc | 2 +- requisition_util.h => cros/requisition_util.h | 6 +- .../requisition_util_unittest.cc | 2 +- shill_proxy.cc => cros/shill_proxy.cc | 4 +- shill_proxy.h => cros/shill_proxy.h | 8 +- .../shill_proxy_interface.h | 6 +- .../update_attempter.cc | 22 ++-- update_attempter.h => cros/update_attempter.h | 26 ++-- .../update_attempter_unittest.cc | 12 +- .../update_engine_client.cc | 2 +- .../download_action.cc => download_action.cc | 8 +- ....cc => download_action_android_unittest.cc | 2 +- ...unittest.cc => download_action_unittest.cc | 10 +- hardware_android_unittest.cc | 67 ---------- main.cc | 4 +- metrics_utils.cc | 46 +------ metrics_utils.h | 29 +---- metrics_utils_unittest.cc | 112 ----------------- mock_boot_control_hal.h | 49 -------- mock_libcurl_http_fetcher.h | 2 +- payload_consumer/delta_performer.cc | 4 +- payload_consumer/delta_performer_fuzzer.cc | 2 +- .../delta_performer_integration_test.cc | 4 +- payload_consumer/delta_performer_unittest.cc | 2 +- .../mock_file_writer.h | 0 .../postinstall_runner_action_unittest.cc | 2 +- payload_generator/generate_delta_main.cc | 2 +- power_manager_android.cc | 36 ------ power_manager_android.h | 40 
------ update_boot_flags_action_unittest.cc | 14 +-- update_manager/boxed_value.cc | 2 +- update_manager/real_device_policy_provider.cc | 2 +- .../real_device_policy_provider_unittest.cc | 2 +- update_manager/real_shill_provider.h | 2 +- .../real_shill_provider_unittest.cc | 4 +- update_manager/real_system_provider.cc | 2 +- update_manager/real_system_provider.h | 2 +- .../real_system_provider_unittest.cc | 2 +- update_manager/real_updater_provider.cc | 4 +- update_manager/real_updater_provider.h | 2 +- .../real_updater_provider_unittest.cc | 6 +- update_manager/shill_provider.h | 2 +- update_manager/staging_utils.cc | 2 +- update_manager/state_factory.cc | 4 +- update_manager/state_factory.h | 2 +- 181 files changed, 803 insertions(+), 1088 deletions(-) rename binder_service_android.cc => aosp/binder_service_android.cc (98%) rename binder_service_android.h => aosp/binder_service_android.h (92%) rename binder_service_android_common.h => aosp/binder_service_android_common.h (87%) rename binder_service_stable_android.cc => aosp/binder_service_stable_android.cc (97%) rename binder_service_stable_android.h => aosp/binder_service_stable_android.h (89%) rename boot_control_android.cc => aosp/boot_control_android.cc (98%) rename boot_control_android.h => aosp/boot_control_android.h (92%) rename cleanup_previous_update_action.cc => aosp/cleanup_previous_update_action.cc (99%) rename cleanup_previous_update_action.h => aosp/cleanup_previous_update_action.h (94%) rename daemon_android.cc => aosp/daemon_android.cc (95%) rename daemon_android.h => aosp/daemon_android.h (80%) rename daemon_state_android.cc => aosp/daemon_state_android.cc (96%) rename daemon_state_android.h => aosp/daemon_state_android.h (84%) rename dynamic_partition_control_android.cc => aosp/dynamic_partition_control_android.cc (99%) rename dynamic_partition_control_android.h => aosp/dynamic_partition_control_android.h (98%) rename dynamic_partition_control_android_unittest.cc => aosp/dynamic_partition_control_android_unittest.cc (98%) rename dynamic_partition_test_utils.h => aosp/dynamic_partition_test_utils.h (98%) rename dynamic_partition_utils.cc => aosp/dynamic_partition_utils.cc (95%) rename dynamic_partition_utils.h => aosp/dynamic_partition_utils.h (85%) rename hardware_android.cc => aosp/hardware_android.cc (99%) rename hardware_android.h => aosp/hardware_android.h (95%) rename logging_android.cc => aosp/logging_android.cc (100%) rename metrics_reporter_android.cc => aosp/metrics_reporter_android.cc (99%) rename metrics_reporter_android.h => aosp/metrics_reporter_android.h (92%) rename mock_dynamic_partition_control.h => aosp/mock_dynamic_partition_control.h (98%) rename network_selector_android.cc => aosp/network_selector_android.cc (88%) rename network_selector_android.h => aosp/network_selector_android.h (82%) rename {common => aosp}/platform_constants_android.cc (100%) rename service_delegate_android_interface.h => aosp/service_delegate_android_interface.h (96%) rename sideload_main.cc => aosp/sideload_main.cc (98%) rename update_attempter_android.cc => aosp/update_attempter_android.cc (99%) rename update_attempter_android.h => aosp/update_attempter_android.h (94%) rename update_attempter_android_unittest.cc => aosp/update_attempter_android_unittest.cc (97%) rename update_engine_client_android.cc => aosp/update_engine_client_android.cc (100%) rename connection_utils.cc => common/connection_utils.cc (97%) rename connection_utils.h => common/connection_utils.h (89%) rename daemon_base.h => common/daemon_base.h (88%) rename 
daemon_state_interface.h => common/daemon_state_interface.h (85%) rename {payload_consumer => common}/download_action.h (97%) rename logging.h => common/logging.h (100%) rename metrics_constants.h => common/metrics_constants.h (97%) rename metrics_reporter_interface.h => common/metrics_reporter_interface.h (97%) rename metrics_reporter_stub.cc => common/metrics_reporter_stub.cc (94%) rename metrics_reporter_stub.h => common/metrics_reporter_stub.h (92%) rename {payload_consumer => common}/mock_download_action.h (81%) rename mock_metrics_reporter.h => common/mock_metrics_reporter.h (94%) rename mock_service_observer.h => common/mock_service_observer.h (81%) rename network_selector.h => common/network_selector.h (81%) rename network_selector_interface.h => common/network_selector_interface.h (88%) rename network_selector_stub.cc => common/network_selector_stub.cc (88%) rename network_selector_stub.h => common/network_selector_stub.h (82%) rename service_observer_interface.h => common/service_observer_interface.h (88%) rename system_state.h => common/system_state.h (96%) delete mode 100644 connection_manager_android.cc delete mode 100644 connection_manager_android.h rename boot_control_chromeos.cc => cros/boot_control_chromeos.cc (99%) rename boot_control_chromeos.h => cros/boot_control_chromeos.h (96%) rename boot_control_chromeos_unittest.cc => cros/boot_control_chromeos_unittest.cc (98%) rename chrome_browser_proxy_resolver.cc => cros/chrome_browser_proxy_resolver.cc (95%) rename chrome_browser_proxy_resolver.h => cros/chrome_browser_proxy_resolver.h (91%) rename common_service.cc => cros/common_service.cc (97%) rename common_service.h => cros/common_service.h (97%) rename common_service_unittest.cc => cros/common_service_unittest.cc (97%) rename connection_manager.cc => cros/connection_manager.cc (96%) rename connection_manager.h => cros/connection_manager.h (89%) rename connection_manager_interface.h => cros/connection_manager_interface.h (90%) rename connection_manager_unittest.cc => cros/connection_manager_unittest.cc (98%) rename daemon_chromeos.cc => cros/daemon_chromeos.cc (96%) rename daemon_chromeos.h => cros/daemon_chromeos.h (84%) rename dbus_connection.cc => cros/dbus_connection.cc (97%) rename dbus_connection.h => cros/dbus_connection.h (88%) rename dbus_service.cc => cros/dbus_service.cc (98%) rename dbus_service.h => cros/dbus_service.h (96%) rename dbus_test_utils.h => cros/dbus_test_utils.h (95%) rename dlcservice_chromeos.cc => cros/dlcservice_chromeos.cc (95%) rename dlcservice_chromeos.h => cros/dlcservice_chromeos.h (92%) rename excluder_chromeos.cc => cros/excluder_chromeos.cc (94%) rename excluder_chromeos.h => cros/excluder_chromeos.h (91%) rename excluder_chromeos_unittest.cc => cros/excluder_chromeos_unittest.cc (97%) rename fake_p2p_manager.h => cros/fake_p2p_manager.h (94%) rename fake_p2p_manager_configuration.h => cros/fake_p2p_manager_configuration.h (93%) rename fake_shill_proxy.cc => cros/fake_shill_proxy.cc (97%) rename fake_shill_proxy.h => cros/fake_shill_proxy.h (90%) rename fake_system_state.cc => cros/fake_system_state.cc (96%) rename fake_system_state.h => cros/fake_system_state.h (94%) rename hardware_chromeos.cc => cros/hardware_chromeos.cc (98%) rename hardware_chromeos.h => cros/hardware_chromeos.h (95%) rename hardware_chromeos_unittest.cc => cros/hardware_chromeos_unittest.cc (98%) rename image_properties.h => cros/image_properties.h (96%) rename image_properties_chromeos.cc => cros/image_properties_chromeos.cc (98%) rename 
image_properties_chromeos_unittest.cc => cros/image_properties_chromeos_unittest.cc (98%) rename logging.cc => cros/logging.cc (98%) rename metrics_reporter_omaha.cc => cros/metrics_reporter_omaha.cc (92%) rename metrics_reporter_omaha.h => cros/metrics_reporter_omaha.h (80%) rename metrics_reporter_omaha_unittest.cc => cros/metrics_reporter_omaha_unittest.cc (80%) rename mock_connection_manager.h => cros/mock_connection_manager.h (85%) rename mock_omaha_request_params.h => cros/mock_omaha_request_params.h (92%) rename mock_p2p_manager.h => cros/mock_p2p_manager.h (95%) rename mock_payload_state.h => cros/mock_payload_state.h (92%) rename mock_power_manager.h => cros/mock_power_manager.h (80%) rename mock_update_attempter.h => cros/mock_update_attempter.h (90%) rename omaha_request_action.cc => cros/omaha_request_action.cc (99%) rename omaha_request_action.h => cros/omaha_request_action.h (97%) rename omaha_request_action_fuzzer.cc => cros/omaha_request_action_fuzzer.cc (94%) rename omaha_request_action_unittest.cc => cros/omaha_request_action_unittest.cc (99%) rename omaha_request_builder_xml.cc => cros/omaha_request_builder_xml.cc (99%) rename omaha_request_builder_xml.h => cros/omaha_request_builder_xml.h (95%) rename omaha_request_builder_xml_unittest.cc => cros/omaha_request_builder_xml_unittest.cc (99%) rename omaha_request_params.cc => cros/omaha_request_params.cc (99%) rename omaha_request_params.h => cros/omaha_request_params.h (98%) rename omaha_request_params_unittest.cc => cros/omaha_request_params_unittest.cc (98%) rename omaha_response.h => cros/omaha_response.h (96%) rename omaha_response_handler_action.cc => cros/omaha_response_handler_action.cc (98%) rename omaha_response_handler_action.h => cros/omaha_response_handler_action.h (91%) rename omaha_response_handler_action_unittest.cc => cros/omaha_response_handler_action_unittest.cc (99%) rename omaha_utils.cc => cros/omaha_utils.cc (96%) rename omaha_utils.h => cros/omaha_utils.h (89%) rename omaha_utils_unittest.cc => cros/omaha_utils_unittest.cc (96%) rename p2p_manager.cc => cros/p2p_manager.cc (99%) rename p2p_manager.h => cros/p2p_manager.h (98%) rename p2p_manager_unittest.cc => cros/p2p_manager_unittest.cc (99%) rename payload_state.cc => cros/payload_state.cc (99%) rename payload_state.h => cros/payload_state.h (99%) rename payload_state_interface.h => cros/payload_state_interface.h (97%) rename payload_state_unittest.cc => cros/payload_state_unittest.cc (99%) rename {common => cros}/platform_constants_chromeos.cc (100%) rename power_manager_chromeos.cc => cros/power_manager_chromeos.cc (93%) rename power_manager_chromeos.h => cros/power_manager_chromeos.h (84%) rename power_manager_interface.h => cros/power_manager_interface.h (88%) rename real_system_state.cc => cros/real_system_state.cc (98%) rename real_system_state.h => cros/real_system_state.h (90%) rename requisition_util.cc => cros/requisition_util.cc (97%) rename requisition_util.h => cros/requisition_util.h (86%) rename requisition_util_unittest.cc => cros/requisition_util_unittest.cc (98%) rename shill_proxy.cc => cros/shill_proxy.cc (93%) rename shill_proxy.h => cros/shill_proxy.h (89%) rename shill_proxy_interface.h => cros/shill_proxy_interface.h (92%) rename update_attempter.cc => cros/update_attempter.cc (99%) rename update_attempter.h => cros/update_attempter.h (97%) rename update_attempter_unittest.cc => cros/update_attempter_unittest.cc (99%) rename update_engine_client.cc => cros/update_engine_client.cc (99%) rename 
payload_consumer/download_action.cc => download_action.cc (98%) rename payload_consumer/download_action_android_unittest.cc => download_action_android_unittest.cc (98%) rename payload_consumer/download_action_unittest.cc => download_action_unittest.cc (98%) delete mode 100644 hardware_android_unittest.cc delete mode 100644 mock_boot_control_hal.h rename mock_file_writer.h => payload_consumer/mock_file_writer.h (100%) delete mode 100644 power_manager_android.cc delete mode 100644 power_manager_android.h diff --git a/Android.bp b/Android.bp index a8b5fc28..9cf7bd44 100644 --- a/Android.bp +++ b/Android.bp @@ -143,6 +143,7 @@ cc_library_static { recovery_available: true, srcs: [ + "aosp/platform_constants_android.cc", "common/action_processor.cc", "common/boot_control_stub.cc", "common/clock.cc", @@ -156,17 +157,16 @@ cc_library_static { "common/http_fetcher.cc", "common/hwid_override.cc", "common/multi_range_http_fetcher.cc", - "common/platform_constants_android.cc", "common/prefs.cc", "common/proxy_resolver.cc", "common/subprocess.cc", "common/terminator.cc", "common/utils.cc", + "download_action.cc", "payload_consumer/bzip_extent_writer.cc", "payload_consumer/cached_file_descriptor.cc", "payload_consumer/certificate_parser_android.cc", "payload_consumer/delta_performer.cc", - "payload_consumer/download_action.cc", "payload_consumer/extent_reader.cc", "payload_consumer/extent_writer.cc", "payload_consumer/file_descriptor.cc", @@ -240,10 +240,10 @@ cc_library_static { recovery_available: true, srcs: [ - "boot_control_android.cc", - "cleanup_previous_update_action.cc", - "dynamic_partition_control_android.cc", - "dynamic_partition_utils.cc", + "aosp/boot_control_android.cc", + "aosp/cleanup_previous_update_action.cc", + "aosp/dynamic_partition_control_android.cc", + "aosp/dynamic_partition_utils.cc", ], } @@ -299,17 +299,17 @@ cc_library_static { srcs: [ ":libupdate_engine_aidl", - "binder_service_android.cc", - "binder_service_stable_android.cc", + "aosp/binder_service_android.cc", + "aosp/binder_service_stable_android.cc", + "aosp/daemon_android.cc", + "aosp/daemon_state_android.cc", + "aosp/hardware_android.cc", + "aosp/logging_android.cc", + "aosp/network_selector_android.cc", + "aosp/update_attempter_android.cc", "certificate_checker.cc", - "daemon_android.cc", - "daemon_state_android.cc", - "hardware_android.cc", "libcurl_http_fetcher.cc", - "logging_android.cc", "metrics_utils.cc", - "network_selector_android.cc", - "update_attempter_android.cc", "update_boot_flags_action.cc", "update_status_utils.cc", ], @@ -331,7 +331,7 @@ cc_binary { "otacerts", ], - srcs: ["main.cc", "metrics_reporter_android.cc"], + srcs: ["main.cc", "aosp/metrics_reporter_android.cc"], init_rc: ["update_engine.rc"], } @@ -356,13 +356,13 @@ cc_binary { include_dirs: ["external/cros/system_api/dbus"], srcs: [ - "hardware_android.cc", - "logging_android.cc", - "metrics_reporter_stub.cc", + "aosp/hardware_android.cc", + "aosp/logging_android.cc", + "aosp/sideload_main.cc", + "aosp/update_attempter_android.cc", + "common/metrics_reporter_stub.cc", + "common/network_selector_stub.cc", "metrics_utils.cc", - "network_selector_stub.cc", - "sideload_main.cc", - "update_attempter_android.cc", "update_boot_flags_action.cc", "update_status_utils.cc", ], @@ -432,8 +432,8 @@ cc_binary { srcs: [ ":libupdate_engine_aidl", + "aosp/update_engine_client_android.cc", "common/error_code_utils.cc", - "update_engine_client_android.cc", "update_status_utils.cc", ], } @@ -665,6 +665,8 @@ cc_test { test_suites: ["device-tests"], srcs: [ + 
"aosp/dynamic_partition_control_android_unittest.cc", + "aosp/update_attempter_android_unittest.cc", "certificate_checker_unittest.cc", "common/action_pipe_unittest.cc", "common/action_processor_unittest.cc", @@ -675,6 +677,7 @@ cc_test { "common/hash_calculator_unittest.cc", "common/http_fetcher_unittest.cc", "common/hwid_override_unittest.cc", + "common/metrics_reporter_stub.cc", "common/mock_http_fetcher.cc", "common/prefs_unittest.cc", "common/proxy_resolver_unittest.cc", @@ -682,15 +685,13 @@ cc_test { "common/terminator_unittest.cc", "common/test_utils.cc", "common/utils_unittest.cc", - "dynamic_partition_control_android_unittest.cc", + "download_action_android_unittest.cc", "libcurl_http_fetcher_unittest.cc", - "hardware_android_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", "payload_consumer/certificate_parser_android_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", "payload_consumer/delta_performer_unittest.cc", - "payload_consumer/download_action_android_unittest.cc", "payload_consumer/extent_reader_unittest.cc", "payload_consumer/extent_writer_unittest.cc", "payload_consumer/fake_file_descriptor.cc", @@ -722,9 +723,7 @@ cc_test { "payload_generator/squashfs_filesystem_unittest.cc", "payload_generator/zip_unittest.cc", "testrunner.cc", - "update_attempter_android_unittest.cc", "update_status_utils_unittest.cc", - "metrics_reporter_stub.cc", ], } diff --git a/BUILD.gn b/BUILD.gn index 8f065133..90913cba 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -143,17 +143,17 @@ static_library("libpayload_consumer") { "common/http_fetcher.cc", "common/hwid_override.cc", "common/multi_range_http_fetcher.cc", - "common/platform_constants_chromeos.cc", "common/prefs.cc", "common/proxy_resolver.cc", "common/subprocess.cc", "common/terminator.cc", "common/utils.cc", + "cros/platform_constants_chromeos.cc", + "download_action.cc", "payload_consumer/bzip_extent_writer.cc", "payload_consumer/cached_file_descriptor.cc", "payload_consumer/certificate_parser_stub.cc", "payload_consumer/delta_performer.cc", - "payload_consumer/download_action.cc", "payload_consumer/extent_reader.cc", "payload_consumer/extent_writer.cc", "payload_consumer/file_descriptor.cc", @@ -195,32 +195,32 @@ static_library("libpayload_consumer") { # with Omaha and expose a DBus daemon. 
static_library("libupdate_engine") { sources = [ - "boot_control_chromeos.cc", "certificate_checker.cc", - "common_service.cc", - "connection_manager.cc", - "connection_utils.cc", - "daemon_chromeos.cc", - "dbus_connection.cc", - "dbus_service.cc", - "hardware_chromeos.cc", - "image_properties_chromeos.cc", + "common/connection_utils.cc", + "cros/boot_control_chromeos.cc", + "cros/common_service.cc", + "cros/connection_manager.cc", + "cros/daemon_chromeos.cc", + "cros/dbus_connection.cc", + "cros/dbus_service.cc", + "cros/hardware_chromeos.cc", + "cros/image_properties_chromeos.cc", + "cros/logging.cc", + "cros/metrics_reporter_omaha.cc", + "cros/omaha_request_action.cc", + "cros/omaha_request_builder_xml.cc", + "cros/omaha_request_params.cc", + "cros/omaha_response_handler_action.cc", + "cros/omaha_utils.cc", + "cros/p2p_manager.cc", + "cros/payload_state.cc", + "cros/power_manager_chromeos.cc", + "cros/real_system_state.cc", + "cros/requisition_util.cc", + "cros/shill_proxy.cc", + "cros/update_attempter.cc", "libcurl_http_fetcher.cc", - "logging.cc", - "metrics_reporter_omaha.cc", "metrics_utils.cc", - "omaha_request_action.cc", - "omaha_request_builder_xml.cc", - "omaha_request_params.cc", - "omaha_response_handler_action.cc", - "omaha_utils.cc", - "p2p_manager.cc", - "payload_state.cc", - "power_manager_chromeos.cc", - "real_system_state.cc", - "requisition_util.cc", - "shill_proxy.cc", - "update_attempter.cc", "update_boot_flags_action.cc", "update_manager/boxed_value.cc", "update_manager/chromeos_policy.cc", @@ -279,7 +279,7 @@ static_library("libupdate_engine") { } if (use.chrome_network_proxy) { - sources += [ "chrome_browser_proxy_resolver.cc" ] + sources += [ "cros/chrome_browser_proxy_resolver.cc" ] } if (use.chrome_kiosk_app) { @@ -288,8 +288,8 @@ static_library("libupdate_engine") { if (use.dlc) { sources += [ - "dlcservice_chromeos.cc", - "excluder_chromeos.cc", + "cros/dlcservice_chromeos.cc", + "cros/excluder_chromeos.cc", ] } else { sources += [ @@ -324,8 +324,8 @@ static_library("libupdate_engine_client") { executable("update_engine_client") { sources = [ "common/error_code_utils.cc", - "omaha_utils.cc", - "update_engine_client.cc", + "cros/omaha_utils.cc", + "cros/update_engine_client.cc", ] configs += [ ":target_defaults" ] deps = [ ":libupdate_engine_client" ] @@ -394,8 +394,8 @@ if (use.test || use.fuzzer) { "common/fake_prefs.cc", "common/mock_http_fetcher.cc", "common/test_utils.cc", - "fake_shill_proxy.cc", - "fake_system_state.cc", + "cros/fake_shill_proxy.cc", + "cros/fake_system_state.cc", "payload_consumer/fake_file_descriptor.cc", "payload_generator/fake_filesystem.cc", "update_manager/umtest_utils.cc", @@ -420,8 +420,8 @@ if (use.test) { openssl_pem_out_dir = "include/update_engine" sources = [ "unittest_key.pem", - "unittest_key_RSA4096.pem", "unittest_key2.pem", + "unittest_key_RSA4096.pem", ] } @@ -429,9 +429,7 @@ if (use.test) { openssl_pem_in_dir = "." openssl_pem_out_dir = "include/update_engine" openssl_pem_algorithm = "ec" - sources = [ - "unittest_key_EC.pem", - ] + sources = [ "unittest_key_EC.pem" ] } # Unpacks sample images used for testing. @@ -470,7 +468,6 @@ if (use.test) { # Main unittest file. 
executable("update_engine_unittests") { sources = [ - "boot_control_chromeos_unittest.cc", "certificate_checker_unittest.cc", "common/action_pipe_unittest.cc", "common/action_processor_unittest.cc", @@ -484,24 +481,28 @@ if (use.test) { "common/subprocess_unittest.cc", "common/terminator_unittest.cc", "common/utils_unittest.cc", - "common_service_unittest.cc", - "connection_manager_unittest.cc", - "hardware_chromeos_unittest.cc", - "image_properties_chromeos_unittest.cc", + "cros/boot_control_chromeos_unittest.cc", + "cros/common_service_unittest.cc", + "cros/connection_manager_unittest.cc", + "cros/hardware_chromeos_unittest.cc", + "cros/image_properties_chromeos_unittest.cc", + "cros/metrics_reporter_omaha_unittest.cc", + "cros/omaha_request_action_unittest.cc", + "cros/omaha_request_builder_xml_unittest.cc", + "cros/omaha_request_params_unittest.cc", + "cros/omaha_response_handler_action_unittest.cc", + "cros/omaha_utils_unittest.cc", + "cros/p2p_manager_unittest.cc", + "cros/payload_state_unittest.cc", + "cros/requisition_util_unittest.cc", + "cros/update_attempter_unittest.cc", + "download_action_unittest.cc", "libcurl_http_fetcher_unittest.cc", - "metrics_reporter_omaha_unittest.cc", "metrics_utils_unittest.cc", - "omaha_request_action_unittest.cc", - "omaha_request_builder_xml_unittest.cc", - "omaha_request_params_unittest.cc", - "omaha_response_handler_action_unittest.cc", - "omaha_utils_unittest.cc", - "p2p_manager_unittest.cc", "payload_consumer/bzip_extent_writer_unittest.cc", "payload_consumer/cached_file_descriptor_unittest.cc", "payload_consumer/delta_performer_integration_test.cc", "payload_consumer/delta_performer_unittest.cc", - "payload_consumer/download_action_unittest.cc", "payload_consumer/extent_reader_unittest.cc", "payload_consumer/extent_writer_unittest.cc", "payload_consumer/file_descriptor_utils_unittest.cc", @@ -526,10 +527,7 @@ if (use.test) { "payload_generator/payload_signer_unittest.cc", "payload_generator/squashfs_filesystem_unittest.cc", "payload_generator/zip_unittest.cc", - "payload_state_unittest.cc", - "requisition_util_unittest.cc", "testrunner.cc", - "update_attempter_unittest.cc", "update_boot_flags_action_unittest.cc", "update_manager/boxed_value_unittest.cc", "update_manager/chromeos_policy_unittest.cc", @@ -551,7 +549,7 @@ if (use.test) { "update_status_utils_unittest.cc", ] if (use.dlc) { - sources += [ "excluder_chromeos_unittest.cc" ] + sources += [ "cros/excluder_chromeos_unittest.cc" ] } # //common-mk:test should be on the top. @@ -594,7 +592,7 @@ if (use.fuzzer) { ] } executable("update_engine_omaha_request_action_fuzzer") { - sources = [ "omaha_request_action_fuzzer.cc" ] + sources = [ "cros/omaha_request_action_fuzzer.cc" ] configs += [ "//common-mk/common_fuzzer", ":target_defaults", diff --git a/binder_service_android.cc b/aosp/binder_service_android.cc similarity index 98% rename from binder_service_android.cc rename to aosp/binder_service_android.cc index 0c8bc2f4..ed76c4a8 100644 --- a/binder_service_android.cc +++ b/aosp/binder_service_android.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/binder_service_android.h" +#include "update_engine/aosp/binder_service_android.h" #include @@ -24,7 +24,7 @@ #include #include -#include "update_engine/binder_service_android_common.h" +#include "update_engine/aosp/binder_service_android_common.h" using android::binder::Status; using android::os::IUpdateEngineCallback; diff --git a/binder_service_android.h b/aosp/binder_service_android.h similarity index 92% rename from binder_service_android.h rename to aosp/binder_service_android.h index 5f282252..f41fbdf2 100644 --- a/binder_service_android.h +++ b/aosp/binder_service_android.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_BINDER_SERVICE_ANDROID_H_ -#define UPDATE_ENGINE_BINDER_SERVICE_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_H_ #include @@ -28,8 +28,8 @@ #include "android/os/BnUpdateEngine.h" #include "android/os/IUpdateEngineCallback.h" -#include "update_engine/service_delegate_android_interface.h" -#include "update_engine/service_observer_interface.h" +#include "update_engine/aosp/service_delegate_android_interface.h" +#include "update_engine/common/service_observer_interface.h" namespace chromeos_update_engine { @@ -96,4 +96,4 @@ class BinderUpdateEngineAndroidService : public android::os::BnUpdateEngine, } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_BINDER_SERVICE_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_H_ diff --git a/binder_service_android_common.h b/aosp/binder_service_android_common.h similarity index 87% rename from binder_service_android_common.h rename to aosp/binder_service_android_common.h index fc621d97..223b32ed 100644 --- a/binder_service_android_common.h +++ b/aosp/binder_service_android_common.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_ -#define UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_ +#ifndef UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_COMMON_H_ +#define UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_COMMON_H_ #include #include @@ -42,4 +42,4 @@ static inline std::vector ToVecString( } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_ +#endif // UPDATE_ENGINE_AOSP_BINDER_SERVICE_ANDROID_COMMON_H_ diff --git a/binder_service_stable_android.cc b/aosp/binder_service_stable_android.cc similarity index 97% rename from binder_service_stable_android.cc rename to aosp/binder_service_stable_android.cc index a12b349b..17b35eea 100644 --- a/binder_service_stable_android.cc +++ b/aosp/binder_service_stable_android.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/binder_service_stable_android.h" +#include "update_engine/aosp/binder_service_stable_android.h" #include @@ -24,7 +24,7 @@ #include #include -#include "update_engine/binder_service_android_common.h" +#include "update_engine/aosp/binder_service_android_common.h" using android::binder::Status; using android::os::IUpdateEngineStableCallback; diff --git a/binder_service_stable_android.h b/aosp/binder_service_stable_android.h similarity index 89% rename from binder_service_stable_android.h rename to aosp/binder_service_stable_android.h index 16677980..212afaa6 100644 --- a/binder_service_stable_android.h +++ b/aosp/binder_service_stable_android.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_ -#define UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_BINDER_SERVICE_STABLE_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_BINDER_SERVICE_STABLE_ANDROID_H_ #include @@ -28,8 +28,8 @@ #include "android/os/BnUpdateEngineStable.h" #include "android/os/IUpdateEngineStableCallback.h" -#include "update_engine/service_delegate_android_interface.h" -#include "update_engine/service_observer_interface.h" +#include "update_engine/aosp/service_delegate_android_interface.h" +#include "update_engine/common/service_observer_interface.h" namespace chromeos_update_engine { @@ -82,4 +82,4 @@ class BinderUpdateEngineAndroidStableService } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_BINDER_SERVICE_STABLE_ANDROID_H_ diff --git a/boot_control_android.cc b/aosp/boot_control_android.cc similarity index 98% rename from boot_control_android.cc rename to aosp/boot_control_android.cc index dee5fa84..bda65be9 100644 --- a/boot_control_android.cc +++ b/aosp/boot_control_android.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/boot_control_android.h" +#include "update_engine/aosp/boot_control_android.h" #include #include @@ -25,8 +25,8 @@ #include #include +#include "update_engine/aosp/dynamic_partition_control_android.h" #include "update_engine/common/utils.h" -#include "update_engine/dynamic_partition_control_android.h" using std::string; diff --git a/boot_control_android.h b/aosp/boot_control_android.h similarity index 92% rename from boot_control_android.h rename to aosp/boot_control_android.h index 5009dbd3..e288723d 100644 --- a/boot_control_android.h +++ b/aosp/boot_control_android.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_ -#define UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_BOOT_CONTROL_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_BOOT_CONTROL_ANDROID_H_ #include #include @@ -24,9 +24,9 @@ #include #include +#include "update_engine/aosp/dynamic_partition_control_android.h" #include "update_engine/common/boot_control.h" #include "update_engine/common/dynamic_partition_control_interface.h" -#include "update_engine/dynamic_partition_control_android.h" namespace chromeos_update_engine { @@ -70,4 +70,4 @@ class BootControlAndroid : public BootControlInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_BOOT_CONTROL_ANDROID_H_ diff --git a/cleanup_previous_update_action.cc b/aosp/cleanup_previous_update_action.cc similarity index 99% rename from cleanup_previous_update_action.cc rename to aosp/cleanup_previous_update_action.cc index 1a2476f0..278b1015 100644 --- a/cleanup_previous_update_action.cc +++ b/aosp/cleanup_previous_update_action.cc @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -#include "update_engine/cleanup_previous_update_action.h" +#include "update_engine/aosp/cleanup_previous_update_action.h" #include // NOLINT(build/c++11) -- for merge times #include diff --git a/cleanup_previous_update_action.h b/aosp/cleanup_previous_update_action.h similarity index 94% rename from cleanup_previous_update_action.h rename to aosp/cleanup_previous_update_action.h index 6f6ce078..73cef269 100644 --- a/cleanup_previous_update_action.h +++ b/aosp/cleanup_previous_update_action.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_ -#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_ +#ifndef UPDATE_ENGINE_AOSP_CLEANUP_PREVIOUS_UPDATE_ACTION_H_ +#define UPDATE_ENGINE_AOSP_CLEANUP_PREVIOUS_UPDATE_ACTION_H_ #include // NOLINT(build/c++11) -- for merge times #include @@ -92,4 +92,4 @@ class CleanupPreviousUpdateAction : public Action { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_ +#endif // UPDATE_ENGINE_AOSP_CLEANUP_PREVIOUS_UPDATE_ACTION_H_ diff --git a/daemon_android.cc b/aosp/daemon_android.cc similarity index 95% rename from daemon_android.cc rename to aosp/daemon_android.cc index 313d7ddc..c102e3b0 100644 --- a/daemon_android.cc +++ b/aosp/daemon_android.cc @@ -14,13 +14,13 @@ // limitations under the License. // -#include "update_engine/daemon_android.h" +#include "update_engine/aosp/daemon_android.h" #include #include -#include "update_engine/daemon_state_android.h" +#include "update_engine/aosp/daemon_state_android.h" using std::unique_ptr; diff --git a/daemon_android.h b/aosp/daemon_android.h similarity index 80% rename from daemon_android.h rename to aosp/daemon_android.h index f0c028ec..38a86891 100644 --- a/daemon_android.h +++ b/aosp/daemon_android.h @@ -14,18 +14,18 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DAEMON_ANDROID_H_ -#define UPDATE_ENGINE_DAEMON_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_DAEMON_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_DAEMON_ANDROID_H_ #include #include -#include "update_engine/binder_service_android.h" -#include "update_engine/binder_service_stable_android.h" +#include "update_engine/aosp/binder_service_android.h" +#include "update_engine/aosp/binder_service_stable_android.h" +#include "update_engine/common/daemon_base.h" +#include "update_engine/common/daemon_state_interface.h" #include "update_engine/common/subprocess.h" -#include "update_engine/daemon_base.h" -#include "update_engine/daemon_state_interface.h" namespace chromeos_update_engine { @@ -55,4 +55,4 @@ class DaemonAndroid : public DaemonBase { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DAEMON_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_DAEMON_ANDROID_H_ diff --git a/daemon_state_android.cc b/aosp/daemon_state_android.cc similarity index 96% rename from daemon_state_android.cc rename to aosp/daemon_state_android.cc index 3376e64a..9bdd1750 100644 --- a/daemon_state_android.cc +++ b/aosp/daemon_state_android.cc @@ -14,15 +14,15 @@ // limitations under the License. 
// -#include "update_engine/daemon_state_android.h" +#include "update_engine/aosp/daemon_state_android.h" #include +#include "update_engine/aosp/update_attempter_android.h" #include "update_engine/common/boot_control.h" #include "update_engine/common/boot_control_stub.h" #include "update_engine/common/hardware.h" #include "update_engine/common/prefs.h" -#include "update_engine/update_attempter_android.h" namespace chromeos_update_engine { diff --git a/daemon_state_android.h b/aosp/daemon_state_android.h similarity index 84% rename from daemon_state_android.h rename to aosp/daemon_state_android.h index 928a14eb..dea3a23f 100644 --- a/daemon_state_android.h +++ b/aosp/daemon_state_android.h @@ -14,20 +14,20 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DAEMON_STATE_ANDROID_H_ -#define UPDATE_ENGINE_DAEMON_STATE_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_DAEMON_STATE_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_DAEMON_STATE_ANDROID_H_ #include #include +#include "update_engine/aosp/service_delegate_android_interface.h" +#include "update_engine/aosp/update_attempter_android.h" #include "update_engine/certificate_checker.h" #include "update_engine/common/boot_control_interface.h" +#include "update_engine/common/daemon_state_interface.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs_interface.h" -#include "update_engine/daemon_state_interface.h" -#include "update_engine/service_delegate_android_interface.h" -#include "update_engine/service_observer_interface.h" -#include "update_engine/update_attempter_android.h" +#include "update_engine/common/service_observer_interface.h" namespace chromeos_update_engine { @@ -73,4 +73,4 @@ class DaemonStateAndroid : public DaemonStateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DAEMON_STATE_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_DAEMON_STATE_ANDROID_H_ diff --git a/dynamic_partition_control_android.cc b/aosp/dynamic_partition_control_android.cc similarity index 99% rename from dynamic_partition_control_android.cc rename to aosp/dynamic_partition_control_android.cc index ccb99ba4..e0459657 100644 --- a/dynamic_partition_control_android.cc +++ b/aosp/dynamic_partition_control_android.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/dynamic_partition_control_android.h" +#include "update_engine/aosp/dynamic_partition_control_android.h" #include // NOLINT(build/c++11) - using libsnapshot / liblp API #include @@ -39,10 +39,10 @@ #include #include -#include "update_engine/cleanup_previous_update_action.h" +#include "update_engine/aosp/cleanup_previous_update_action.h" +#include "update_engine/aosp/dynamic_partition_utils.h" #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/utils.h" -#include "update_engine/dynamic_partition_utils.h" #include "update_engine/payload_consumer/delta_performer.h" using android::base::GetBoolProperty; diff --git a/dynamic_partition_control_android.h b/aosp/dynamic_partition_control_android.h similarity index 98% rename from dynamic_partition_control_android.h rename to aosp/dynamic_partition_control_android.h index 49967f6c..79f87d9d 100644 --- a/dynamic_partition_control_android.h +++ b/aosp/dynamic_partition_control_android.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ -#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ #include #include @@ -291,4 +291,4 @@ class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_CONTROL_ANDROID_H_ diff --git a/dynamic_partition_control_android_unittest.cc b/aosp/dynamic_partition_control_android_unittest.cc similarity index 98% rename from dynamic_partition_control_android_unittest.cc rename to aosp/dynamic_partition_control_android_unittest.cc index c1e0dafc..5d6463be 100644 --- a/dynamic_partition_control_android_unittest.cc +++ b/aosp/dynamic_partition_control_android_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/dynamic_partition_control_android.h" +#include "update_engine/aosp/dynamic_partition_control_android.h" #include #include @@ -26,10 +26,10 @@ #include #include +#include "update_engine/aosp/dynamic_partition_test_utils.h" +#include "update_engine/aosp/mock_dynamic_partition_control.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/test_utils.h" -#include "update_engine/dynamic_partition_test_utils.h" -#include "update_engine/mock_dynamic_partition_control.h" using android::dm::DmDeviceState; using android::snapshot::MockSnapshotManager; @@ -792,11 +792,11 @@ TEST_F(DynamicPartitionControlAndroidTest, ResetUpdate) { } TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) { - // clang-format off std::string fstab_content = - "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical\n" // NOLINT(whitespace/line_length) - "/dev/block/by-name/system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other\n"; // NOLINT(whitespace/line_length) - // clang-format on + "system /postinstall ext4 ro,nosuid,nodev,noexec " + "slotselect_other,logical\n" + "/dev/block/by-name/system /postinstall ext4 " + "ro,nosuid,nodev,noexec slotselect_other\n"; ScopedTempFile fstab; ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content)); ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()), @@ -804,10 +804,9 @@ TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) { } TEST_F(DynamicPartitionControlAndroidTest, IsAvbEnabledInFstab) { - // clang-format off std::string fstab_content = - "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical,avb_keys=/foo\n"; // NOLINT(whitespace/line_length) - // clang-format on + "system /postinstall ext4 ro,nosuid,nodev,noexec " + "slotselect_other,logical,avb_keys=/foo\n"; ScopedTempFile fstab; ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content)); ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()), diff --git a/dynamic_partition_test_utils.h b/aosp/dynamic_partition_test_utils.h similarity index 98% rename from dynamic_partition_test_utils.h rename to aosp/dynamic_partition_test_utils.h index d701dce8..c7be1cb7 100644 --- a/dynamic_partition_test_utils.h +++ b/aosp/dynamic_partition_test_utils.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_ -#define UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_ +#ifndef UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_TEST_UTILS_H_ +#define UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_TEST_UTILS_H_ #include @@ -285,4 +285,4 @@ inline std::ostream& operator<<(std::ostream& os, const TestParam& param) { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_ +#endif // UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_TEST_UTILS_H_ diff --git a/dynamic_partition_utils.cc b/aosp/dynamic_partition_utils.cc similarity index 95% rename from dynamic_partition_utils.cc rename to aosp/dynamic_partition_utils.cc index f9bd886b..6b77a45c 100644 --- a/dynamic_partition_utils.cc +++ b/aosp/dynamic_partition_utils.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/dynamic_partition_utils.h" +#include "update_engine/aosp/dynamic_partition_utils.h" #include diff --git a/dynamic_partition_utils.h b/aosp/dynamic_partition_utils.h similarity index 85% rename from dynamic_partition_utils.h rename to aosp/dynamic_partition_utils.h index 09fce00c..5a51d5e1 100644 --- a/dynamic_partition_utils.h +++ b/aosp/dynamic_partition_utils.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_ -#define UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_ +#ifndef UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_UTILS_H_ +#define UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_UTILS_H_ #include @@ -30,4 +30,4 @@ void DeleteGroupsWithSuffix(android::fs_mgr::MetadataBuilder* builder, } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_ +#endif // UPDATE_ENGINE_AOSP_DYNAMIC_PARTITION_UTILS_H_ diff --git a/hardware_android.cc b/aosp/hardware_android.cc similarity index 99% rename from hardware_android.cc rename to aosp/hardware_android.cc index 8d1fdfdd..3b0d9a8a 100644 --- a/hardware_android.cc +++ b/aosp/hardware_android.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/hardware_android.h" +#include "update_engine/aosp/hardware_android.h" #include #include diff --git a/hardware_android.h b/aosp/hardware_android.h similarity index 95% rename from hardware_android.h rename to aosp/hardware_android.h index d7e39f3b..5e09fb36 100644 --- a/hardware_android.h +++ b/aosp/hardware_android.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_HARDWARE_ANDROID_H_ -#define UPDATE_ENGINE_HARDWARE_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_HARDWARE_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_HARDWARE_ANDROID_H_ #include #include @@ -77,4 +77,4 @@ class HardwareAndroid : public HardwareInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_HARDWARE_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_HARDWARE_ANDROID_H_ diff --git a/logging_android.cc b/aosp/logging_android.cc similarity index 100% rename from logging_android.cc rename to aosp/logging_android.cc diff --git a/metrics_reporter_android.cc b/aosp/metrics_reporter_android.cc similarity index 99% rename from metrics_reporter_android.cc rename to aosp/metrics_reporter_android.cc index d8fa6e5b..ea3bb6d2 100644 --- a/metrics_reporter_android.cc +++ b/aosp/metrics_reporter_android.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/metrics_reporter_android.h" +#include "update_engine/aosp/metrics_reporter_android.h" #include diff --git a/metrics_reporter_android.h b/aosp/metrics_reporter_android.h similarity index 92% rename from metrics_reporter_android.h rename to aosp/metrics_reporter_android.h index 7770619c..4a173bf3 100644 --- a/metrics_reporter_android.h +++ b/aosp/metrics_reporter_android.h @@ -14,14 +14,14 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_ -#define UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_METRICS_REPORTER_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_METRICS_REPORTER_ANDROID_H_ #include #include "update_engine/common/error_code.h" -#include "update_engine/metrics_constants.h" -#include "update_engine/metrics_reporter_interface.h" +#include "update_engine/common/metrics_constants.h" +#include "update_engine/common/metrics_reporter_interface.h" namespace chromeos_update_engine { @@ -98,4 +98,4 @@ class MetricsReporterAndroid : public MetricsReporterInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_METRICS_REPORTER_ANDROID_H_ diff --git a/mock_dynamic_partition_control.h b/aosp/mock_dynamic_partition_control.h similarity index 98% rename from mock_dynamic_partition_control.h rename to aosp/mock_dynamic_partition_control.h index e85df327..382106e8 100644 --- a/mock_dynamic_partition_control.h +++ b/aosp/mock_dynamic_partition_control.h @@ -22,9 +22,9 @@ #include +#include "update_engine/aosp/dynamic_partition_control_android.h" #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/dynamic_partition_control_interface.h" -#include "update_engine/dynamic_partition_control_android.h" namespace chromeos_update_engine { diff --git a/network_selector_android.cc b/aosp/network_selector_android.cc similarity index 88% rename from network_selector_android.cc rename to aosp/network_selector_android.cc index 55ba7991..a7db4154 100644 --- a/network_selector_android.cc +++ b/aosp/network_selector_android.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/network_selector_android.h" +#include "update_engine/aosp/network_selector_android.h" #include @@ -25,14 +25,14 @@ namespace chromeos_update_engine { namespace network { -// Factory defined in network_selector.h. +// Factory defined in common/network_selector.h. std::unique_ptr CreateNetworkSelector() { return std::make_unique(); } } // namespace network -// Defined in network_selector_interface.h. +// Defined in common/network_selector_interface.h. const NetworkId kDefaultNetworkId = NETWORK_UNSPECIFIED; bool NetworkSelectorAndroid::SetProcessNetwork(NetworkId network_id) { diff --git a/network_selector_android.h b/aosp/network_selector_android.h similarity index 82% rename from network_selector_android.h rename to aosp/network_selector_android.h index 135536c2..b79d1b36 100644 --- a/network_selector_android.h +++ b/aosp/network_selector_android.h @@ -14,12 +14,12 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_ANDROID_H_ -#define UPDATE_ENGINE_NETWORK_SELECTOR_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_NETWORK_SELECTOR_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_NETWORK_SELECTOR_ANDROID_H_ #include -#include "update_engine/network_selector_interface.h" +#include "update_engine/common/network_selector_interface.h" namespace chromeos_update_engine { @@ -37,4 +37,4 @@ class NetworkSelectorAndroid final : public NetworkSelectorInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_NETWORK_SELECTOR_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_NETWORK_SELECTOR_ANDROID_H_ diff --git a/common/platform_constants_android.cc b/aosp/platform_constants_android.cc similarity index 100% rename from common/platform_constants_android.cc rename to aosp/platform_constants_android.cc diff --git a/service_delegate_android_interface.h b/aosp/service_delegate_android_interface.h similarity index 96% rename from service_delegate_android_interface.h rename to aosp/service_delegate_android_interface.h index 34a97123..3c287940 100644 --- a/service_delegate_android_interface.h +++ b/aosp/service_delegate_android_interface.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_SERVICE_DELEGATE_ANDROID_INTERFACE_H_ -#define UPDATE_ENGINE_SERVICE_DELEGATE_ANDROID_INTERFACE_H_ +#ifndef UPDATE_ENGINE_AOSP_SERVICE_DELEGATE_ANDROID_INTERFACE_H_ +#define UPDATE_ENGINE_AOSP_SERVICE_DELEGATE_ANDROID_INTERFACE_H_ #include @@ -124,4 +124,4 @@ class ServiceDelegateAndroidInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_SERVICE_DELEGATE_ANDROID_INTERFACE_H_ +#endif // UPDATE_ENGINE_AOSP_SERVICE_DELEGATE_ANDROID_INTERFACE_H_ diff --git a/sideload_main.cc b/aosp/sideload_main.cc similarity index 98% rename from sideload_main.cc rename to aosp/sideload_main.cc index 27967cda..3cbc0c7f 100644 --- a/sideload_main.cc +++ b/aosp/sideload_main.cc @@ -28,15 +28,15 @@ #include #include +#include "update_engine/aosp/update_attempter_android.h" #include "update_engine/common/boot_control.h" #include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware.h" +#include "update_engine/common/logging.h" #include "update_engine/common/prefs.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" -#include "update_engine/logging.h" -#include "update_engine/update_attempter_android.h" using std::string; using std::vector; diff --git a/update_attempter_android.cc b/aosp/update_attempter_android.cc similarity index 99% rename from update_attempter_android.cc rename to aosp/update_attempter_android.cc index 7fc13e11..57430fea 100644 --- a/update_attempter_android.cc +++ b/aosp/update_attempter_android.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/update_attempter_android.h" +#include "update_engine/aosp/update_attempter_android.h" #include #include @@ -31,18 +31,18 @@ #include #include -#include "update_engine/cleanup_previous_update_action.h" +#include "update_engine/aosp/cleanup_previous_update_action.h" #include "update_engine/common/constants.h" +#include "update_engine/common/daemon_state_interface.h" +#include "update_engine/common/download_action.h" #include "update_engine/common/error_code_utils.h" #include "update_engine/common/file_fetcher.h" +#include "update_engine/common/metrics_reporter_interface.h" +#include "update_engine/common/network_selector.h" #include "update_engine/common/utils.h" -#include "update_engine/daemon_state_interface.h" -#include "update_engine/metrics_reporter_interface.h" #include "update_engine/metrics_utils.h" -#include "update_engine/network_selector.h" #include "update_engine/payload_consumer/certificate_parser_interface.h" #include "update_engine/payload_consumer/delta_performer.h" -#include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/file_descriptor_utils.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" diff --git a/update_attempter_android.h b/aosp/update_attempter_android.h similarity index 94% rename from update_attempter_android.h rename to aosp/update_attempter_android.h index 55003a09..499f8f6b 100644 --- a/update_attempter_android.h +++ b/aosp/update_attempter_android.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_UPDATE_ATTEMPTER_ANDROID_H_ -#define UPDATE_ENGINE_UPDATE_ATTEMPTER_ANDROID_H_ +#ifndef UPDATE_ENGINE_AOSP_UPDATE_ATTEMPTER_ANDROID_H_ +#define UPDATE_ENGINE_AOSP_UPDATE_ATTEMPTER_ANDROID_H_ #include @@ -26,21 +26,21 @@ #include #include +#include "update_engine/aosp/service_delegate_android_interface.h" #include "update_engine/client_library/include/update_engine/update_status.h" #include "update_engine/common/action_processor.h" #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/clock.h" +#include "update_engine/common/daemon_state_interface.h" +#include "update_engine/common/download_action.h" #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/metrics_reporter_interface.h" +#include "update_engine/common/network_selector_interface.h" #include "update_engine/common/prefs_interface.h" -#include "update_engine/daemon_state_interface.h" -#include "update_engine/metrics_reporter_interface.h" +#include "update_engine/common/service_observer_interface.h" #include "update_engine/metrics_utils.h" -#include "update_engine/network_selector_interface.h" -#include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" #include "update_engine/payload_consumer/postinstall_runner_action.h" -#include "update_engine/service_delegate_android_interface.h" -#include "update_engine/service_observer_interface.h" namespace chromeos_update_engine { @@ -246,4 +246,4 @@ class UpdateAttempterAndroid } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_UPDATE_ATTEMPTER_ANDROID_H_ +#endif // UPDATE_ENGINE_AOSP_UPDATE_ATTEMPTER_ANDROID_H_ diff --git a/update_attempter_android_unittest.cc b/aosp/update_attempter_android_unittest.cc similarity index 97% rename from update_attempter_android_unittest.cc rename to aosp/update_attempter_android_unittest.cc index 
721b7352..bb44450d 100644 --- a/update_attempter_android_unittest.cc +++ b/aosp/update_attempter_android_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/update_attempter_android.h" +#include "update_engine/aosp/update_attempter_android.h" #include #include @@ -24,15 +24,15 @@ #include #include +#include "update_engine/aosp/daemon_state_android.h" #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/fake_prefs.h" #include "update_engine/common/mock_action_processor.h" +#include "update_engine/common/mock_metrics_reporter.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/daemon_state_android.h" -#include "update_engine/mock_metrics_reporter.h" using base::Time; using base::TimeDelta; diff --git a/update_engine_client_android.cc b/aosp/update_engine_client_android.cc similarity index 100% rename from update_engine_client_android.cc rename to aosp/update_engine_client_android.cc diff --git a/common/cleanup_previous_update_action_delegate.h b/common/cleanup_previous_update_action_delegate.h index 7dad9c50..8daf860e 100644 --- a/common/cleanup_previous_update_action_delegate.h +++ b/common/cleanup_previous_update_action_delegate.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_ -#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_ +#ifndef UPDATE_ENGINE_COMMON_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_ +#define UPDATE_ENGINE_COMMON_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_ namespace chromeos_update_engine { @@ -29,4 +29,4 @@ class CleanupPreviousUpdateActionDelegateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_ +#endif // UPDATE_ENGINE_COMMON_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_ diff --git a/connection_utils.cc b/common/connection_utils.cc similarity index 97% rename from connection_utils.cc rename to common/connection_utils.cc index 5af7341f..44e51286 100644 --- a/connection_utils.cc +++ b/common/connection_utils.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/connection_utils.h" +#include "update_engine/common/connection_utils.h" #include diff --git a/connection_utils.h b/common/connection_utils.h similarity index 89% rename from connection_utils.h rename to common/connection_utils.h index 4e71fcf7..5d63fb22 100644 --- a/connection_utils.h +++ b/common/connection_utils.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_CONNECTION_UTILS_H_ -#define UPDATE_ENGINE_CONNECTION_UTILS_H_ +#ifndef UPDATE_ENGINE_COMMON_CONNECTION_UTILS_H_ +#define UPDATE_ENGINE_COMMON_CONNECTION_UTILS_H_ #include @@ -47,4 +47,4 @@ const char* StringForConnectionType(ConnectionType type); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_CONNECTION_UTILS_H_ +#endif // UPDATE_ENGINE_COMMON_CONNECTION_UTILS_H_ diff --git a/daemon_base.h b/common/daemon_base.h similarity index 88% rename from daemon_base.h rename to common/daemon_base.h index 742a0ba2..4bc5ef7d 100644 --- a/daemon_base.h +++ b/common/daemon_base.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_DAEMON_BASE_H_ -#define UPDATE_ENGINE_DAEMON_BASE_H_ +#ifndef UPDATE_ENGINE_COMMON_DAEMON_BASE_H_ +#define UPDATE_ENGINE_COMMON_DAEMON_BASE_H_ #include @@ -37,4 +37,4 @@ class DaemonBase : public brillo::Daemon { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DAEMON_BASE_H_ +#endif // UPDATE_ENGINE_COMMON_DAEMON_BASE_H_ diff --git a/daemon_state_interface.h b/common/daemon_state_interface.h similarity index 85% rename from daemon_state_interface.h rename to common/daemon_state_interface.h index 23568168..9509fa2f 100644 --- a/daemon_state_interface.h +++ b/common/daemon_state_interface.h @@ -14,10 +14,10 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DAEMON_STATE_INTERFACE_H_ -#define UPDATE_ENGINE_DAEMON_STATE_INTERFACE_H_ +#ifndef UPDATE_ENGINE_COMMON_DAEMON_STATE_INTERFACE_H_ +#define UPDATE_ENGINE_COMMON_DAEMON_STATE_INTERFACE_H_ -#include "update_engine/service_observer_interface.h" +#include "update_engine/common/service_observer_interface.h" #include #include @@ -46,4 +46,4 @@ class DaemonStateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DAEMON_STATE_INTERFACE_H_ +#endif // UPDATE_ENGINE_COMMON_DAEMON_STATE_INTERFACE_H_ diff --git a/payload_consumer/download_action.h b/common/download_action.h similarity index 97% rename from payload_consumer/download_action.h rename to common/download_action.h index 69284438..c167c2d2 100644 --- a/payload_consumer/download_action.h +++ b/common/download_action.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_ -#define UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_ +#ifndef UPDATE_ENGINE_COMMON_DOWNLOAD_ACTION_H_ +#define UPDATE_ENGINE_COMMON_DOWNLOAD_ACTION_H_ #include #include @@ -28,9 +28,9 @@ #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/http_fetcher.h" #include "update_engine/common/multi_range_http_fetcher.h" +#include "update_engine/common/system_state.h" #include "update_engine/payload_consumer/delta_performer.h" #include "update_engine/payload_consumer/install_plan.h" -#include "update_engine/system_state.h" // The Download Action downloads a specified url to disk. The url should point // to an update in a delta payload format. The payload will be piped into a @@ -200,4 +200,4 @@ static_assert(8 == sizeof(off_t), "off_t not 64 bit"); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_DOWNLOAD_ACTION_H_ +#endif // UPDATE_ENGINE_COMMON_DOWNLOAD_ACTION_H_ diff --git a/common/http_fetcher.h b/common/http_fetcher.h index f74a0f05..7fa5f098 100644 --- a/common/http_fetcher.h +++ b/common/http_fetcher.h @@ -28,8 +28,8 @@ #include #include "update_engine/common/http_common.h" +#include "update_engine/common/metrics_constants.h" #include "update_engine/common/proxy_resolver.h" -#include "update_engine/metrics_constants.h" // This class is a simple wrapper around an HTTP library (libcurl). We can // easily mock out this interface for testing. diff --git a/logging.h b/common/logging.h similarity index 100% rename from logging.h rename to common/logging.h diff --git a/metrics_constants.h b/common/metrics_constants.h similarity index 97% rename from metrics_constants.h rename to common/metrics_constants.h index 679680c5..b7633b90 100644 --- a/metrics_constants.h +++ b/common/metrics_constants.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_METRICS_CONSTANTS_H_ -#define UPDATE_ENGINE_METRICS_CONSTANTS_H_ +#ifndef UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_ +#define UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_ namespace chromeos_update_engine { @@ -144,4 +144,4 @@ enum class RollbackResult { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_METRICS_CONSTANTS_H_ +#endif // UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_ diff --git a/metrics_reporter_interface.h b/common/metrics_reporter_interface.h similarity index 97% rename from metrics_reporter_interface.h rename to common/metrics_reporter_interface.h index 180a6803..d7c53472 100644 --- a/metrics_reporter_interface.h +++ b/common/metrics_reporter_interface.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_ -#define UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_ +#ifndef UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_ +#define UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_ #include #include @@ -24,8 +24,8 @@ #include "update_engine/common/constants.h" #include "update_engine/common/error_code.h" -#include "update_engine/metrics_constants.h" -#include "update_engine/system_state.h" +#include "update_engine/common/metrics_constants.h" +#include "update_engine/common/system_state.h" namespace chromeos_update_engine { @@ -244,4 +244,4 @@ class MetricsReporterInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_ +#endif // UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_ diff --git a/metrics_reporter_stub.cc b/common/metrics_reporter_stub.cc similarity index 94% rename from metrics_reporter_stub.cc rename to common/metrics_reporter_stub.cc index 81664a53..dcb4e8cd 100644 --- a/metrics_reporter_stub.cc +++ b/common/metrics_reporter_stub.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/metrics_reporter_stub.h" +#include "update_engine/common/metrics_reporter_stub.h" #include diff --git a/metrics_reporter_stub.h b/common/metrics_reporter_stub.h similarity index 92% rename from metrics_reporter_stub.h rename to common/metrics_reporter_stub.h index 0cfeea0c..1470aaab 100644 --- a/metrics_reporter_stub.h +++ b/common/metrics_reporter_stub.h @@ -14,14 +14,14 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_METRICS_REPORTER_STUB_H_ -#define UPDATE_ENGINE_METRICS_REPORTER_STUB_H_ +#ifndef UPDATE_ENGINE_COMMON_METRICS_REPORTER_STUB_H_ +#define UPDATE_ENGINE_COMMON_METRICS_REPORTER_STUB_H_ #include #include "update_engine/common/error_code.h" -#include "update_engine/metrics_constants.h" -#include "update_engine/metrics_reporter_interface.h" +#include "update_engine/common/metrics_constants.h" +#include "update_engine/common/metrics_reporter_interface.h" namespace chromeos_update_engine { @@ -98,4 +98,4 @@ class MetricsReporterStub : public MetricsReporterInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_METRICS_REPORTER_STUB_H_ +#endif // UPDATE_ENGINE_COMMON_METRICS_REPORTER_STUB_H_ diff --git a/payload_consumer/mock_download_action.h b/common/mock_download_action.h similarity index 81% rename from payload_consumer/mock_download_action.h rename to common/mock_download_action.h index 3abb809e..ecda9a31 100644 --- a/payload_consumer/mock_download_action.h +++ b/common/mock_download_action.h @@ -14,15 +14,15 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_MOCK_DOWNLOAD_ACTION_H_ -#define UPDATE_ENGINE_PAYLOAD_CONSUMER_MOCK_DOWNLOAD_ACTION_H_ +#ifndef UPDATE_ENGINE_COMMON_MOCK_DOWNLOAD_ACTION_H_ +#define UPDATE_ENGINE_COMMON_MOCK_DOWNLOAD_ACTION_H_ #include #include +#include "update_engine/common/download_action.h" #include "update_engine/common/error_code.h" -#include "update_engine/payload_consumer/download_action.h" namespace chromeos_update_engine { @@ -38,4 +38,4 @@ class MockDownloadActionDelegate : public DownloadActionDelegate { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_MOCK_DOWNLOAD_ACTION_H_ +#endif // UPDATE_ENGINE_COMMON_MOCK_DOWNLOAD_ACTION_H_ diff --git a/common/mock_excluder.h b/common/mock_excluder.h index bc547729..560ba0d8 100644 --- a/common/mock_excluder.h +++ b/common/mock_excluder.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_APP_EXCLUDER_H_ -#define UPDATE_ENGINE_MOCK_APP_EXCLUDER_H_ +#ifndef UPDATE_ENGINE_COMMON_MOCK_APP_EXCLUDER_H_ +#define UPDATE_ENGINE_COMMON_MOCK_APP_EXCLUDER_H_ #include "update_engine/common/excluder_interface.h" @@ -34,4 +34,4 @@ class MockExcluder : public ExcluderInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_APP_EXCLUDER_H_ +#endif // UPDATE_ENGINE_COMMON_MOCK_APP_EXCLUDER_H_ diff --git a/mock_metrics_reporter.h b/common/mock_metrics_reporter.h similarity index 94% rename from mock_metrics_reporter.h rename to common/mock_metrics_reporter.h index baf3a784..922d1ee2 100644 --- a/mock_metrics_reporter.h +++ b/common/mock_metrics_reporter.h @@ -14,14 +14,14 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_ -#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_ +#ifndef UPDATE_ENGINE_COMMON_MOCK_METRICS_REPORTER_H_ +#define UPDATE_ENGINE_COMMON_MOCK_METRICS_REPORTER_H_ #include #include -#include "update_engine/metrics_reporter_interface.h" +#include "update_engine/common/metrics_reporter_interface.h" namespace chromeos_update_engine { @@ -96,4 +96,4 @@ class MockMetricsReporter : public MetricsReporterInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_ +#endif // UPDATE_ENGINE_COMMON_MOCK_METRICS_REPORTER_H_ diff --git a/mock_service_observer.h b/common/mock_service_observer.h similarity index 81% rename from mock_service_observer.h rename to common/mock_service_observer.h index e434eabe..2c895f98 100644 --- a/mock_service_observer.h +++ b/common/mock_service_observer.h @@ -14,11 +14,11 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_ -#define UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_ +#ifndef UPDATE_ENGINE_COMMON_MOCK_SERVICE_OBSERVER_H_ +#define UPDATE_ENGINE_COMMON_MOCK_SERVICE_OBSERVER_H_ #include -#include "update_engine/service_observer_interface.h" +#include "update_engine/common/service_observer_interface.h" namespace chromeos_update_engine { @@ -32,4 +32,4 @@ class MockServiceObserver : public ServiceObserverInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_ +#endif // UPDATE_ENGINE_COMMON_MOCK_SERVICE_OBSERVER_H_ diff --git a/network_selector.h b/common/network_selector.h similarity index 81% rename from network_selector.h rename to common/network_selector.h index 22aed8e1..bfc09c54 100644 --- a/network_selector.h +++ b/common/network_selector.h @@ -14,12 +14,12 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_H_ -#define UPDATE_ENGINE_NETWORK_SELECTOR_H_ +#ifndef UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_H_ +#define UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_H_ #include -#include "update_engine/network_selector_interface.h" +#include "update_engine/common/network_selector_interface.h" namespace chromeos_update_engine { namespace network { @@ -30,4 +30,4 @@ std::unique_ptr CreateNetworkSelector(); } // namespace network } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_NETWORK_SELECTOR_H_ +#endif // UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_H_ diff --git a/network_selector_interface.h b/common/network_selector_interface.h similarity index 88% rename from network_selector_interface.h rename to common/network_selector_interface.h index bd0948f9..42ce32e2 100644 --- a/network_selector_interface.h +++ b/common/network_selector_interface.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_INTERFACE_H_ -#define UPDATE_ENGINE_NETWORK_SELECTOR_INTERFACE_H_ +#ifndef UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_INTERFACE_H_ +#define UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_INTERFACE_H_ #include @@ -45,4 +45,4 @@ class NetworkSelectorInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_NETWORK_SELECTOR_INTERFACE_H_ +#endif // UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_INTERFACE_H_ diff --git a/network_selector_stub.cc b/common/network_selector_stub.cc similarity index 88% rename from network_selector_stub.cc rename to common/network_selector_stub.cc index 67925f41..24c0e251 100644 --- a/network_selector_stub.cc +++ b/common/network_selector_stub.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/network_selector_stub.h" +#include "update_engine/common/network_selector_stub.h" #include @@ -24,14 +24,14 @@ namespace chromeos_update_engine { namespace network { -// Factory defined in network_selector.h. +// Factory defined in common/network_selector.h. std::unique_ptr CreateNetworkSelector() { return std::make_unique(); } } // namespace network -// Defined in network_selector_interface.h. +// Defined in common/network_selector_interface.h. const NetworkId kDefaultNetworkId = 0; bool NetworkSelectorStub::SetProcessNetwork(NetworkId network_id) { diff --git a/network_selector_stub.h b/common/network_selector_stub.h similarity index 82% rename from network_selector_stub.h rename to common/network_selector_stub.h index b3f7b487..b32df919 100644 --- a/network_selector_stub.h +++ b/common/network_selector_stub.h @@ -14,12 +14,12 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_NETWORK_SELECTOR_STUB_H_ -#define UPDATE_ENGINE_NETWORK_SELECTOR_STUB_H_ +#ifndef UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_STUB_H_ +#define UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_STUB_H_ #include -#include "update_engine/network_selector_interface.h" +#include "update_engine/common/network_selector_interface.h" namespace chromeos_update_engine { @@ -37,4 +37,4 @@ class NetworkSelectorStub final : public NetworkSelectorInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_NETWORK_SELECTOR_STUB_H_ +#endif // UPDATE_ENGINE_COMMON_NETWORK_SELECTOR_STUB_H_ diff --git a/service_observer_interface.h b/common/service_observer_interface.h similarity index 88% rename from service_observer_interface.h rename to common/service_observer_interface.h index 4edb0ac2..c4712314 100644 --- a/service_observer_interface.h +++ b/common/service_observer_interface.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_SERVICE_OBSERVER_INTERFACE_H_ -#define UPDATE_ENGINE_SERVICE_OBSERVER_INTERFACE_H_ +#ifndef UPDATE_ENGINE_COMMON_SERVICE_OBSERVER_INTERFACE_H_ +#define UPDATE_ENGINE_COMMON_SERVICE_OBSERVER_INTERFACE_H_ #include #include @@ -43,4 +43,4 @@ class ServiceObserverInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_SERVICE_OBSERVER_INTERFACE_H_ +#endif // UPDATE_ENGINE_COMMON_SERVICE_OBSERVER_INTERFACE_H_ diff --git a/system_state.h b/common/system_state.h similarity index 96% rename from system_state.h rename to common/system_state.h index f46cbcf5..7a670466 100644 --- a/system_state.h +++ b/common/system_state.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_SYSTEM_STATE_H_ -#define UPDATE_ENGINE_SYSTEM_STATE_H_ +#ifndef UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_ +#define UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_ namespace chromeos_update_manager { @@ -117,4 +117,4 @@ class SystemState { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_SYSTEM_STATE_H_ +#endif // UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_ diff --git a/connection_manager_android.cc b/connection_manager_android.cc deleted file mode 100644 index 9d0c57bb..00000000 --- a/connection_manager_android.cc +++ /dev/null @@ -1,43 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "update_engine/connection_manager_android.h" - -#include - -namespace chromeos_update_engine { - -namespace connection_manager { -std::unique_ptr CreateConnectionManager( - SystemState* system_state) { - return std::unique_ptr( - new ConnectionManagerAndroid()); -} -} // namespace connection_manager - -bool ConnectionManagerAndroid::GetConnectionProperties( - ConnectionType* out_type, ConnectionTethering* out_tethering) { - return false; -} -bool ConnectionManagerAndroid::IsUpdateAllowedOver( - ConnectionType type, ConnectionTethering tethering) const { - return true; -} -bool ConnectionManagerAndroid::IsAllowedConnectionTypesForUpdateSet() const { - return false; -} - -} // namespace chromeos_update_engine diff --git a/connection_manager_android.h b/connection_manager_android.h deleted file mode 100644 index 006f4ead..00000000 --- a/connection_manager_android.h +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_ -#define UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_ - -#include - -#include "update_engine/connection_manager_interface.h" - -namespace chromeos_update_engine { - -// TODO(senj): Remove this class and use ShillProvider from the UpdateManager. -class ConnectionManagerAndroid : public ConnectionManagerInterface { - public: - ConnectionManagerAndroid() = default; - ~ConnectionManagerAndroid() override = default; - - // ConnectionManagerInterface overrides. - bool GetConnectionProperties(ConnectionType* out_type, - ConnectionTethering* out_tethering) override; - bool IsUpdateAllowedOver(ConnectionType type, - ConnectionTethering tethering) const override; - bool IsAllowedConnectionTypesForUpdateSet() const override; - - DISALLOW_COPY_AND_ASSIGN(ConnectionManagerAndroid); -}; - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_CONNECTION_MANAGER_ANDROID_H_ diff --git a/boot_control_chromeos.cc b/cros/boot_control_chromeos.cc similarity index 99% rename from boot_control_chromeos.cc rename to cros/boot_control_chromeos.cc index da2c8916..17659ae7 100644 --- a/boot_control_chromeos.cc +++ b/cros/boot_control_chromeos.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/boot_control_chromeos.h" +#include "update_engine/cros/boot_control_chromeos.h" #include #include diff --git a/boot_control_chromeos.h b/cros/boot_control_chromeos.h similarity index 96% rename from boot_control_chromeos.h rename to cros/boot_control_chromeos.h index 42716720..0dff2c02 100644 --- a/boot_control_chromeos.h +++ b/cros/boot_control_chromeos.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_ -#define UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_ +#ifndef UPDATE_ENGINE_CROS_BOOT_CONTROL_CHROMEOS_H_ +#define UPDATE_ENGINE_CROS_BOOT_CONTROL_CHROMEOS_H_ #include #include @@ -101,4 +101,4 @@ class BootControlChromeOS : public BootControlInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_ +#endif // UPDATE_ENGINE_CROS_BOOT_CONTROL_CHROMEOS_H_ diff --git a/boot_control_chromeos_unittest.cc b/cros/boot_control_chromeos_unittest.cc similarity index 98% rename from boot_control_chromeos_unittest.cc rename to cros/boot_control_chromeos_unittest.cc index 1c40dcec..fc1dd1e5 100644 --- a/boot_control_chromeos_unittest.cc +++ b/cros/boot_control_chromeos_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/boot_control_chromeos.h" +#include "update_engine/cros/boot_control_chromeos.h" #include diff --git a/chrome_browser_proxy_resolver.cc b/cros/chrome_browser_proxy_resolver.cc similarity index 95% rename from chrome_browser_proxy_resolver.cc rename to cros/chrome_browser_proxy_resolver.cc index bfb58f7b..3ea8a9b1 100644 --- a/chrome_browser_proxy_resolver.cc +++ b/cros/chrome_browser_proxy_resolver.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/chrome_browser_proxy_resolver.h" +#include "update_engine/cros/chrome_browser_proxy_resolver.h" #include @@ -23,7 +23,7 @@ #include #include -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" namespace chromeos_update_engine { diff --git a/chrome_browser_proxy_resolver.h b/cros/chrome_browser_proxy_resolver.h similarity index 91% rename from chrome_browser_proxy_resolver.h rename to cros/chrome_browser_proxy_resolver.h index 10a55fb2..76848ef3 100644 --- a/chrome_browser_proxy_resolver.h +++ b/cros/chrome_browser_proxy_resolver.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_CHROME_BROWSER_PROXY_RESOLVER_H_ -#define UPDATE_ENGINE_CHROME_BROWSER_PROXY_RESOLVER_H_ +#ifndef UPDATE_ENGINE_CROS_CHROME_BROWSER_PROXY_RESOLVER_H_ +#define UPDATE_ENGINE_CROS_CHROME_BROWSER_PROXY_RESOLVER_H_ #include #include @@ -63,4 +63,4 @@ class ChromeBrowserProxyResolver : public ProxyResolver { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_CHROME_BROWSER_PROXY_RESOLVER_H_ +#endif // UPDATE_ENGINE_CROS_CHROME_BROWSER_PROXY_RESOLVER_H_ diff --git a/common_service.cc b/cros/common_service.cc similarity index 97% rename from common_service.cc rename to cros/common_service.cc index 85fb9e4f..aecad8bd 100644 --- a/common_service.cc +++ b/cros/common_service.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/common_service.h" +#include "update_engine/cros/common_service.h" #include @@ -30,12 +30,12 @@ #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_manager_interface.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/omaha_utils.h" -#include "update_engine/p2p_manager.h" -#include "update_engine/payload_state_interface.h" -#include "update_engine/update_attempter.h" +#include "update_engine/cros/connection_manager_interface.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/omaha_utils.h" +#include "update_engine/cros/p2p_manager.h" +#include "update_engine/cros/payload_state_interface.h" +#include "update_engine/cros/update_attempter.h" using base::StringPrintf; using brillo::ErrorPtr; diff --git a/common_service.h b/cros/common_service.h similarity index 97% rename from common_service.h rename to cros/common_service.h index cfcece51..6169d9cb 100644 --- a/common_service.h +++ b/cros/common_service.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_COMMON_SERVICE_H_ -#define UPDATE_ENGINE_COMMON_SERVICE_H_ +#ifndef UPDATE_ENGINE_CROS_SERVICE_H_ +#define UPDATE_ENGINE_CROS_SERVICE_H_ #include @@ -26,7 +26,7 @@ #include #include "update_engine/client_library/include/update_engine/update_status.h" -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" namespace chromeos_update_engine { @@ -167,4 +167,4 @@ class UpdateEngineService { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_COMMON_SERVICE_H_ +#endif // UPDATE_ENGINE_CROS_SERVICE_H_ diff --git a/common_service_unittest.cc b/cros/common_service_unittest.cc similarity index 97% rename from common_service_unittest.cc rename to cros/common_service_unittest.cc index 3dc8a227..733ec0af 100644 --- a/common_service_unittest.cc +++ b/cros/common_service_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/common_service.h" +#include "update_engine/cros/common_service.h" #include #include @@ -25,8 +25,8 @@ #include #include "update_engine/common/fake_prefs.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/omaha_utils.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/omaha_utils.h" using std::string; using std::vector; diff --git a/connection_manager.cc b/cros/connection_manager.cc similarity index 96% rename from connection_manager.cc rename to cros/connection_manager.cc index fe43f37b..331f76bd 100644 --- a/connection_manager.cc +++ b/cros/connection_manager.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/connection_manager.h" +#include "update_engine/cros/connection_manager.h" #include #include @@ -26,12 +26,12 @@ #include #include +#include "update_engine/common/connection_utils.h" #include "update_engine/common/prefs.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_utils.h" -#include "update_engine/shill_proxy.h" -#include "update_engine/system_state.h" -#include "update_engine/update_attempter.h" +#include "update_engine/cros/shill_proxy.h" +#include "update_engine/cros/update_attempter.h" using org::chromium::flimflam::ManagerProxyInterface; using org::chromium::flimflam::ServiceProxyInterface; diff --git a/connection_manager.h b/cros/connection_manager.h similarity index 89% rename from connection_manager.h rename to cros/connection_manager.h index d8527a31..b1fb961f 100644 --- a/connection_manager.h +++ b/cros/connection_manager.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_H_ -#define UPDATE_ENGINE_CONNECTION_MANAGER_H_ +#ifndef UPDATE_ENGINE_CROS_CONNECTION_MANAGER_H_ +#define UPDATE_ENGINE_CROS_CONNECTION_MANAGER_H_ #include #include @@ -23,8 +23,8 @@ #include #include -#include "update_engine/connection_manager_interface.h" -#include "update_engine/shill_proxy_interface.h" +#include "update_engine/cros/connection_manager_interface.h" +#include "update_engine/cros/shill_proxy_interface.h" namespace chromeos_update_engine { @@ -66,4 +66,4 @@ class ConnectionManager : public ConnectionManagerInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_CONNECTION_MANAGER_H_ +#endif // UPDATE_ENGINE_CROS_CONNECTION_MANAGER_H_ diff --git a/connection_manager_interface.h b/cros/connection_manager_interface.h similarity index 90% rename from connection_manager_interface.h rename to cros/connection_manager_interface.h index 9f779891..6dd9fbdd 100644 --- a/connection_manager_interface.h +++ b/cros/connection_manager_interface.h @@ -14,14 +14,14 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_ -#define UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_ +#ifndef UPDATE_ENGINE_CROS_CONNECTION_MANAGER_INTERFACE_H_ +#define UPDATE_ENGINE_CROS_CONNECTION_MANAGER_INTERFACE_H_ #include #include -#include "update_engine/connection_utils.h" +#include "update_engine/common/connection_utils.h" namespace chromeos_update_engine { @@ -65,4 +65,4 @@ std::unique_ptr CreateConnectionManager( } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_CONNECTION_MANAGER_INTERFACE_H_ +#endif // UPDATE_ENGINE_CROS_CONNECTION_MANAGER_INTERFACE_H_ diff --git a/connection_manager_unittest.cc b/cros/connection_manager_unittest.cc similarity index 98% rename from connection_manager_unittest.cc rename to cros/connection_manager_unittest.cc index 97436c9c..3f1ee5a3 100644 --- a/connection_manager_unittest.cc +++ b/cros/connection_manager_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/connection_manager.h" +#include "update_engine/cros/connection_manager.h" #include #include @@ -32,8 +32,8 @@ #include #include "update_engine/common/test_utils.h" -#include "update_engine/fake_shill_proxy.h" -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_shill_proxy.h" +#include "update_engine/cros/fake_system_state.h" using chromeos_update_engine::connection_utils::StringForConnectionType; using org::chromium::flimflam::ManagerProxyMock; diff --git a/daemon_chromeos.cc b/cros/daemon_chromeos.cc similarity index 96% rename from daemon_chromeos.cc rename to cros/daemon_chromeos.cc index 21740d81..a7cad8cc 100644 --- a/daemon_chromeos.cc +++ b/cros/daemon_chromeos.cc @@ -14,14 +14,14 @@ // limitations under the License. // -#include "update_engine/daemon_chromeos.h" +#include "update_engine/cros/daemon_chromeos.h" #include #include #include -#include "update_engine/real_system_state.h" +#include "update_engine/cros/real_system_state.h" using brillo::Daemon; using std::unique_ptr; diff --git a/daemon_chromeos.h b/cros/daemon_chromeos.h similarity index 84% rename from daemon_chromeos.h rename to cros/daemon_chromeos.h index 657e7971..5d568c70 100644 --- a/daemon_chromeos.h +++ b/cros/daemon_chromeos.h @@ -14,15 +14,15 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DAEMON_CHROMEOS_H_ -#define UPDATE_ENGINE_DAEMON_CHROMEOS_H_ +#ifndef UPDATE_ENGINE_CROS_DAEMON_CHROMEOS_H_ +#define UPDATE_ENGINE_CROS_DAEMON_CHROMEOS_H_ #include +#include "update_engine/common/daemon_base.h" +#include "update_engine/common/daemon_state_interface.h" #include "update_engine/common/subprocess.h" -#include "update_engine/daemon_base.h" -#include "update_engine/daemon_state_interface.h" -#include "update_engine/dbus_service.h" +#include "update_engine/cros/dbus_service.h" namespace chromeos_update_engine { @@ -56,4 +56,4 @@ class DaemonChromeOS : public DaemonBase { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DAEMON_CHROMEOS_H_ +#endif // UPDATE_ENGINE_CROS_DAEMON_CHROMEOS_H_ diff --git a/dbus_connection.cc b/cros/dbus_connection.cc similarity index 97% rename from dbus_connection.cc rename to cros/dbus_connection.cc index cf17ec99..6808bae6 100644 --- a/dbus_connection.cc +++ b/cros/dbus_connection.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" #include diff --git a/dbus_connection.h b/cros/dbus_connection.h similarity index 88% rename from dbus_connection.h rename to cros/dbus_connection.h index c3205ba5..8f0d6f1f 100644 --- a/dbus_connection.h +++ b/cros/dbus_connection.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DBUS_CONNECTION_H_ -#define UPDATE_ENGINE_DBUS_CONNECTION_H_ +#ifndef UPDATE_ENGINE_CROS_DBUS_CONNECTION_H_ +#define UPDATE_ENGINE_CROS_DBUS_CONNECTION_H_ #include #include @@ -41,4 +41,4 @@ class DBusConnection { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DBUS_CONNECTION_H_ +#endif // UPDATE_ENGINE_CROS_DBUS_CONNECTION_H_ diff --git a/dbus_service.cc b/cros/dbus_service.cc similarity index 98% rename from dbus_service.cc rename to cros/dbus_service.cc index a282d1e3..d115195f 100644 --- a/dbus_service.cc +++ b/cros/dbus_service.cc @@ -14,14 +14,14 @@ // limitations under the License. 
// -#include "update_engine/dbus_service.h" +#include "update_engine/cros/dbus_service.h" #include #include #include -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" #include "update_engine/proto_bindings/update_engine.pb.h" #include "update_engine/update_status_utils.h" diff --git a/dbus_service.h b/cros/dbus_service.h similarity index 96% rename from dbus_service.h rename to cros/dbus_service.h index 873909ee..9e4457fb 100644 --- a/dbus_service.h +++ b/cros/dbus_service.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DBUS_SERVICE_H_ -#define UPDATE_ENGINE_DBUS_SERVICE_H_ +#ifndef UPDATE_ENGINE_CROS_DBUS_SERVICE_H_ +#define UPDATE_ENGINE_CROS_DBUS_SERVICE_H_ #include @@ -27,9 +27,9 @@ #include #include -#include "update_engine/common_service.h" -#include "update_engine/service_observer_interface.h" -#include "update_engine/update_attempter.h" +#include "update_engine/common/service_observer_interface.h" +#include "update_engine/cros/common_service.h" +#include "update_engine/cros/update_attempter.h" #include "dbus_bindings/org.chromium.UpdateEngineInterface.h" @@ -191,4 +191,4 @@ class UpdateEngineAdaptor : public org::chromium::UpdateEngineInterfaceAdaptor, } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DBUS_SERVICE_H_ +#endif // UPDATE_ENGINE_CROS_DBUS_SERVICE_H_ diff --git a/dbus_test_utils.h b/cros/dbus_test_utils.h similarity index 95% rename from dbus_test_utils.h rename to cros/dbus_test_utils.h index 72fd4e01..1116c528 100644 --- a/dbus_test_utils.h +++ b/cros/dbus_test_utils.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_DBUS_TEST_UTILS_H_ -#define UPDATE_ENGINE_DBUS_TEST_UTILS_H_ +#ifndef UPDATE_ENGINE_CROS_DBUS_TEST_UTILS_H_ +#define UPDATE_ENGINE_CROS_DBUS_TEST_UTILS_H_ #include #include @@ -88,4 +88,4 @@ ACTION_P(GrabCallbacks, mock_signal_handler) { } // namespace dbus_test_utils } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DBUS_TEST_UTILS_H_ +#endif // UPDATE_ENGINE_CROS_DBUS_TEST_UTILS_H_ diff --git a/dlcservice_chromeos.cc b/cros/dlcservice_chromeos.cc similarity index 95% rename from dlcservice_chromeos.cc rename to cros/dlcservice_chromeos.cc index 08482ee5..e510c1d1 100644 --- a/dlcservice_chromeos.cc +++ b/cros/dlcservice_chromeos.cc @@ -14,14 +14,14 @@ // limitations under the License. // -#include "update_engine/dlcservice_chromeos.h" +#include "update_engine/cros/dlcservice_chromeos.h" #include #include // NOLINTNEXTLINE(build/include_alpha) "dbus-proxies.h" needs "dlcservice.pb.h" #include -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" using std::string; using std::vector; diff --git a/dlcservice_chromeos.h b/cros/dlcservice_chromeos.h similarity index 92% rename from dlcservice_chromeos.h rename to cros/dlcservice_chromeos.h index 8828e1af..3f11b126 100644 --- a/dlcservice_chromeos.h +++ b/cros/dlcservice_chromeos.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_ -#define UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_ +#ifndef UPDATE_ENGINE_CROS_DLCSERVICE_CHROMEOS_H_ +#define UPDATE_ENGINE_CROS_DLCSERVICE_CHROMEOS_H_ #include #include @@ -52,4 +52,4 @@ class DlcServiceChromeOS : public DlcServiceInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_DLCSERVICE_CHROMEOS_H_ +#endif // UPDATE_ENGINE_CROS_DLCSERVICE_CHROMEOS_H_ diff --git a/excluder_chromeos.cc b/cros/excluder_chromeos.cc similarity index 94% rename from excluder_chromeos.cc rename to cros/excluder_chromeos.cc index bfd6f046..47965254 100644 --- a/excluder_chromeos.cc +++ b/cros/excluder_chromeos.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/excluder_chromeos.h" +#include "update_engine/cros/excluder_chromeos.h" #include #include @@ -26,7 +26,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/prefs.h" -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" using std::string; using std::vector; diff --git a/excluder_chromeos.h b/cros/excluder_chromeos.h similarity index 91% rename from excluder_chromeos.h rename to cros/excluder_chromeos.h index e4c1a529..2480066a 100644 --- a/excluder_chromeos.h +++ b/cros/excluder_chromeos.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_ -#define UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_ +#ifndef UPDATE_ENGINE_CROS_EXCLUDER_CHROMEOS_H_ +#define UPDATE_ENGINE_CROS_EXCLUDER_CHROMEOS_H_ #include @@ -49,4 +49,4 @@ class ExcluderChromeOS : public ExcluderInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_ +#endif // UPDATE_ENGINE_CROS_EXCLUDER_CHROMEOS_H_ diff --git a/excluder_chromeos_unittest.cc b/cros/excluder_chromeos_unittest.cc similarity index 97% rename from excluder_chromeos_unittest.cc rename to cros/excluder_chromeos_unittest.cc index dba77e47..3602e56b 100644 --- a/excluder_chromeos_unittest.cc +++ b/cros/excluder_chromeos_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/excluder_chromeos.h" +#include "update_engine/cros/excluder_chromeos.h" #include diff --git a/fake_p2p_manager.h b/cros/fake_p2p_manager.h similarity index 94% rename from fake_p2p_manager.h rename to cros/fake_p2p_manager.h index 1f8ae95f..1011b7ed 100644 --- a/fake_p2p_manager.h +++ b/cros/fake_p2p_manager.h @@ -14,12 +14,12 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_FAKE_P2P_MANAGER_H_ -#define UPDATE_ENGINE_FAKE_P2P_MANAGER_H_ +#ifndef UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_H_ +#define UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_H_ #include -#include "update_engine/p2p_manager.h" +#include "update_engine/cros/p2p_manager.h" namespace chromeos_update_engine { @@ -109,4 +109,4 @@ class FakeP2PManager : public P2PManager { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_FAKE_P2P_MANAGER_H_ +#endif // UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_H_ diff --git a/fake_p2p_manager_configuration.h b/cros/fake_p2p_manager_configuration.h similarity index 93% rename from fake_p2p_manager_configuration.h rename to cros/fake_p2p_manager_configuration.h index f5b0e80d..8d50ac85 100644 --- a/fake_p2p_manager_configuration.h +++ b/cros/fake_p2p_manager_configuration.h @@ -14,10 +14,10 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_ -#define UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_ +#ifndef UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_CONFIGURATION_H_ +#define UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_CONFIGURATION_H_ -#include "update_engine/p2p_manager.h" +#include "update_engine/cros/p2p_manager.h" #include #include @@ -99,4 +99,4 @@ class FakeP2PManagerConfiguration : public P2PManager::Configuration { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_FAKE_P2P_MANAGER_CONFIGURATION_H_ +#endif // UPDATE_ENGINE_CROS_FAKE_P2P_MANAGER_CONFIGURATION_H_ diff --git a/fake_shill_proxy.cc b/cros/fake_shill_proxy.cc similarity index 97% rename from fake_shill_proxy.cc rename to cros/fake_shill_proxy.cc index de965119..2d05a6b1 100644 --- a/fake_shill_proxy.cc +++ b/cros/fake_shill_proxy.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/fake_shill_proxy.h" +#include "update_engine/cros/fake_shill_proxy.h" #include diff --git a/fake_shill_proxy.h b/cros/fake_shill_proxy.h similarity index 90% rename from fake_shill_proxy.h rename to cros/fake_shill_proxy.h index ae17eaa3..8c15a9dc 100644 --- a/fake_shill_proxy.h +++ b/cros/fake_shill_proxy.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_FAKE_SHILL_PROXY_H_ -#define UPDATE_ENGINE_FAKE_SHILL_PROXY_H_ +#ifndef UPDATE_ENGINE_CROS_FAKE_SHILL_PROXY_H_ +#define UPDATE_ENGINE_CROS_FAKE_SHILL_PROXY_H_ #include #include @@ -25,7 +25,7 @@ #include #include -#include "update_engine/shill_proxy_interface.h" +#include "update_engine/cros/shill_proxy_interface.h" namespace chromeos_update_engine { @@ -63,4 +63,4 @@ class FakeShillProxy : public ShillProxyInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_FAKE_SHILL_PROXY_H_ +#endif // UPDATE_ENGINE_CROS_FAKE_SHILL_PROXY_H_ diff --git a/fake_system_state.cc b/cros/fake_system_state.cc similarity index 96% rename from fake_system_state.cc rename to cros/fake_system_state.cc index 1bfcafa1..9dfdc5ba 100644 --- a/fake_system_state.cc +++ b/cros/fake_system_state.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" namespace chromeos_update_engine { diff --git a/fake_system_state.h b/cros/fake_system_state.h similarity index 94% rename from fake_system_state.h rename to cros/fake_system_state.h index 24b1eec2..2f92b7c5 100644 --- a/fake_system_state.h +++ b/cros/fake_system_state.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_FAKE_SYSTEM_STATE_H_ -#define UPDATE_ENGINE_FAKE_SYSTEM_STATE_H_ +#ifndef UPDATE_ENGINE_CROS_FAKE_SYSTEM_STATE_H_ +#define UPDATE_ENGINE_CROS_FAKE_SYSTEM_STATE_H_ #include #include @@ -25,15 +25,15 @@ #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_hardware.h" +#include "update_engine/common/mock_metrics_reporter.h" #include "update_engine/common/mock_prefs.h" -#include "update_engine/mock_connection_manager.h" -#include "update_engine/mock_metrics_reporter.h" -#include "update_engine/mock_omaha_request_params.h" -#include "update_engine/mock_p2p_manager.h" -#include "update_engine/mock_payload_state.h" -#include "update_engine/mock_power_manager.h" -#include "update_engine/mock_update_attempter.h" -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" +#include "update_engine/cros/mock_connection_manager.h" +#include "update_engine/cros/mock_omaha_request_params.h" +#include "update_engine/cros/mock_p2p_manager.h" +#include "update_engine/cros/mock_payload_state.h" +#include "update_engine/cros/mock_power_manager.h" +#include "update_engine/cros/mock_update_attempter.h" #include "update_engine/update_manager/fake_update_manager.h" namespace chromeos_update_engine { @@ -278,4 +278,4 @@ class FakeSystemState : public SystemState { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_FAKE_SYSTEM_STATE_H_ +#endif // UPDATE_ENGINE_CROS_FAKE_SYSTEM_STATE_H_ diff --git a/hardware_chromeos.cc b/cros/hardware_chromeos.cc similarity index 98% rename from hardware_chromeos.cc rename to cros/hardware_chromeos.cc index dbb99dba..b9018ffb 100644 --- a/hardware_chromeos.cc +++ b/cros/hardware_chromeos.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/hardware_chromeos.h" +#include "update_engine/cros/hardware_chromeos.h" #include @@ -37,9 +37,9 @@ extern "C" { #include "update_engine/common/platform_constants.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/utils.h" -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" #if USE_CFM -#include "update_engine/requisition_util.h" +#include "update_engine/cros/requisition_util.h" #endif using std::string; diff --git a/hardware_chromeos.h b/cros/hardware_chromeos.h similarity index 95% rename from hardware_chromeos.h rename to cros/hardware_chromeos.h index 9ee62f68..de84d783 100644 --- a/hardware_chromeos.h +++ b/cros/hardware_chromeos.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_HARDWARE_CHROMEOS_H_ -#define UPDATE_ENGINE_HARDWARE_CHROMEOS_H_ +#ifndef UPDATE_ENGINE_CROS_HARDWARE_CHROMEOS_H_ +#define UPDATE_ENGINE_CROS_HARDWARE_CHROMEOS_H_ #include #include @@ -85,4 +85,4 @@ class HardwareChromeOS final : public HardwareInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_HARDWARE_CHROMEOS_H_ +#endif // UPDATE_ENGINE_CROS_HARDWARE_CHROMEOS_H_ diff --git a/hardware_chromeos_unittest.cc b/cros/hardware_chromeos_unittest.cc similarity index 98% rename from hardware_chromeos_unittest.cc rename to cros/hardware_chromeos_unittest.cc index 162dec4b..50bced6e 100644 --- a/hardware_chromeos_unittest.cc +++ b/cros/hardware_chromeos_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/hardware_chromeos.h" +#include "update_engine/cros/hardware_chromeos.h" #include diff --git a/image_properties.h b/cros/image_properties.h similarity index 96% rename from image_properties.h rename to cros/image_properties.h index 0887ca8a..4957d12d 100644 --- a/image_properties.h +++ b/cros/image_properties.h @@ -18,8 +18,8 @@ // properties are meant to be constant during the life of this daemon, but can // be modified in dev-move or non-official builds. -#ifndef UPDATE_ENGINE_IMAGE_PROPERTIES_H_ -#define UPDATE_ENGINE_IMAGE_PROPERTIES_H_ +#ifndef UPDATE_ENGINE_CROS_IMAGE_PROPERTIES_H_ +#define UPDATE_ENGINE_CROS_IMAGE_PROPERTIES_H_ #include @@ -99,4 +99,4 @@ void SetImagePropertiesRootPrefix(const char* test_root_prefix); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_IMAGE_PROPERTIES_H_ +#endif // UPDATE_ENGINE_CROS_IMAGE_PROPERTIES_H_ diff --git a/image_properties_chromeos.cc b/cros/image_properties_chromeos.cc similarity index 98% rename from image_properties_chromeos.cc rename to cros/image_properties_chromeos.cc index 5ab8f052..c22da7cd 100644 --- a/image_properties_chromeos.cc +++ b/cros/image_properties_chromeos.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/image_properties.h" +#include "update_engine/cros/image_properties.h" #include #include @@ -26,8 +26,8 @@ #include "update_engine/common/constants.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/platform_constants.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" -#include "update_engine/system_state.h" namespace { diff --git a/image_properties_chromeos_unittest.cc b/cros/image_properties_chromeos_unittest.cc similarity index 98% rename from image_properties_chromeos_unittest.cc rename to cros/image_properties_chromeos_unittest.cc index d9ed6883..4822995e 100644 --- a/image_properties_chromeos_unittest.cc +++ b/cros/image_properties_chromeos_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/image_properties.h" +#include "update_engine/cros/image_properties.h" #include @@ -24,7 +24,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/test_utils.h" -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" using chromeos_update_engine::test_utils::WriteFileString; using std::string; diff --git a/logging.cc b/cros/logging.cc similarity index 98% rename from logging.cc rename to cros/logging.cc index 012feee0..e09166ca 100644 --- a/logging.cc +++ b/cros/logging.cc @@ -25,8 +25,8 @@ #include #include +#include "update_engine/common/logging.h" #include "update_engine/common/utils.h" -#include "update_engine/logging.h" using std::string; diff --git a/metrics_reporter_omaha.cc b/cros/metrics_reporter_omaha.cc similarity index 92% rename from metrics_reporter_omaha.cc rename to cros/metrics_reporter_omaha.cc index 0cf0e59b..2cc0de5f 100644 --- a/metrics_reporter_omaha.cc +++ b/cros/metrics_reporter_omaha.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/metrics_reporter_omaha.h" +#include "update_engine/cros/metrics_reporter_omaha.h" #include @@ -25,15 +25,16 @@ #include "update_engine/common/clock_interface.h" #include "update_engine/common/constants.h" #include "update_engine/common/prefs_interface.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" +#include "update_engine/cros/omaha_request_params.h" #include "update_engine/metrics_utils.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/system_state.h" +using base::Time; +using base::TimeDelta; using std::string; namespace chromeos_update_engine { - namespace metrics { // UpdateEngine.Daily.* metrics. @@ -181,10 +182,9 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( } base::TimeDelta time_since_last; - if (metrics_utils::WallclockDurationHelper( - system_state, - kPrefsMetricsCheckLastReportingTime, - &time_since_last)) { + if (WallclockDurationHelper(system_state, + kPrefsMetricsCheckLastReportingTime, + &time_since_last)) { metric = metrics::kMetricCheckTimeSinceLastCheckMinutes; metrics_lib_->SendToUMA(metric, time_since_last.InMinutes(), @@ -195,7 +195,7 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( base::TimeDelta uptime_since_last; static int64_t uptime_since_last_storage = 0; - if (metrics_utils::MonotonicDurationHelper( + if (MonotonicDurationHelper( system_state, &uptime_since_last_storage, &uptime_since_last)) { metric = metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes; metrics_lib_->SendToUMA(metric, @@ -284,10 +284,9 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( } base::TimeDelta time_since_last; - if (metrics_utils::WallclockDurationHelper( - system_state, - kPrefsMetricsAttemptLastReportingTime, - &time_since_last)) { + if (WallclockDurationHelper(system_state, + kPrefsMetricsAttemptLastReportingTime, + &time_since_last)) { metric = metrics::kMetricAttemptTimeSinceLastAttemptMinutes; metrics_lib_->SendToUMA(metric, time_since_last.InMinutes(), @@ -298,7 +297,7 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( static int64_t uptime_since_last_storage = 0; base::TimeDelta uptime_since_last; - if (metrics_utils::MonotonicDurationHelper( + if (MonotonicDurationHelper( system_state, &uptime_since_last_storage, &uptime_since_last)) { metric = metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes; metrics_lib_->SendToUMA(metric, @@ -557,4 +556,45 @@ void MetricsReporterOmaha::ReportEnterpriseUpdateSeenToDownloadDays( 50); // num_buckets } +bool MetricsReporterOmaha::WallclockDurationHelper( + SystemState* system_state, + const std::string& state_variable_key, + TimeDelta* out_duration) { + bool ret = false; + Time now = system_state->clock()->GetWallclockTime(); + int64_t stored_value; + if (system_state->prefs()->GetInt64(state_variable_key, &stored_value)) { + Time stored_time = Time::FromInternalValue(stored_value); + if (stored_time > now) { + LOG(ERROR) << "Stored time-stamp used for " << state_variable_key + << " is in the future."; + } else { + *out_duration = now - stored_time; + ret = true; + } + } + + if (!system_state->prefs()->SetInt64(state_variable_key, + now.ToInternalValue())) { + LOG(ERROR) << "Error storing time-stamp in " << state_variable_key; + } + + return ret; +} + +bool MetricsReporterOmaha::MonotonicDurationHelper(SystemState* system_state, + int64_t* storage, + TimeDelta* out_duration) { + bool ret = false; + Time now = system_state->clock()->GetMonotonicTime(); + if (*storage != 0) { + Time stored_time = 
Time::FromInternalValue(*storage); + *out_duration = now - stored_time; + ret = true; + } + *storage = now.ToInternalValue(); + + return ret; +} + } // namespace chromeos_update_engine diff --git a/metrics_reporter_omaha.h b/cros/metrics_reporter_omaha.h similarity index 80% rename from metrics_reporter_omaha.h rename to cros/metrics_reporter_omaha.h index c84ac1ec..5b3fdb16 100644 --- a/metrics_reporter_omaha.h +++ b/cros/metrics_reporter_omaha.h @@ -14,21 +14,22 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_ -#define UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_ +#ifndef UPDATE_ENGINE_CROS_METRICS_REPORTER_OMAHA_H_ +#define UPDATE_ENGINE_CROS_METRICS_REPORTER_OMAHA_H_ #include #include #include +#include // for FRIEND_TEST #include #include "update_engine/certificate_checker.h" #include "update_engine/common/constants.h" #include "update_engine/common/error_code.h" -#include "update_engine/metrics_constants.h" -#include "update_engine/metrics_reporter_interface.h" -#include "update_engine/system_state.h" +#include "update_engine/common/metrics_constants.h" +#include "update_engine/common/metrics_reporter_interface.h" +#include "update_engine/common/system_state.h" namespace chromeos_update_engine { @@ -171,6 +172,31 @@ class MetricsReporterOmaha : public MetricsReporterInterface { private: friend class MetricsReporterOmahaTest; + FRIEND_TEST(MetricsReporterOmahaTest, WallclockDurationHelper); + FRIEND_TEST(MetricsReporterOmahaTest, MonotonicDurationHelper); + + // This function returns the duration on the wallclock since the last + // time it was called for the same |state_variable_key| value. + // + // If the function returns |true|, the duration (always non-negative) + // is returned in |out_duration|. If the function returns |false| + // something went wrong or there was no previous measurement. + bool WallclockDurationHelper(SystemState* system_state, + const std::string& state_variable_key, + base::TimeDelta* out_duration); + + // This function returns the duration on the monotonic clock since the + // last time it was called for the same |storage| pointer. + // + // You should pass a pointer to a 64-bit integer in |storage| which + // should be initialized to 0. + // + // If the function returns |true|, the duration (always non-negative) + // is returned in |out_duration|. If the function returns |false| + // something went wrong or there was no previous measurement. + bool MonotonicDurationHelper(SystemState* system_state, + int64_t* storage, + base::TimeDelta* out_duration); std::unique_ptr metrics_lib_; @@ -179,4 +205,4 @@ class MetricsReporterOmaha : public MetricsReporterInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_ +#endif // UPDATE_ENGINE_CROS_METRICS_REPORTER_OMAHA_H_ diff --git a/metrics_reporter_omaha_unittest.cc b/cros/metrics_reporter_omaha_unittest.cc similarity index 80% rename from metrics_reporter_omaha_unittest.cc rename to cros/metrics_reporter_omaha_unittest.cc index 545d02f4..a25472a3 100644 --- a/metrics_reporter_omaha_unittest.cc +++ b/cros/metrics_reporter_omaha_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/metrics_reporter_omaha.h" +#include "update_engine/cros/metrics_reporter_omaha.h" #include #include @@ -26,7 +26,7 @@ #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_prefs.h" -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" using base::TimeDelta; using testing::_; @@ -538,4 +538,115 @@ TEST_F(MetricsReporterOmahaTest, true /* has_time_restriction_policy */, kDaysToUpdate); } +TEST_F(MetricsReporterOmahaTest, WallclockDurationHelper) { + FakeSystemState fake_system_state; + FakeClock fake_clock; + base::TimeDelta duration; + const std::string state_variable_key = "test-prefs"; + FakePrefs fake_prefs; + + fake_system_state.set_clock(&fake_clock); + fake_system_state.set_prefs(&fake_prefs); + + // Initialize wallclock to 1 sec. + fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000)); + + // First time called so no previous measurement available. + EXPECT_FALSE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + + // Next time, we should get zero since the clock didn't advance. + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // We can also call it as many times as we want with it being + // considered a failure. + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // Advance the clock one second, then we should get 1 sec on the + // next call and 0 sec on the subsequent call. + fake_clock.SetWallclockTime(base::Time::FromInternalValue(2000000)); + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 1); + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // Advance clock two seconds and we should get 2 sec and then 0 sec. + fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000)); + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 2); + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // There's a possibility that the wallclock can go backwards (NTP + // adjustments, for example) so check that we properly handle this + // case. + fake_clock.SetWallclockTime(base::Time::FromInternalValue(3000000)); + EXPECT_FALSE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000)); + EXPECT_TRUE(reporter_.WallclockDurationHelper( + &fake_system_state, state_variable_key, &duration)); + EXPECT_EQ(duration.InSeconds(), 1); +} + +TEST_F(MetricsReporterOmahaTest, MonotonicDurationHelper) { + int64_t storage = 0; + FakeSystemState fake_system_state; + FakeClock fake_clock; + base::TimeDelta duration; + + fake_system_state.set_clock(&fake_clock); + + // Initialize monotonic clock to 1 sec. + fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000)); + + // First time called so no previous measurement available. 
+ EXPECT_FALSE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + + // Next time, we should get zero since the clock didn't advance. + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // We can also call it as many times as we want with it being + // considered a failure. + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // Advance the clock one second, then we should get 1 sec on the + // next call and 0 sec on the subsequent call. + fake_clock.SetMonotonicTime(base::Time::FromInternalValue(2000000)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 1); + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); + + // Advance clock two seconds and we should get 2 sec and then 0 sec. + fake_clock.SetMonotonicTime(base::Time::FromInternalValue(4000000)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 2); + EXPECT_TRUE(reporter_.MonotonicDurationHelper( + &fake_system_state, &storage, &duration)); + EXPECT_EQ(duration.InSeconds(), 0); +} + } // namespace chromeos_update_engine diff --git a/mock_connection_manager.h b/cros/mock_connection_manager.h similarity index 85% rename from mock_connection_manager.h rename to cros/mock_connection_manager.h index 2fff68c6..899a49b1 100644 --- a/mock_connection_manager.h +++ b/cros/mock_connection_manager.h @@ -14,12 +14,12 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_CONNECTION_MANAGER_H_ -#define UPDATE_ENGINE_MOCK_CONNECTION_MANAGER_H_ +#ifndef UPDATE_ENGINE_CROS_MOCK_CONNECTION_MANAGER_H_ +#define UPDATE_ENGINE_CROS_MOCK_CONNECTION_MANAGER_H_ #include -#include "update_engine/connection_manager_interface.h" +#include "update_engine/cros/connection_manager_interface.h" namespace chromeos_update_engine { @@ -41,4 +41,4 @@ class MockConnectionManager : public ConnectionManagerInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_CONNECTION_MANAGER_H_ +#endif // UPDATE_ENGINE_CROS_MOCK_CONNECTION_MANAGER_H_ diff --git a/mock_omaha_request_params.h b/cros/mock_omaha_request_params.h similarity index 92% rename from mock_omaha_request_params.h rename to cros/mock_omaha_request_params.h index 41bdc195..6072d224 100644 --- a/mock_omaha_request_params.h +++ b/cros/mock_omaha_request_params.h @@ -14,14 +14,14 @@ // limitations under the License. 
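Note on the hunks above: they move WallclockDurationHelper and MonotonicDurationHelper out of metrics_utils and into MetricsReporterOmaha as private members, exercised through FakeClock and FakePrefs in the new unit tests. The following is a rough standalone sketch of the wallclock variant, not code from the patch: the FakePrefs map stands in for PrefsInterface and raw microsecond counts stand in for base::Time (1000000 internal units is one second, which is why the tests above step the fake clock in multiples of 1000000).

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Stand-in for PrefsInterface: a persisted int64 key/value store.
struct FakePrefs {
  std::map<std::string, int64_t> values;
  bool GetInt64(const std::string& key, int64_t* out) const {
    auto it = values.find(key);
    if (it == values.end())
      return false;
    *out = it->second;
    return true;
  }
  bool SetInt64(const std::string& key, int64_t value) {
    values[key] = value;
    return true;
  }
};

// Returns true and the elapsed microseconds since the previous call for
// |key|; returns false on the first call or when the stored stamp is in
// the future (wallclock moved backwards). Always re-stamps |key|.
bool WallclockDurationHelper(FakePrefs* prefs,
                             int64_t now_us,
                             const std::string& key,
                             int64_t* out_us) {
  bool ret = false;
  int64_t stored = 0;
  if (prefs->GetInt64(key, &stored) && stored <= now_us) {
    *out_us = now_us - stored;   // Non-negative by construction.
    ret = true;
  }
  prefs->SetInt64(key, now_us);  // Re-stamp even when no duration is reported.
  return ret;
}

int main() {
  FakePrefs prefs;
  int64_t d = 0;
  std::cout << WallclockDurationHelper(&prefs, 1000000, "k", &d) << "\n";  // 0: first call.
  std::cout << WallclockDurationHelper(&prefs, 2000000, "k", &d) << " "
            << d / 1000000 << "s\n";                                       // 1 1s: one second elapsed.
  std::cout << WallclockDurationHelper(&prefs, 1500000, "k", &d) << "\n";  // 0: clock went backwards.
}

The monotonic variant in the patch has the same shape but keeps its previous stamp in a caller-supplied int64_t initialized to 0 rather than in a pref, so it needs no backwards-clock guard.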
// -#ifndef UPDATE_ENGINE_MOCK_OMAHA_REQUEST_PARAMS_H_ -#define UPDATE_ENGINE_MOCK_OMAHA_REQUEST_PARAMS_H_ +#ifndef UPDATE_ENGINE_CROS_MOCK_OMAHA_REQUEST_PARAMS_H_ +#define UPDATE_ENGINE_CROS_MOCK_OMAHA_REQUEST_PARAMS_H_ #include #include -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/omaha_request_params.h" namespace chromeos_update_engine { @@ -79,4 +79,4 @@ class MockOmahaRequestParams : public OmahaRequestParams { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_OMAHA_REQUEST_PARAMS_H_ +#endif // UPDATE_ENGINE_CROS_MOCK_OMAHA_REQUEST_PARAMS_H_ diff --git a/mock_p2p_manager.h b/cros/mock_p2p_manager.h similarity index 95% rename from mock_p2p_manager.h rename to cros/mock_p2p_manager.h index fd670345..273f7f9c 100644 --- a/mock_p2p_manager.h +++ b/cros/mock_p2p_manager.h @@ -14,12 +14,12 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_P2P_MANAGER_H_ -#define UPDATE_ENGINE_MOCK_P2P_MANAGER_H_ +#ifndef UPDATE_ENGINE_CROS_MOCK_P2P_MANAGER_H_ +#define UPDATE_ENGINE_CROS_MOCK_P2P_MANAGER_H_ #include -#include "update_engine/fake_p2p_manager.h" +#include "update_engine/cros/fake_p2p_manager.h" #include @@ -99,4 +99,4 @@ class MockP2PManager : public P2PManager { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_P2P_MANAGER_H_ +#endif // UPDATE_ENGINE_CROS_MOCK_P2P_MANAGER_H_ diff --git a/mock_payload_state.h b/cros/mock_payload_state.h similarity index 92% rename from mock_payload_state.h rename to cros/mock_payload_state.h index ad22de52..56094e64 100644 --- a/mock_payload_state.h +++ b/cros/mock_payload_state.h @@ -14,15 +14,15 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_PAYLOAD_STATE_H_ -#define UPDATE_ENGINE_MOCK_PAYLOAD_STATE_H_ +#ifndef UPDATE_ENGINE_CROS_MOCK_PAYLOAD_STATE_H_ +#define UPDATE_ENGINE_CROS_MOCK_PAYLOAD_STATE_H_ #include #include -#include "update_engine/omaha_request_action.h" -#include "update_engine/payload_state_interface.h" +#include "update_engine/common/system_state.h" +#include "update_engine/cros/payload_state_interface.h" namespace chromeos_update_engine { @@ -81,4 +81,4 @@ class MockPayloadState : public PayloadStateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_PAYLOAD_STATE_H_ +#endif // UPDATE_ENGINE_CROS_MOCK_PAYLOAD_STATE_H_ diff --git a/mock_power_manager.h b/cros/mock_power_manager.h similarity index 80% rename from mock_power_manager.h rename to cros/mock_power_manager.h index 8363171d..d4a86820 100644 --- a/mock_power_manager.h +++ b/cros/mock_power_manager.h @@ -14,12 +14,12 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_MOCK_POWER_MANAGER_H_ -#define UPDATE_ENGINE_MOCK_POWER_MANAGER_H_ +#ifndef UPDATE_ENGINE_CROS_MOCK_POWER_MANAGER_H_ +#define UPDATE_ENGINE_CROS_MOCK_POWER_MANAGER_H_ #include -#include "update_engine/power_manager_interface.h" +#include "update_engine/cros/power_manager_interface.h" namespace chromeos_update_engine { @@ -32,4 +32,4 @@ class MockPowerManager : public PowerManagerInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_POWER_MANAGER_H_ +#endif // UPDATE_ENGINE_CROS_MOCK_POWER_MANAGER_H_ diff --git a/mock_update_attempter.h b/cros/mock_update_attempter.h similarity index 90% rename from mock_update_attempter.h rename to cros/mock_update_attempter.h index 96d93fd5..be8cfccc 100644 --- a/mock_update_attempter.h +++ b/cros/mock_update_attempter.h @@ -14,13 +14,13 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_ -#define UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_ +#ifndef UPDATE_ENGINE_CROS_MOCK_UPDATE_ATTEMPTER_H_ +#define UPDATE_ENGINE_CROS_MOCK_UPDATE_ATTEMPTER_H_ #include #include -#include "update_engine/update_attempter.h" +#include "update_engine/cros/update_attempter.h" #include @@ -65,4 +65,4 @@ class MockUpdateAttempter : public UpdateAttempter { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_MOCK_UPDATE_ATTEMPTER_H_ +#endif // UPDATE_ENGINE_CROS_MOCK_UPDATE_ATTEMPTER_H_ diff --git a/omaha_request_action.cc b/cros/omaha_request_action.cc similarity index 99% rename from omaha_request_action.cc rename to cros/omaha_request_action.cc index 4d236152..0916f9d3 100644 --- a/omaha_request_action.cc +++ b/cros/omaha_request_action.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_request_action.h" +#include "update_engine/cros/omaha_request_action.h" #include @@ -44,18 +44,18 @@ #include "update_engine/common/constants.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/hash_calculator.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_manager_interface.h" -#include "update_engine/metrics_reporter_interface.h" +#include "update_engine/cros/connection_manager_interface.h" +#include "update_engine/cros/omaha_request_builder_xml.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/p2p_manager.h" +#include "update_engine/cros/payload_state_interface.h" +#include "update_engine/cros/update_attempter.h" #include "update_engine/metrics_utils.h" -#include "update_engine/omaha_request_builder_xml.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/p2p_manager.h" -#include "update_engine/payload_state_interface.h" -#include "update_engine/update_attempter.h" using base::Optional; using base::Time; diff --git a/omaha_request_action.h b/cros/omaha_request_action.h similarity index 97% rename from omaha_request_action.h rename to cros/omaha_request_action.h index 30b3d227..1a3a912f 100644 --- a/omaha_request_action.h +++ b/cros/omaha_request_action.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_OMAHA_REQUEST_ACTION_H_ -#define UPDATE_ENGINE_OMAHA_REQUEST_ACTION_H_ +#ifndef UPDATE_ENGINE_CROS_OMAHA_REQUEST_ACTION_H_ +#define UPDATE_ENGINE_CROS_OMAHA_REQUEST_ACTION_H_ #include #include @@ -33,9 +33,9 @@ #include "update_engine/common/action.h" #include "update_engine/common/http_fetcher.h" -#include "update_engine/omaha_request_builder_xml.h" -#include "update_engine/omaha_response.h" -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" +#include "update_engine/cros/omaha_request_builder_xml.h" +#include "update_engine/cros/omaha_response.h" // The Omaha Request action makes a request to Omaha and can output // the response on the output ActionPipe. 
@@ -317,4 +317,4 @@ class OmahaRequestAction : public Action, } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_OMAHA_REQUEST_ACTION_H_ +#endif // UPDATE_ENGINE_CROS_OMAHA_REQUEST_ACTION_H_ diff --git a/omaha_request_action_fuzzer.cc b/cros/omaha_request_action_fuzzer.cc similarity index 94% rename from omaha_request_action_fuzzer.cc rename to cros/omaha_request_action_fuzzer.cc index 6c41b121..dd024670 100644 --- a/omaha_request_action_fuzzer.cc +++ b/cros/omaha_request_action_fuzzer.cc @@ -18,8 +18,8 @@ #include "update_engine/common/mock_http_fetcher.h" #include "update_engine/common/test_utils.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/omaha_request_action.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/omaha_request_action.h" class Environment { public: diff --git a/omaha_request_action_unittest.cc b/cros/omaha_request_action_unittest.cc similarity index 99% rename from omaha_request_action_unittest.cc rename to cros/omaha_request_action_unittest.cc index 43b20c1d..c3842b85 100644 --- a/omaha_request_action_unittest.cc +++ b/cros/omaha_request_action_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_request_action.h" +#include "update_engine/cros/omaha_request_action.h" #include @@ -44,18 +44,18 @@ #include "update_engine/common/constants.h" #include "update_engine/common/fake_prefs.h" #include "update_engine/common/hash_calculator.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/mock_excluder.h" #include "update_engine/common/mock_http_fetcher.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" #include "update_engine/common/test_utils.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/metrics_reporter_interface.h" -#include "update_engine/mock_connection_manager.h" -#include "update_engine/mock_payload_state.h" -#include "update_engine/omaha_request_builder_xml.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/omaha_utils.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/mock_connection_manager.h" +#include "update_engine/cros/mock_payload_state.h" +#include "update_engine/cros/omaha_request_builder_xml.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/omaha_utils.h" #include "update_engine/update_manager/rollback_prefs.h" using base::Time; diff --git a/omaha_request_builder_xml.cc b/cros/omaha_request_builder_xml.cc similarity index 99% rename from omaha_request_builder_xml.cc rename to cros/omaha_request_builder_xml.cc index c8758ab9..43ee5486 100644 --- a/omaha_request_builder_xml.cc +++ b/cros/omaha_request_builder_xml.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/omaha_request_builder_xml.h" +#include "update_engine/cros/omaha_request_builder_xml.h" #include @@ -30,7 +30,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/utils.h" -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/omaha_request_params.h" using std::string; diff --git a/omaha_request_builder_xml.h b/cros/omaha_request_builder_xml.h similarity index 95% rename from omaha_request_builder_xml.h rename to cros/omaha_request_builder_xml.h index 50c708da..4f860dd2 100644 --- a/omaha_request_builder_xml.h +++ b/cros/omaha_request_builder_xml.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_ -#define UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_ +#ifndef UPDATE_ENGINE_CROS_OMAHA_REQUEST_BUILDER_XML_H_ +#define UPDATE_ENGINE_CROS_OMAHA_REQUEST_BUILDER_XML_H_ #include #include @@ -33,9 +33,9 @@ #include "update_engine/common/action.h" #include "update_engine/common/http_fetcher.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/omaha_response.h" -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/omaha_response.h" namespace chromeos_update_engine { @@ -196,4 +196,4 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_ +#endif // UPDATE_ENGINE_CROS_OMAHA_REQUEST_BUILDER_XML_H_ diff --git a/omaha_request_builder_xml_unittest.cc b/cros/omaha_request_builder_xml_unittest.cc similarity index 99% rename from omaha_request_builder_xml_unittest.cc rename to cros/omaha_request_builder_xml_unittest.cc index 042d9919..11d808bb 100644 --- a/omaha_request_builder_xml_unittest.cc +++ b/cros/omaha_request_builder_xml_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_request_builder_xml.h" +#include "update_engine/cros/omaha_request_builder_xml.h" #include #include @@ -23,7 +23,7 @@ #include #include -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" using std::pair; using std::string; diff --git a/omaha_request_params.cc b/cros/omaha_request_params.cc similarity index 99% rename from omaha_request_params.cc rename to cros/omaha_request_params.cc index ce6fd27d..c814e00f 100644 --- a/omaha_request_params.cc +++ b/cros/omaha_request_params.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/omaha_request_params.h" #include #include @@ -35,8 +35,8 @@ #include "update_engine/common/constants.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/platform_constants.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" -#include "update_engine/system_state.h" #include "update_engine/update_manager/policy.h" #define CALL_MEMBER_FN(object, member) ((object).*(member)) diff --git a/omaha_request_params.h b/cros/omaha_request_params.h similarity index 98% rename from omaha_request_params.h rename to cros/omaha_request_params.h index 1bf7ae7a..26ea1c9a 100644 --- a/omaha_request_params.h +++ b/cros/omaha_request_params.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_OMAHA_REQUEST_PARAMS_H_ -#define UPDATE_ENGINE_OMAHA_REQUEST_PARAMS_H_ +#ifndef UPDATE_ENGINE_CROS_OMAHA_REQUEST_PARAMS_H_ +#define UPDATE_ENGINE_CROS_OMAHA_REQUEST_PARAMS_H_ #include @@ -29,7 +29,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/platform_constants.h" -#include "update_engine/image_properties.h" +#include "update_engine/cros/image_properties.h" #include "update_engine/update_manager/policy.h" // This gathers local system information and prepares info used by the @@ -359,7 +359,7 @@ class OmahaRequestParams { // The value defining the parameters of the LTS (Long Term Support). std::string lts_tag_; - std::string hwid_; // Hardware Qualification ID of the client + std::string hwid_; // Hardware Qualification ID of the client // TODO(b:133324571) tracks removal of this field once it is no longer // needed in AU requests. Remove by October 1st 2019. std::string device_requisition_; // Chrome OS Requisition type. @@ -417,4 +417,4 @@ class OmahaRequestParams { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_OMAHA_REQUEST_PARAMS_H_ +#endif // UPDATE_ENGINE_CROS_OMAHA_REQUEST_PARAMS_H_ diff --git a/omaha_request_params_unittest.cc b/cros/omaha_request_params_unittest.cc similarity index 98% rename from omaha_request_params_unittest.cc rename to cros/omaha_request_params_unittest.cc index fcf80625..71f3d4c7 100644 --- a/omaha_request_params_unittest.cc +++ b/cros/omaha_request_params_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/omaha_request_params.h" #include @@ -29,7 +29,7 @@ #include "update_engine/common/platform_constants.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" using chromeos_update_engine::test_utils::WriteFileString; using std::string; diff --git a/omaha_response.h b/cros/omaha_response.h similarity index 96% rename from omaha_response.h rename to cros/omaha_response.h index f50c14eb..43783d68 100644 --- a/omaha_response.h +++ b/cros/omaha_response.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_OMAHA_RESPONSE_H_ -#define UPDATE_ENGINE_OMAHA_RESPONSE_H_ +#ifndef UPDATE_ENGINE_CROS_OMAHA_RESPONSE_H_ +#define UPDATE_ENGINE_CROS_OMAHA_RESPONSE_H_ #include #include @@ -118,4 +118,4 @@ static_assert(sizeof(off_t) == 8, "off_t not 64 bit"); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_OMAHA_RESPONSE_H_ +#endif // UPDATE_ENGINE_CROS_OMAHA_RESPONSE_H_ diff --git a/omaha_response_handler_action.cc b/cros/omaha_response_handler_action.cc similarity index 98% rename from omaha_response_handler_action.cc rename to cros/omaha_response_handler_action.cc index 67de64b5..b6c223f0 100644 --- a/omaha_response_handler_action.cc +++ b/cros/omaha_response_handler_action.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/omaha_response_handler_action.h" +#include "update_engine/cros/omaha_response_handler_action.h" #include #include @@ -28,10 +28,10 @@ #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_manager_interface.h" -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/connection_manager_interface.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/payload_state_interface.h" #include "update_engine/payload_consumer/delta_performer.h" -#include "update_engine/payload_state_interface.h" #include "update_engine/update_manager/policy.h" #include "update_engine/update_manager/update_manager.h" diff --git a/omaha_response_handler_action.h b/cros/omaha_response_handler_action.h similarity index 91% rename from omaha_response_handler_action.h rename to cros/omaha_response_handler_action.h index d2e6db86..f3b821ef 100644 --- a/omaha_response_handler_action.h +++ b/cros/omaha_response_handler_action.h @@ -14,17 +14,17 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_OMAHA_RESPONSE_HANDLER_ACTION_H_ -#define UPDATE_ENGINE_OMAHA_RESPONSE_HANDLER_ACTION_H_ +#ifndef UPDATE_ENGINE_CROS_OMAHA_RESPONSE_HANDLER_ACTION_H_ +#define UPDATE_ENGINE_CROS_OMAHA_RESPONSE_HANDLER_ACTION_H_ #include #include // for FRIEND_TEST #include "update_engine/common/action.h" -#include "update_engine/omaha_request_action.h" +#include "update_engine/common/system_state.h" +#include "update_engine/cros/omaha_request_action.h" #include "update_engine/payload_consumer/install_plan.h" -#include "update_engine/system_state.h" // This class reads in an Omaha response and converts what it sees into // an install plan which is passed out. @@ -90,4 +90,4 @@ class OmahaResponseHandlerAction : public Action { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_OMAHA_RESPONSE_HANDLER_ACTION_H_ +#endif // UPDATE_ENGINE_CROS_OMAHA_RESPONSE_HANDLER_ACTION_H_ diff --git a/omaha_response_handler_action_unittest.cc b/cros/omaha_response_handler_action_unittest.cc similarity index 99% rename from omaha_response_handler_action_unittest.cc rename to cros/omaha_response_handler_action_unittest.cc index 530c4af0..8da32059 100644 --- a/omaha_response_handler_action_unittest.cc +++ b/cros/omaha_response_handler_action_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_response_handler_action.h" +#include "update_engine/cros/omaha_response_handler_action.h" #include #include @@ -29,8 +29,8 @@ #include "update_engine/common/platform_constants.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/mock_payload_state.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/mock_payload_state.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/update_manager/mock_policy.h" diff --git a/omaha_utils.cc b/cros/omaha_utils.cc similarity index 96% rename from omaha_utils.cc rename to cros/omaha_utils.cc index c7f99217..fc05cb9b 100644 --- a/omaha_utils.cc +++ b/cros/omaha_utils.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/omaha_utils.h" +#include "update_engine/cros/omaha_utils.h" #include #include diff --git a/omaha_utils.h b/cros/omaha_utils.h similarity index 89% rename from omaha_utils.h rename to cros/omaha_utils.h index 458bf9eb..67416353 100644 --- a/omaha_utils.h +++ b/cros/omaha_utils.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_OMAHA_UTILS_H_ -#define UPDATE_ENGINE_OMAHA_UTILS_H_ +#ifndef UPDATE_ENGINE_CROS_OMAHA_UTILS_H_ +#define UPDATE_ENGINE_CROS_OMAHA_UTILS_H_ #include @@ -36,4 +36,4 @@ EolDate StringToEolDate(const std::string& eol_date); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_OMAHA_UTILS_H_ +#endif // UPDATE_ENGINE_CROS_OMAHA_UTILS_H_ diff --git a/omaha_utils_unittest.cc b/cros/omaha_utils_unittest.cc similarity index 96% rename from omaha_utils_unittest.cc rename to cros/omaha_utils_unittest.cc index 849905aa..f89f6902 100644 --- a/omaha_utils_unittest.cc +++ b/cros/omaha_utils_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/omaha_utils.h" +#include "update_engine/cros/omaha_utils.h" #include #include diff --git a/p2p_manager.cc b/cros/p2p_manager.cc similarity index 99% rename from p2p_manager.cc rename to cros/p2p_manager.cc index 00ff8cec..dc12b356 100644 --- a/p2p_manager.cc +++ b/cros/p2p_manager.cc @@ -23,7 +23,7 @@ #define _BSD_SOURCE #endif -#include "update_engine/p2p_manager.h" +#include "update_engine/cros/p2p_manager.h" #include #include diff --git a/p2p_manager.h b/cros/p2p_manager.h similarity index 98% rename from p2p_manager.h rename to cros/p2p_manager.h index ef62f0df..bd359fae 100644 --- a/p2p_manager.h +++ b/cros/p2p_manager.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_P2P_MANAGER_H_ -#define UPDATE_ENGINE_P2P_MANAGER_H_ +#ifndef UPDATE_ENGINE_CROS_P2P_MANAGER_H_ +#define UPDATE_ENGINE_CROS_P2P_MANAGER_H_ #include #include @@ -183,4 +183,4 @@ class P2PManager { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_P2P_MANAGER_H_ +#endif // UPDATE_ENGINE_CROS_P2P_MANAGER_H_ diff --git a/p2p_manager_unittest.cc b/cros/p2p_manager_unittest.cc similarity index 99% rename from p2p_manager_unittest.cc rename to cros/p2p_manager_unittest.cc index 5510dd7b..8b6d7411 100644 --- a/p2p_manager_unittest.cc +++ b/cros/p2p_manager_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/p2p_manager.h" +#include "update_engine/cros/p2p_manager.h" #include #include @@ -51,7 +51,7 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/fake_p2p_manager_configuration.h" +#include "update_engine/cros/fake_p2p_manager_configuration.h" #include "update_engine/update_manager/fake_update_manager.h" #include "update_engine/update_manager/mock_policy.h" diff --git a/payload_state.cc b/cros/payload_state.cc similarity index 99% rename from payload_state.cc rename to cros/payload_state.cc index 1d1583b9..d2e6851e 100644 --- a/payload_state.cc +++ b/cros/payload_state.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/payload_state.h" +#include "update_engine/cros/payload_state.h" #include #include @@ -29,15 +29,15 @@ #include "update_engine/common/constants.h" #include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/prefs.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_manager_interface.h" -#include "update_engine/metrics_reporter_interface.h" +#include "update_engine/cros/connection_manager_interface.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/update_attempter.h" #include "update_engine/metrics_utils.h" -#include "update_engine/omaha_request_params.h" #include "update_engine/payload_consumer/install_plan.h" -#include "update_engine/system_state.h" -#include "update_engine/update_attempter.h" using base::Time; using base::TimeDelta; diff --git a/payload_state.h b/cros/payload_state.h similarity index 99% rename from payload_state.h rename to cros/payload_state.h index 77197a7a..08272730 100644 --- a/payload_state.h +++ b/cros/payload_state.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_PAYLOAD_STATE_H_ -#define UPDATE_ENGINE_PAYLOAD_STATE_H_ +#ifndef UPDATE_ENGINE_CROS_PAYLOAD_STATE_H_ +#define UPDATE_ENGINE_CROS_PAYLOAD_STATE_H_ #include #include @@ -25,9 +25,9 @@ #include // for FRIEND_TEST #include "update_engine/common/excluder_interface.h" +#include "update_engine/common/metrics_constants.h" #include "update_engine/common/prefs_interface.h" -#include "update_engine/metrics_constants.h" -#include "update_engine/payload_state_interface.h" +#include "update_engine/cros/payload_state_interface.h" namespace chromeos_update_engine { @@ -599,4 +599,4 @@ class PayloadState : public PayloadStateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_PAYLOAD_STATE_H_ +#endif // UPDATE_ENGINE_CROS_PAYLOAD_STATE_H_ diff --git a/payload_state_interface.h b/cros/payload_state_interface.h similarity index 97% rename from payload_state_interface.h rename to cros/payload_state_interface.h index d384a0eb..9ead6503 100644 --- a/payload_state_interface.h +++ b/cros/payload_state_interface.h @@ -14,14 +14,14 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_PAYLOAD_STATE_INTERFACE_H_ -#define UPDATE_ENGINE_PAYLOAD_STATE_INTERFACE_H_ +#ifndef UPDATE_ENGINE_CROS_PAYLOAD_STATE_INTERFACE_H_ +#define UPDATE_ENGINE_CROS_PAYLOAD_STATE_INTERFACE_H_ #include #include "update_engine/common/action_processor.h" #include "update_engine/common/constants.h" -#include "update_engine/omaha_response.h" +#include "update_engine/cros/omaha_response.h" namespace chromeos_update_engine { @@ -212,4 +212,4 @@ class PayloadStateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_PAYLOAD_STATE_INTERFACE_H_ +#endif // UPDATE_ENGINE_CROS_PAYLOAD_STATE_INTERFACE_H_ diff --git a/payload_state_unittest.cc b/cros/payload_state_unittest.cc similarity index 99% rename from payload_state_unittest.cc rename to cros/payload_state_unittest.cc index 2d571c12..b48cff48 100644 --- a/payload_state_unittest.cc +++ b/cros/payload_state_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/payload_state.h" +#include "update_engine/cros/payload_state.h" #include #include @@ -27,14 +27,14 @@ #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/fake_prefs.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/mock_excluder.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/prefs.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/metrics_reporter_interface.h" -#include "update_engine/omaha_request_action.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/omaha_request_action.h" using base::Time; using base::TimeDelta; diff --git a/common/platform_constants_chromeos.cc b/cros/platform_constants_chromeos.cc similarity index 100% rename from common/platform_constants_chromeos.cc rename to cros/platform_constants_chromeos.cc diff --git a/power_manager_chromeos.cc b/cros/power_manager_chromeos.cc similarity index 93% rename from power_manager_chromeos.cc rename to cros/power_manager_chromeos.cc index 531d367d..c1a28591 100644 --- a/power_manager_chromeos.cc +++ b/cros/power_manager_chromeos.cc @@ -14,14 +14,14 @@ // limitations under the License. // -#include "update_engine/power_manager_chromeos.h" +#include "update_engine/cros/power_manager_chromeos.h" #include #include #include -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" namespace chromeos_update_engine { diff --git a/power_manager_chromeos.h b/cros/power_manager_chromeos.h similarity index 84% rename from power_manager_chromeos.h rename to cros/power_manager_chromeos.h index eeb14d84..89305083 100644 --- a/power_manager_chromeos.h +++ b/cros/power_manager_chromeos.h @@ -14,13 +14,13 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_ -#define UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_ +#ifndef UPDATE_ENGINE_CROS_POWER_MANAGER_CHROMEOS_H_ +#define UPDATE_ENGINE_CROS_POWER_MANAGER_CHROMEOS_H_ #include #include -#include "update_engine/power_manager_interface.h" +#include "update_engine/cros/power_manager_interface.h" namespace chromeos_update_engine { @@ -41,4 +41,4 @@ class PowerManagerChromeOS : public PowerManagerInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_ +#endif // UPDATE_ENGINE_CROS_POWER_MANAGER_CHROMEOS_H_ diff --git a/power_manager_interface.h b/cros/power_manager_interface.h similarity index 88% rename from power_manager_interface.h rename to cros/power_manager_interface.h index 8f776500..1f712d26 100644 --- a/power_manager_interface.h +++ b/cros/power_manager_interface.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_ -#define UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_ +#ifndef UPDATE_ENGINE_CROS_POWER_MANAGER_INTERFACE_H_ +#define UPDATE_ENGINE_CROS_POWER_MANAGER_INTERFACE_H_ #include @@ -44,4 +44,4 @@ std::unique_ptr CreatePowerManager(); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_POWER_MANAGER_INTERFACE_H_ +#endif // UPDATE_ENGINE_CROS_POWER_MANAGER_INTERFACE_H_ diff --git a/real_system_state.cc b/cros/real_system_state.cc similarity index 98% rename from real_system_state.cc rename to cros/real_system_state.cc index 924271ed..4f572465 100644 --- a/real_system_state.cc +++ b/cros/real_system_state.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/real_system_state.h" +#include "update_engine/cros/real_system_state.h" #include #include @@ -35,11 +35,11 @@ #include "update_engine/common/dlcservice_interface.h" #include "update_engine/common/hardware.h" #include "update_engine/common/utils.h" -#include "update_engine/metrics_reporter_omaha.h" -#include "update_engine/update_boot_flags_action.h" +#include "update_engine/cros/metrics_reporter_omaha.h" #if USE_DBUS -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" #endif // USE_DBUS +#include "update_engine/update_boot_flags_action.h" #include "update_engine/update_manager/state_factory.h" using brillo::MessageLoop; diff --git a/real_system_state.h b/cros/real_system_state.h similarity index 90% rename from real_system_state.h rename to cros/real_system_state.h index 807a205b..798fca0d 100644 --- a/real_system_state.h +++ b/cros/real_system_state.h @@ -14,10 +14,10 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_REAL_SYSTEM_STATE_H_ -#define UPDATE_ENGINE_REAL_SYSTEM_STATE_H_ +#ifndef UPDATE_ENGINE_CROS_REAL_SYSTEM_STATE_H_ +#define UPDATE_ENGINE_CROS_REAL_SYSTEM_STATE_H_ -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" #include #include @@ -31,17 +31,17 @@ #include "update_engine/certificate_checker.h" #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/clock.h" +#include "update_engine/common/daemon_state_interface.h" #include "update_engine/common/dlcservice_interface.h" #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/prefs.h" -#include "update_engine/connection_manager_interface.h" -#include "update_engine/daemon_state_interface.h" -#include "update_engine/metrics_reporter_interface.h" -#include "update_engine/metrics_reporter_omaha.h" -#include "update_engine/p2p_manager.h" -#include "update_engine/payload_state.h" -#include "update_engine/power_manager_interface.h" -#include "update_engine/update_attempter.h" +#include "update_engine/cros/connection_manager_interface.h" +#include "update_engine/cros/metrics_reporter_omaha.h" +#include "update_engine/cros/p2p_manager.h" +#include "update_engine/cros/payload_state.h" +#include "update_engine/cros/power_manager_interface.h" +#include "update_engine/cros/update_attempter.h" #include "update_engine/update_manager/update_manager.h" namespace chromeos_update_engine { @@ -199,4 +199,4 @@ class RealSystemState : public SystemState, public DaemonStateInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_REAL_SYSTEM_STATE_H_ +#endif // UPDATE_ENGINE_CROS_REAL_SYSTEM_STATE_H_ diff --git a/requisition_util.cc b/cros/requisition_util.cc similarity index 97% rename 
from requisition_util.cc rename to cros/requisition_util.cc index 5445bceb..6296d0bb 100644 --- a/requisition_util.cc +++ b/cros/requisition_util.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/requisition_util.h" +#include "update_engine/cros/requisition_util.h" #include #include diff --git a/requisition_util.h b/cros/requisition_util.h similarity index 86% rename from requisition_util.h rename to cros/requisition_util.h index 8577ee7d..6ec47833 100644 --- a/requisition_util.h +++ b/cros/requisition_util.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_REQUISITION_UTIL_H_ -#define UPDATE_ENGINE_REQUISITION_UTIL_H_ +#ifndef UPDATE_ENGINE_CROS_REQUISITION_UTIL_H_ +#define UPDATE_ENGINE_CROS_REQUISITION_UTIL_H_ #include @@ -29,4 +29,4 @@ std::string ReadDeviceRequisition(const base::FilePath& local_state); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_REQUISITION_UTIL_H_ +#endif // UPDATE_ENGINE_CROS_REQUISITION_UTIL_H_ diff --git a/requisition_util_unittest.cc b/cros/requisition_util_unittest.cc similarity index 98% rename from requisition_util_unittest.cc rename to cros/requisition_util_unittest.cc index c21c9c74..269585eb 100644 --- a/requisition_util_unittest.cc +++ b/cros/requisition_util_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/requisition_util.h" +#include "update_engine/cros/requisition_util.h" #include diff --git a/shill_proxy.cc b/cros/shill_proxy.cc similarity index 93% rename from shill_proxy.cc rename to cros/shill_proxy.cc index d398bbab..a3c8543a 100644 --- a/shill_proxy.cc +++ b/cros/shill_proxy.cc @@ -14,9 +14,9 @@ // limitations under the License. // -#include "update_engine/shill_proxy.h" +#include "update_engine/cros/shill_proxy.h" -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" using org::chromium::flimflam::ManagerProxy; using org::chromium::flimflam::ManagerProxyInterface; diff --git a/shill_proxy.h b/cros/shill_proxy.h similarity index 89% rename from shill_proxy.h rename to cros/shill_proxy.h index 4b466c91..aff428a7 100644 --- a/shill_proxy.h +++ b/cros/shill_proxy.h @@ -14,8 +14,8 @@ // limitations under the License. // -#ifndef UPDATE_ENGINE_SHILL_PROXY_H_ -#define UPDATE_ENGINE_SHILL_PROXY_H_ +#ifndef UPDATE_ENGINE_CROS_SHILL_PROXY_H_ +#define UPDATE_ENGINE_CROS_SHILL_PROXY_H_ #include #include @@ -25,7 +25,7 @@ #include #include -#include "update_engine/shill_proxy_interface.h" +#include "update_engine/cros/shill_proxy_interface.h" namespace chromeos_update_engine { @@ -51,4 +51,4 @@ class ShillProxy : public ShillProxyInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_SHILL_PROXY_H_ +#endif // UPDATE_ENGINE_CROS_SHILL_PROXY_H_ diff --git a/shill_proxy_interface.h b/cros/shill_proxy_interface.h similarity index 92% rename from shill_proxy_interface.h rename to cros/shill_proxy_interface.h index 5f6b44ec..19e81f30 100644 --- a/shill_proxy_interface.h +++ b/cros/shill_proxy_interface.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_SHILL_PROXY_INTERFACE_H_ -#define UPDATE_ENGINE_SHILL_PROXY_INTERFACE_H_ +#ifndef UPDATE_ENGINE_CROS_SHILL_PROXY_INTERFACE_H_ +#define UPDATE_ENGINE_CROS_SHILL_PROXY_INTERFACE_H_ #include #include @@ -53,4 +53,4 @@ class ShillProxyInterface { } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_SHILL_PROXY_INTERFACE_H_ +#endif // UPDATE_ENGINE_CROS_SHILL_PROXY_INTERFACE_H_ diff --git a/update_attempter.cc b/cros/update_attempter.cc similarity index 99% rename from update_attempter.cc rename to cros/update_attempter.cc index 38b0f828..e8cb291e 100644 --- a/update_attempter.cc +++ b/cros/update_attempter.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/update_attempter.h" +#include "update_engine/cros/update_attempter.h" #include @@ -47,26 +47,26 @@ #include "update_engine/common/clock_interface.h" #include "update_engine/common/constants.h" #include "update_engine/common/dlcservice_interface.h" +#include "update_engine/common/download_action.h" #include "update_engine/common/excluder_interface.h" #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" #include "update_engine/common/prefs_interface.h" #include "update_engine/common/subprocess.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" +#include "update_engine/cros/omaha_request_action.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/omaha_response_handler_action.h" +#include "update_engine/cros/omaha_utils.h" +#include "update_engine/cros/p2p_manager.h" +#include "update_engine/cros/payload_state_interface.h" +#include "update_engine/cros/power_manager_interface.h" #include "update_engine/libcurl_http_fetcher.h" -#include "update_engine/metrics_reporter_interface.h" -#include "update_engine/omaha_request_action.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/omaha_response_handler_action.h" -#include "update_engine/omaha_utils.h" -#include "update_engine/p2p_manager.h" -#include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" #include "update_engine/payload_consumer/postinstall_runner_action.h" -#include "update_engine/payload_state_interface.h" -#include "update_engine/power_manager_interface.h" -#include "update_engine/system_state.h" #include "update_engine/update_boot_flags_action.h" #include "update_engine/update_manager/policy.h" #include "update_engine/update_manager/policy_utils.h" diff --git a/update_attempter.h b/cros/update_attempter.h similarity index 97% rename from update_attempter.h rename to cros/update_attempter.h index 3a1bef48..0f4c9524 100644 --- a/update_attempter.h +++ b/cros/update_attempter.h @@ -14,8 +14,8 @@ // limitations under the License. 
// -#ifndef UPDATE_ENGINE_UPDATE_ATTEMPTER_H_ -#define UPDATE_ENGINE_UPDATE_ATTEMPTER_H_ +#ifndef UPDATE_ENGINE_CROS_UPDATE_ATTEMPTER_H_ +#define UPDATE_ENGINE_CROS_UPDATE_ATTEMPTER_H_ #include @@ -30,22 +30,22 @@ #include #include // for FRIEND_TEST -#if USE_CHROME_NETWORK_PROXY -#include "update_engine/chrome_browser_proxy_resolver.h" -#endif // USE_CHROME_NETWORK_PROXY #include "update_engine/certificate_checker.h" #include "update_engine/client_library/include/update_engine/update_status.h" #include "update_engine/common/action_processor.h" #include "update_engine/common/cpu_limiter.h" +#include "update_engine/common/download_action.h" #include "update_engine/common/excluder_interface.h" #include "update_engine/common/proxy_resolver.h" -#include "update_engine/omaha_request_builder_xml.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/omaha_response_handler_action.h" -#include "update_engine/payload_consumer/download_action.h" +#include "update_engine/common/service_observer_interface.h" +#include "update_engine/common/system_state.h" +#if USE_CHROME_NETWORK_PROXY +#include "update_engine/cros/chrome_browser_proxy_resolver.h" +#endif // USE_CHROME_NETWORK_PROXY +#include "update_engine/cros/omaha_request_builder_xml.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/omaha_response_handler_action.h" #include "update_engine/payload_consumer/postinstall_runner_action.h" -#include "update_engine/service_observer_interface.h" -#include "update_engine/system_state.h" #include "update_engine/update_manager/policy.h" #include "update_engine/update_manager/staging_utils.h" #include "update_engine/update_manager/update_manager.h" @@ -150,7 +150,7 @@ class UpdateAttempter : public ActionProcessorDelegate, // UPDATED_NEED_REBOOT. Returns true on success, false otherwise. bool RebootIfNeeded(); - // Sets the DLC as active or inactive. See common_service.h + // Sets the DLC as active or inactive. See chromeos/common_service.h virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id); // DownloadActionDelegate methods: @@ -573,4 +573,4 @@ ErrorCode GetErrorCodeForAction(AbstractAction* action, ErrorCode code); } // namespace chromeos_update_engine -#endif // UPDATE_ENGINE_UPDATE_ATTEMPTER_H_ +#endif // UPDATE_ENGINE_CROS_UPDATE_ATTEMPTER_H_ diff --git a/update_attempter_unittest.cc b/cros/update_attempter_unittest.cc similarity index 99% rename from update_attempter_unittest.cc rename to cros/update_attempter_unittest.cc index 8935bebb..f3211a0e 100644 --- a/update_attempter_unittest.cc +++ b/cros/update_attempter_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/update_attempter.h" +#include "update_engine/cros/update_attempter.h" #include @@ -43,16 +43,16 @@ #include "update_engine/common/mock_action_processor.h" #include "update_engine/common/mock_http_fetcher.h" #include "update_engine/common/mock_prefs.h" +#include "update_engine/common/mock_service_observer.h" #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/mock_p2p_manager.h" +#include "update_engine/cros/mock_payload_state.h" +#include "update_engine/cros/omaha_utils.h" #include "update_engine/libcurl_http_fetcher.h" -#include "update_engine/mock_p2p_manager.h" -#include "update_engine/mock_payload_state.h" -#include "update_engine/mock_service_observer.h" -#include "update_engine/omaha_utils.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/payload_constants.h" diff --git a/update_engine_client.cc b/cros/update_engine_client.cc similarity index 99% rename from update_engine_client.cc rename to cros/update_engine_client.cc index 31448eaa..6f20f11a 100644 --- a/update_engine_client.cc +++ b/cros/update_engine_client.cc @@ -37,7 +37,7 @@ #include "update_engine/client.h" #include "update_engine/common/error_code.h" #include "update_engine/common/error_code_utils.h" -#include "update_engine/omaha_utils.h" +#include "update_engine/cros/omaha_utils.h" #include "update_engine/status_update_handler.h" #include "update_engine/update_status.h" #include "update_engine/update_status_utils.h" diff --git a/payload_consumer/download_action.cc b/download_action.cc similarity index 98% rename from payload_consumer/download_action.cc rename to download_action.cc index ea998926..10dffd2d 100644 --- a/payload_consumer/download_action.cc +++ b/download_action.cc @@ -14,7 +14,7 @@ // limitations under the License. 
// -#include "update_engine/payload_consumer/download_action.h" +#include "update_engine/common/download_action.h" #include @@ -30,9 +30,9 @@ #include "update_engine/common/error_code_utils.h" #include "update_engine/common/multi_range_http_fetcher.h" #include "update_engine/common/utils.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/p2p_manager.h" -#include "update_engine/payload_state_interface.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/p2p_manager.h" +#include "update_engine/cros/payload_state_interface.h" using base::FilePath; using std::string; diff --git a/payload_consumer/download_action_android_unittest.cc b/download_action_android_unittest.cc similarity index 98% rename from payload_consumer/download_action_android_unittest.cc rename to download_action_android_unittest.cc index f78845f5..f2229770 100644 --- a/payload_consumer/download_action_android_unittest.cc +++ b/download_action_android_unittest.cc @@ -23,10 +23,10 @@ #include "update_engine/common/action_pipe.h" #include "update_engine/common/boot_control_stub.h" #include "update_engine/common/constants.h" +#include "update_engine/common/download_action.h" #include "update_engine/common/mock_http_fetcher.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/test_utils.h" -#include "update_engine/payload_consumer/download_action.h" #include #include diff --git a/payload_consumer/download_action_unittest.cc b/download_action_unittest.cc similarity index 98% rename from payload_consumer/download_action_unittest.cc rename to download_action_unittest.cc index 9daa7914..5264b0f7 100644 --- a/payload_consumer/download_action_unittest.cc +++ b/download_action_unittest.cc @@ -14,7 +14,7 @@ // limitations under the License. // -#include "update_engine/payload_consumer/download_action.h" +#include "update_engine/common/download_action.h" #include #include @@ -34,14 +34,14 @@ #include "update_engine/common/action_pipe.h" #include "update_engine/common/hash_calculator.h" +#include "update_engine/common/mock_download_action.h" #include "update_engine/common/mock_http_fetcher.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/fake_p2p_manager_configuration.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/mock_file_writer.h" -#include "update_engine/payload_consumer/mock_download_action.h" +#include "update_engine/cros/fake_p2p_manager_configuration.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/payload_consumer/mock_file_writer.h" #include "update_engine/update_manager/fake_update_manager.h" namespace chromeos_update_engine { diff --git a/hardware_android_unittest.cc b/hardware_android_unittest.cc deleted file mode 100644 index 9a491f3c..00000000 --- a/hardware_android_unittest.cc +++ /dev/null @@ -1,67 +0,0 @@ -// -// Copyright (C) 2020 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include - -#include "update_engine/common/error_code.h" -#include "update_engine/common/test_utils.h" -#include "update_engine/hardware_android.h" - -namespace chromeos_update_engine { - -TEST(HardwareAndroidTest, IsKernelUpdateValid) { - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "")) - << "Legacy update should be fine"; - - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", - "5.4.42-android12-0")) - << "Update to GKI should be fine"; - - EXPECT_EQ( - ErrorCode::kDownloadManifestParseError, - HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "5.4.42-not-gki")) - << "Should report parse error for invalid version field"; - - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-android12-0-something", "5.4.42-android12-0-something")) - << "Self update should be fine"; - - EXPECT_EQ(ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid( - "5.4.42-android12-0-something", "5.4.43-android12-0-something")) - << "Sub-level update should be fine"; - - EXPECT_EQ( - ErrorCode::kSuccess, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.10.10-android12-0-something")) - << "KMI version update should be fine"; - - EXPECT_EQ(ErrorCode::kPayloadTimestampError, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.4.5-android12-0-something")) - << "Should detect sub-level downgrade"; - - EXPECT_EQ(ErrorCode::kPayloadTimestampError, - HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something", - "5.1.5-android12-0-something")) - << "Should detect KMI version downgrade"; -} - -} // namespace chromeos_update_engine diff --git a/main.cc b/main.cc index ceb5b566..a23a08bf 100644 --- a/main.cc +++ b/main.cc @@ -23,11 +23,11 @@ #include #include +#include "update_engine/common/daemon_base.h" +#include "update_engine/common/logging.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" -#include "update_engine/daemon_base.h" -#include "update_engine/logging.h" using std::string; diff --git a/metrics_utils.cc b/metrics_utils.cc index 2211a67b..19f274b2 100644 --- a/metrics_utils.cc +++ b/metrics_utils.cc @@ -23,7 +23,6 @@ #include "update_engine/common/clock_interface.h" #include "update_engine/common/constants.h" #include "update_engine/common/utils.h" -#include "update_engine/system_state.h" using base::Time; using base::TimeDelta; @@ -293,48 +292,6 @@ metrics::ConnectionType GetConnectionType(ConnectionType type, return metrics::ConnectionType::kUnknown; } -bool WallclockDurationHelper(SystemState* system_state, - const std::string& state_variable_key, - TimeDelta* out_duration) { - bool ret = false; - - Time now = system_state->clock()->GetWallclockTime(); - int64_t stored_value; - if (system_state->prefs()->GetInt64(state_variable_key, &stored_value)) { - Time stored_time = Time::FromInternalValue(stored_value); - if (stored_time > now) { - LOG(ERROR) << "Stored time-stamp used for " << state_variable_key - << " is in the future."; - } else { - *out_duration = now - stored_time; - ret = true; - } - } - - if (!system_state->prefs()->SetInt64(state_variable_key, - now.ToInternalValue())) { - LOG(ERROR) << "Error storing time-stamp in " << state_variable_key; - } - - return ret; -} - -bool MonotonicDurationHelper(SystemState* system_state, - 
int64_t* storage, - TimeDelta* out_duration) { - bool ret = false; - - Time now = system_state->clock()->GetMonotonicTime(); - if (*storage != 0) { - Time stored_time = Time::FromInternalValue(*storage); - *out_duration = now - stored_time; - ret = true; - } - *storage = now.ToInternalValue(); - - return ret; -} - int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs) { CHECK(prefs); if (!prefs->Exists(key)) @@ -404,8 +361,7 @@ bool LoadAndReportTimeToReboot(MetricsReporterInterface* metrics_reporter, return false; Time system_updated_at = Time::FromInternalValue(stored_value); - base::TimeDelta time_to_reboot = - clock->GetMonotonicTime() - system_updated_at; + TimeDelta time_to_reboot = clock->GetMonotonicTime() - system_updated_at; if (time_to_reboot.ToInternalValue() < 0) { LOG(ERROR) << "time_to_reboot is negative - system_updated_at: " << utils::ToString(system_updated_at); diff --git a/metrics_utils.h b/metrics_utils.h index 8f1aad1e..5952ec34 100644 --- a/metrics_utils.h +++ b/metrics_utils.h @@ -22,11 +22,11 @@ #include #include "update_engine/common/clock_interface.h" +#include "update_engine/common/connection_utils.h" #include "update_engine/common/error_code.h" +#include "update_engine/common/metrics_constants.h" +#include "update_engine/common/metrics_reporter_interface.h" #include "update_engine/common/prefs_interface.h" -#include "update_engine/connection_utils.h" -#include "update_engine/metrics_constants.h" -#include "update_engine/metrics_reporter_interface.h" namespace chromeos_update_engine { @@ -50,29 +50,6 @@ metrics::AttemptResult GetAttemptResult(ErrorCode code); metrics::ConnectionType GetConnectionType(ConnectionType type, ConnectionTethering tethering); -// This function returns the duration on the wallclock since the last -// time it was called for the same |state_variable_key| value. -// -// If the function returns |true|, the duration (always non-negative) -// is returned in |out_duration|. If the function returns |false| -// something went wrong or there was no previous measurement. -bool WallclockDurationHelper(SystemState* system_state, - const std::string& state_variable_key, - base::TimeDelta* out_duration); - -// This function returns the duration on the monotonic clock since the -// last time it was called for the same |storage| pointer. -// -// You should pass a pointer to a 64-bit integer in |storage| which -// should be initialized to 0. -// -// If the function returns |true|, the duration (always non-negative) -// is returned in |out_duration|. If the function returns |false| -// something went wrong or there was no previous measurement. -bool MonotonicDurationHelper(SystemState* system_state, - int64_t* storage, - base::TimeDelta* out_duration); - // Returns the persisted value from prefs for the given key. It also // validates that the value returned is non-negative. 
int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs); diff --git a/metrics_utils_unittest.cc b/metrics_utils_unittest.cc index 6ea996fa..cedd2691 100644 --- a/metrics_utils_unittest.cc +++ b/metrics_utils_unittest.cc @@ -20,7 +20,6 @@ #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_prefs.h" -#include "update_engine/fake_system_state.h" namespace chromeos_update_engine { namespace metrics_utils { @@ -74,116 +73,5 @@ TEST(MetricsUtilsTest, GetConnectionType) { GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown)); } -TEST(MetricsUtilsTest, WallclockDurationHelper) { - FakeSystemState fake_system_state; - FakeClock fake_clock; - base::TimeDelta duration; - const std::string state_variable_key = "test-prefs"; - FakePrefs fake_prefs; - - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); - - // Initialize wallclock to 1 sec. - fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000)); - - // First time called so no previous measurement available. - EXPECT_FALSE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - - // Next time, we should get zero since the clock didn't advance. - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // We can also call it as many times as we want with it being - // considered a failure. - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // Advance the clock one second, then we should get 1 sec on the - // next call and 0 sec on the subsequent call. - fake_clock.SetWallclockTime(base::Time::FromInternalValue(2000000)); - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 1); - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // Advance clock two seconds and we should get 2 sec and then 0 sec. - fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000)); - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 2); - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // There's a possibility that the wallclock can go backwards (NTP - // adjustments, for example) so check that we properly handle this - // case. - fake_clock.SetWallclockTime(base::Time::FromInternalValue(3000000)); - EXPECT_FALSE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000)); - EXPECT_TRUE(metrics_utils::WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); - EXPECT_EQ(duration.InSeconds(), 1); -} - -TEST(MetricsUtilsTest, MonotonicDurationHelper) { - int64_t storage = 0; - FakeSystemState fake_system_state; - FakeClock fake_clock; - base::TimeDelta duration; - - fake_system_state.set_clock(&fake_clock); - - // Initialize monotonic clock to 1 sec. 
- fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000)); - - // First time called so no previous measurement available. - EXPECT_FALSE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - - // Next time, we should get zero since the clock didn't advance. - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // We can also call it as many times as we want with it being - // considered a failure. - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // Advance the clock one second, then we should get 1 sec on the - // next call and 0 sec on the subsequent call. - fake_clock.SetMonotonicTime(base::Time::FromInternalValue(2000000)); - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 1); - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); - - // Advance clock two seconds and we should get 2 sec and then 0 sec. - fake_clock.SetMonotonicTime(base::Time::FromInternalValue(4000000)); - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 2); - EXPECT_TRUE(metrics_utils::MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); - EXPECT_EQ(duration.InSeconds(), 0); -} - } // namespace metrics_utils } // namespace chromeos_update_engine diff --git a/mock_boot_control_hal.h b/mock_boot_control_hal.h deleted file mode 100644 index 4e9cb508..00000000 --- a/mock_boot_control_hal.h +++ /dev/null @@ -1,49 +0,0 @@ -// -// Copyright (C) 2018 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include -#include - -#include - -namespace chromeos_update_engine { - -class MockBootControlHal - : public ::android::hardware::boot::V1_0::IBootControl { - public: - MOCK_METHOD0(getNumberSlots, ::android::hardware::Return()); - MOCK_METHOD0(getCurrentSlot, ::android::hardware::Return()); - MOCK_METHOD1(markBootSuccessful, - ::android::hardware::Return(markBootSuccessful_cb)); - MOCK_METHOD2(setActiveBootSlot, - ::android::hardware::Return(uint32_t, - setActiveBootSlot_cb)); - MOCK_METHOD2(setSlotAsUnbootable, - ::android::hardware::Return(uint32_t, - setSlotAsUnbootable_cb)); - MOCK_METHOD1( - isSlotBootable, - ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>( - uint32_t)); - MOCK_METHOD1( - isSlotMarkedSuccessful, - ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>( - uint32_t)); - MOCK_METHOD2(getSuffix, - ::android::hardware::Return(uint32_t, getSuffix_cb)); -}; - -} // namespace chromeos_update_engine diff --git a/mock_libcurl_http_fetcher.h b/mock_libcurl_http_fetcher.h index a8ef0f44..a14f9530 100644 --- a/mock_libcurl_http_fetcher.h +++ b/mock_libcurl_http_fetcher.h @@ -19,7 +19,7 @@ #include -#include "update_engine/connection_manager_interface.h" +#include "update_engine/libcurl_http_fetcher.h" namespace chromeos_update_engine { diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index d2ed24ab..e6ec67ae 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -41,6 +41,7 @@ #include #include "update_engine/common/constants.h" +#include "update_engine/common/download_action.h" #include "update_engine/common/error_code.h" #include "update_engine/common/error_code_utils.h" #include "update_engine/common/hardware_interface.h" @@ -50,7 +51,6 @@ #include "update_engine/payload_consumer/bzip_extent_writer.h" #include "update_engine/payload_consumer/cached_file_descriptor.h" #include "update_engine/payload_consumer/certificate_parser_interface.h" -#include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/extent_reader.h" #include "update_engine/payload_consumer/extent_writer.h" #include "update_engine/payload_consumer/partition_update_generator_interface.h" @@ -497,6 +497,7 @@ MetadataParseResult DeltaPerformer::ParsePayloadMetadata( << "Trusting metadata size in payload = " << metadata_size_; } + // NOLINTNEXTLINE(whitespace/braces) auto [payload_verifier, perform_verification] = CreatePayloadVerifier(); if (!payload_verifier) { LOG(ERROR) << "Failed to create payload verifier."; @@ -1809,6 +1810,7 @@ ErrorCode DeltaPerformer::VerifyPayload( ErrorCode::kPayloadHashMismatchError, payload_hash_calculator_.raw_hash() == update_check_response_hash); + // NOLINTNEXTLINE(whitespace/braces) auto [payload_verifier, perform_verification] = CreatePayloadVerifier(); if (!perform_verification) { LOG(WARNING) << "Not verifying signed delta payload -- missing public key."; diff --git a/payload_consumer/delta_performer_fuzzer.cc b/payload_consumer/delta_performer_fuzzer.cc index 73082c4c..0ce50819 100644 --- a/payload_consumer/delta_performer_fuzzer.cc +++ b/payload_consumer/delta_performer_fuzzer.cc @@ -18,11 +18,11 @@ #include #include +#include "update_engine/common/download_action.h" #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/prefs.h" #include "update_engine/payload_consumer/delta_performer.h" -#include 
"update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/install_plan.h" namespace chromeos_update_engine { diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 74ddd27d..374131ef 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -37,12 +37,12 @@ #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/fake_prefs.h" +#include "update_engine/common/hardware_interface.h" +#include "update_engine/common/mock_download_action.h" #include "update_engine/common/mock_prefs.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/hardware_android.h" #include "update_engine/payload_consumer/install_plan.h" -#include "update_engine/payload_consumer/mock_download_action.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/payload_metadata.h" #include "update_engine/payload_consumer/payload_verifier.h" diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 65b9dac9..92698828 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -41,10 +41,10 @@ #include "update_engine/common/fake_hardware.h" #include "update_engine/common/fake_prefs.h" #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/mock_download_action.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/fake_file_descriptor.h" -#include "update_engine/payload_consumer/mock_download_action.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_consumer/payload_metadata.h" #include "update_engine/payload_generator/bzip.h" diff --git a/mock_file_writer.h b/payload_consumer/mock_file_writer.h similarity index 100% rename from mock_file_writer.h rename to payload_consumer/mock_file_writer.h diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc index 5910c239..cce86e9b 100644 --- a/payload_consumer/postinstall_runner_action_unittest.cc +++ b/payload_consumer/postinstall_runner_action_unittest.cc @@ -45,7 +45,7 @@ #include "update_engine/common/subprocess.h" #include "update_engine/common/test_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/mock_payload_state.h" +#include "update_engine/cros/mock_payload_state.h" using brillo::MessageLoop; using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder; diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 1944847e..29ec290d 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -29,13 +29,13 @@ #include #include +#include "update_engine/common/download_action.h" #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/common/file_fetcher.h" #include "update_engine/common/prefs.h" #include "update_engine/common/terminator.h" #include "update_engine/common/utils.h" -#include "update_engine/payload_consumer/download_action.h" #include "update_engine/payload_consumer/filesystem_verifier_action.h" #include 
"update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_generator/delta_diff_generator.h" diff --git a/power_manager_android.cc b/power_manager_android.cc deleted file mode 100644 index 63a0351e..00000000 --- a/power_manager_android.cc +++ /dev/null @@ -1,36 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/power_manager_android.h" - -#include - -#include - -namespace chromeos_update_engine { - -namespace power_manager { -std::unique_ptr CreatePowerManager() { - return std::unique_ptr(new PowerManagerAndroid()); -} -} // namespace power_manager - -bool PowerManagerAndroid::RequestReboot() { - LOG(WARNING) << "PowerManager not implemented."; - return false; -} - -} // namespace chromeos_update_engine diff --git a/power_manager_android.h b/power_manager_android.h deleted file mode 100644 index 86399abc..00000000 --- a/power_manager_android.h +++ /dev/null @@ -1,40 +0,0 @@ -// -// Copyright (C) 2016 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_ -#define UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_ - -#include - -#include "update_engine/power_manager_interface.h" - -namespace chromeos_update_engine { - -class PowerManagerAndroid : public PowerManagerInterface { - public: - PowerManagerAndroid() = default; - ~PowerManagerAndroid() override = default; - - // PowerManagerInterface overrides. 
- bool RequestReboot() override; - - private: - DISALLOW_COPY_AND_ASSIGN(PowerManagerAndroid); -}; - -} // namespace chromeos_update_engine - -#endif // UPDATE_ENGINE_POWER_MANAGER_ANDROID_H_ diff --git a/update_boot_flags_action_unittest.cc b/update_boot_flags_action_unittest.cc index 1b2bfa5a..26cbe909 100644 --- a/update_boot_flags_action_unittest.cc +++ b/update_boot_flags_action_unittest.cc @@ -22,18 +22,17 @@ #include #include -#include "update_engine/fake_system_state.h" +#include "update_engine/common/fake_boot_control.h" namespace chromeos_update_engine { class UpdateBootFlagsActionTest : public ::testing::Test { - public: - FakeSystemState fake_system_state_; + protected: + FakeBootControl boot_control_; }; TEST_F(UpdateBootFlagsActionTest, SimpleTest) { - auto boot_control = fake_system_state_.fake_boot_control(); - auto action = std::make_unique(boot_control); + auto action = std::make_unique(&boot_control_); ActionProcessor processor; processor.EnqueueAction(std::move(action)); @@ -49,9 +48,8 @@ TEST_F(UpdateBootFlagsActionTest, DoubleActionTest) { UpdateBootFlagsAction::updated_boot_flags_ = false; UpdateBootFlagsAction::is_running_ = false; - auto boot_control = fake_system_state_.fake_boot_control(); - auto action1 = std::make_unique(boot_control); - auto action2 = std::make_unique(boot_control); + auto action1 = std::make_unique(&boot_control_); + auto action2 = std::make_unique(&boot_control_); ActionProcessor processor1, processor2; processor1.EnqueueAction(std::move(action1)); processor2.EnqueueAction(std::move(action2)); diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc index ba84a414..907eb959 100644 --- a/update_manager/boxed_value.cc +++ b/update_manager/boxed_value.cc @@ -25,8 +25,8 @@ #include #include +#include "update_engine/common/connection_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_utils.h" #include "update_engine/update_manager/rollback_prefs.h" #include "update_engine/update_manager/shill_provider.h" #include "update_engine/update_manager/updater_provider.h" diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc index 0aaf20ee..05091d9d 100644 --- a/update_manager/real_device_policy_provider.cc +++ b/update_manager/real_device_policy_provider.cc @@ -25,8 +25,8 @@ #include #include +#include "update_engine/common/connection_utils.h" #include "update_engine/common/utils.h" -#include "update_engine/connection_utils.h" #include "update_engine/update_manager/generic_variables.h" using base::TimeDelta; diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc index 4699ad18..32396d64 100644 --- a/update_manager/real_device_policy_provider_unittest.cc +++ b/update_manager/real_device_policy_provider_unittest.cc @@ -34,7 +34,7 @@ #include "update_engine/common/test_utils.h" #if USE_DBUS -#include "update_engine/dbus_test_utils.h" +#include "update_engine/cros/dbus_test_utils.h" #endif // USE_DBUS #include "update_engine/update_manager/umtest_utils.h" diff --git a/update_manager/real_shill_provider.h b/update_manager/real_shill_provider.h index ec5c5701..baa2cdc6 100644 --- a/update_manager/real_shill_provider.h +++ b/update_manager/real_shill_provider.h @@ -28,7 +28,7 @@ #include #include "update_engine/common/clock_interface.h" -#include "update_engine/shill_proxy_interface.h" +#include "update_engine/cros/shill_proxy_interface.h" #include 
"update_engine/update_manager/generic_variables.h" #include "update_engine/update_manager/shill_provider.h" diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc index 505f2f80..682c2330 100644 --- a/update_manager/real_shill_provider_unittest.cc +++ b/update_manager/real_shill_provider_unittest.cc @@ -29,8 +29,8 @@ #include "update_engine/common/fake_clock.h" #include "update_engine/common/test_utils.h" -#include "update_engine/dbus_test_utils.h" -#include "update_engine/fake_shill_proxy.h" +#include "update_engine/cros/dbus_test_utils.h" +#include "update_engine/cros/fake_shill_proxy.h" #include "update_engine/update_manager/umtest_utils.h" using base::Time; diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc index 9b5bc02e..4e88b07f 100644 --- a/update_manager/real_system_provider.cc +++ b/update_manager/real_system_provider.cc @@ -27,7 +27,7 @@ #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/utils.h" -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/omaha_request_params.h" #include "update_engine/update_manager/generic_variables.h" #include "update_engine/update_manager/variable.h" diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h index 0e689977..ffa1467b 100644 --- a/update_manager/real_system_provider.h +++ b/update_manager/real_system_provider.h @@ -22,7 +22,7 @@ #include -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" #include "update_engine/update_manager/system_provider.h" namespace org { diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc index 97571466..8add6906 100644 --- a/update_manager/real_system_provider_unittest.cc +++ b/update_manager/real_system_provider_unittest.cc @@ -24,7 +24,7 @@ #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" -#include "update_engine/fake_system_state.h" +#include "update_engine/cros/fake_system_state.h" #include "update_engine/update_manager/umtest_utils.h" #if USE_CHROME_KIOSK_APP #include "kiosk-app/dbus-proxies.h" diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc index 1548d574..e975b80e 100644 --- a/update_manager/real_updater_provider.cc +++ b/update_manager/real_updater_provider.cc @@ -29,8 +29,8 @@ #include "update_engine/client_library/include/update_engine/update_status.h" #include "update_engine/common/clock_interface.h" #include "update_engine/common/prefs.h" -#include "update_engine/omaha_request_params.h" -#include "update_engine/update_attempter.h" +#include "update_engine/cros/omaha_request_params.h" +#include "update_engine/cros/update_attempter.h" #include "update_engine/update_status_utils.h" using base::StringPrintf; diff --git a/update_manager/real_updater_provider.h b/update_manager/real_updater_provider.h index 08193577..a32e7e95 100644 --- a/update_manager/real_updater_provider.h +++ b/update_manager/real_updater_provider.h @@ -20,7 +20,7 @@ #include #include -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" #include "update_engine/update_manager/generic_variables.h" #include "update_engine/update_manager/updater_provider.h" diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc 
index 06808b81..0dc56acf 100644 --- a/update_manager/real_updater_provider_unittest.cc +++ b/update_manager/real_updater_provider_unittest.cc @@ -25,9 +25,9 @@ #include "update_engine/common/fake_clock.h" #include "update_engine/common/fake_prefs.h" -#include "update_engine/fake_system_state.h" -#include "update_engine/mock_update_attempter.h" -#include "update_engine/omaha_request_params.h" +#include "update_engine/cros/fake_system_state.h" +#include "update_engine/cros/mock_update_attempter.h" +#include "update_engine/cros/omaha_request_params.h" #include "update_engine/update_manager/umtest_utils.h" using base::Time; diff --git a/update_manager/shill_provider.h b/update_manager/shill_provider.h index c7bb2e22..ebe7a3ae 100644 --- a/update_manager/shill_provider.h +++ b/update_manager/shill_provider.h @@ -19,7 +19,7 @@ #include -#include "update_engine/connection_utils.h" +#include "update_engine/common/connection_utils.h" #include "update_engine/update_manager/provider.h" #include "update_engine/update_manager/variable.h" diff --git a/update_manager/staging_utils.cc b/update_manager/staging_utils.cc index f4f685c6..e8f07bb2 100644 --- a/update_manager/staging_utils.cc +++ b/update_manager/staging_utils.cc @@ -27,7 +27,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs_interface.h" -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" using base::TimeDelta; using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod; diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc index a0d8a63b..a95a5a8f 100644 --- a/update_manager/state_factory.cc +++ b/update_manager/state_factory.cc @@ -25,7 +25,7 @@ #include "update_engine/common/clock_interface.h" #if USE_DBUS -#include "update_engine/dbus_connection.h" +#include "update_engine/cros/dbus_connection.h" #endif // USE_DBUS #include "update_engine/update_manager/fake_shill_provider.h" #include "update_engine/update_manager/real_config_provider.h" @@ -36,7 +36,7 @@ #include "update_engine/update_manager/real_time_provider.h" #include "update_engine/update_manager/real_updater_provider.h" #if USE_SHILL -#include "update_engine/shill_proxy.h" +#include "update_engine/cros/shill_proxy.h" #include "update_engine/update_manager/real_shill_provider.h" #endif // USE_SHILL diff --git a/update_manager/state_factory.h b/update_manager/state_factory.h index 1c1c1d95..ac3bf6b9 100644 --- a/update_manager/state_factory.h +++ b/update_manager/state_factory.h @@ -17,7 +17,7 @@ #ifndef UPDATE_ENGINE_UPDATE_MANAGER_STATE_FACTORY_H_ #define UPDATE_ENGINE_UPDATE_MANAGER_STATE_FACTORY_H_ -#include "update_engine/system_state.h" +#include "update_engine/common/system_state.h" #include "update_engine/update_manager/state.h" namespace org { From a1e7c8f0e885b6fdb89017fbffac2e0e93e0c035 Mon Sep 17 00:00:00 2001 From: Tianjie Date: Thu, 22 Oct 2020 17:16:04 -0700 Subject: [PATCH 439/624] Support host simulation of partial update There were two problems for partial update's host verification: 1. We cannot generate copy commands for partitions not included in the payload on host side. 2. Partial update should always be delta updates, while the logic was missing on host side. Address both issues in this cl. 
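A minimal sketch (not part of this CL) of how the two fixes combine for a
host-side apply: the payload is treated as a delta whenever it is partial, and
the device-only partition copy is skipped. It reuses only the types and fields
that appear in the hunks below (InstallPayloadType,
PayloadGenerationConfig::is_delta / is_partial_update, the __ANDROID__ guard);
the helper names and include paths are assumptions made just for this
illustration.

  #include "update_engine/payload_consumer/install_plan.h"
  #include "update_engine/payload_generator/payload_generation_config.h"

  namespace chromeos_update_engine {

  // Partial updates are always applied as deltas, so a host simulation must
  // not fall back to kFull just because --is_delta was not passed.
  InstallPayloadType ChoosePayloadTypeForHostApply(
      const PayloadGenerationConfig& config) {
    return (config.is_delta || config.is_partial_update)
               ? InstallPayloadType::kDelta
               : InstallPayloadType::kFull;
  }

  // On the host there is no block device to copy untouched partitions from,
  // so the copy step is skipped, mirroring the early return added to
  // PartitionUpdateGeneratorAndroid below.
  bool ShouldCopyPartitionsNotInPayload() {
  #ifdef __ANDROID__
    return true;
  #else
    return false;
  #endif
  }

  }  // namespace chromeos_update_engine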
Bug: 171519321 Test: delta_generator --is_partial_update true --in_file=payload.bin \ --partition_names=product:system:system_ext:vbmeta_system \ --new_partitions=/tmp/1:/tmp/2:/tmp/3:/tmp/4 --major_version=2 Change-Id: I87c1b162d1688c04be0dd81566966eced3690ca1 --- .../partition_update_generator_android.cc | 5 +++++ payload_generator/generate_delta_main.cc | 15 +++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc index 25771e16..44671828 100644 --- a/payload_consumer/partition_update_generator_android.cc +++ b/payload_consumer/partition_update_generator_android.cc @@ -41,6 +41,11 @@ bool PartitionUpdateGeneratorAndroid:: BootControlInterface::Slot target_slot, const std::set& partitions_in_payload, std::vector* update_list) { +#ifndef __ANDROID__ + // Skip copying partitions for host verification. + return true; +#endif + auto ab_partitions = GetAbPartitionsOnDevice(); if (ab_partitions.empty()) { LOG(ERROR) << "Failed to load static a/b partitions"; diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 2e32f1bf..8cda9d19 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -183,8 +183,11 @@ bool ApplyPayload(const string& payload_file, install_plan.source_slot = config.is_delta ? 0 : BootControlInterface::kInvalidSlot; install_plan.target_slot = 1; - payload.type = - config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull; + // For partial updates, we always write kDelta to the payload. Make it + // consistent for host simulation. + payload.type = config.is_delta || config.is_partial_update + ? InstallPayloadType::kDelta + : InstallPayloadType::kFull; payload.size = utils::FileSize(payload_file); // TODO(senj): This hash is only correct for unsigned payload, need to support // signed payload using PayloadSigner. @@ -576,6 +579,10 @@ int Main(int argc, char** argv) { } } + if (FLAGS_is_partial_update) { + payload_config.is_partial_update = true; + } + if (!FLAGS_in_file.empty()) { return ApplyPayload(FLAGS_in_file, payload_config) ? 0 : 1; } @@ -604,10 +611,6 @@ int Main(int argc, char** argv) { CHECK(payload_config.target.ValidateDynamicPartitionMetadata()); } - if (FLAGS_is_partial_update) { - payload_config.is_partial_update = true; - } - CHECK(!FLAGS_out_file.empty()); payload_config.rootfs_partition_size = FLAGS_rootfs_partition_size; From bdd98f9a8f9bb7a72e987de49513964961d6899b Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 3 Nov 2020 14:38:11 -0800 Subject: [PATCH 440/624] update_engine: Remove android specific code from cros These codes are not compiled in aosp. 
BUG=b:171829801 TEST=None Change-Id: I81fd8202d3fb5f86775a6efe202f7f4f86ae1089 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2518385 Commit-Queue: Amin Hassani Commit-Queue: Jae Hoon Kim Tested-by: Amin Hassani Auto-Submit: Amin Hassani Reviewed-by: Jae Hoon Kim --- .../omaha_response_handler_action_unittest.cc | 22 ------------------- cros/update_attempter.cc | 8 ------- 2 files changed, 30 deletions(-) diff --git a/cros/omaha_response_handler_action_unittest.cc b/cros/omaha_response_handler_action_unittest.cc index 8da32059..b05660c4 100644 --- a/cros/omaha_response_handler_action_unittest.cc +++ b/cros/omaha_response_handler_action_unittest.cc @@ -448,11 +448,6 @@ TEST_F(OmahaResponseHandlerActionTest, fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("canary-channel"); - // The ImageProperties in Android uses prefs to store MutableImageProperties. -#ifdef __ANDROID__ - EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true)) - .WillOnce(Return(true)); -#endif // __ANDROID__ EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("2.0.0.0"); @@ -481,12 +476,6 @@ TEST_F(OmahaResponseHandlerActionTest, fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("canary-channel"); - // The |ImageProperties| in Android uses prefs to store - // |MutableImageProperties|. -#ifdef __ANDROID__ - EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true)) - .WillOnce(Return(true)); -#endif // __ANDROID__ EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("2.0.0.0"); @@ -515,12 +504,6 @@ TEST_F(OmahaResponseHandlerActionTest, fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("beta-channel"); - // The |ImageProperties| in Android uses prefs to store - // |MutableImageProperties|. -#ifdef __ANDROID__ - EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true)) - .WillOnce(Return(true)); -#endif // __ANDROID__ EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("12345.48.0.0"); @@ -675,11 +658,6 @@ TEST_F(OmahaResponseHandlerActionTest, fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("stable-channel"); - // The ImageProperties in Android uses prefs to store MutableImageProperties. 
-#ifdef __ANDROID__ - EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, false)) - .WillOnce(Return(true)); -#endif // __ANDROID__ EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("1.0.0.0"); diff --git a/cros/update_attempter.cc b/cros/update_attempter.cc index e8cb291e..e9098ded 100644 --- a/cros/update_attempter.cc +++ b/cros/update_attempter.cc @@ -981,14 +981,6 @@ bool UpdateAttempter::CheckForInstall(const vector& dlc_ids, } bool UpdateAttempter::RebootIfNeeded() { -#ifdef __ANDROID__ - if (status_ != UpdateStatus::UPDATED_NEED_REBOOT) { - LOG(INFO) << "Reboot requested, but status is " - << UpdateStatusToString(status_) << ", so not rebooting."; - return false; - } -#endif // __ANDROID__ - if (system_state_->power_manager()->RequestReboot()) return true; From cb210c56abff827acf4a13635b48f554f8d71376 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 3 Nov 2020 13:25:06 -0800 Subject: [PATCH 441/624] update_engine: Delete UpdateDownloadAllowed Policy Unused policy in update_engine code. |UpdateDownloadAllowed| was a work in progress that was left over. Network connection is checked in a different flow. BUG=b:171829801 TEST=FEATURES=test emerge-$B update_engine Change-Id: Ic726efd066c270be7ca0b594d5627ee884893c27 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2518383 Tested-by: Jae Hoon Kim Commit-Queue: Amin Hassani Reviewed-by: Amin Hassani --- update_manager/android_things_policy.cc | 10 -- update_manager/android_things_policy.h | 6 -- update_manager/chromeos_policy.cc | 82 ----------------- update_manager/chromeos_policy.h | 5 - update_manager/chromeos_policy_unittest.cc | 101 --------------------- update_manager/default_policy.cc | 8 -- update_manager/default_policy.h | 5 - update_manager/mock_policy.h | 5 - update_manager/policy.h | 14 --- update_manager/policy_utils.h | 7 -- 10 files changed, 243 deletions(-) diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc index c4fa75a4..57591455 100644 --- a/update_manager/android_things_policy.cc +++ b/update_manager/android_things_policy.cc @@ -157,16 +157,6 @@ EvalStatus AndroidThingsPolicy::UpdateCanStart(EvaluationContext* ec, return EvalStatus::kSucceeded; } -// Always returns |EvalStatus::kSucceeded| -EvalStatus AndroidThingsPolicy::UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - string* error, - bool* result) const { - // By default, we allow updates. - *result = true; - return EvalStatus::kSucceeded; -} - // P2P is always disabled. Returns |result|==|false| and // |EvalStatus::kSucceeded| EvalStatus AndroidThingsPolicy::P2PEnabled(EvaluationContext* ec, diff --git a/update_manager/android_things_policy.h b/update_manager/android_things_policy.h index 9fd8bc44..3b273ca8 100644 --- a/update_manager/android_things_policy.h +++ b/update_manager/android_things_policy.h @@ -53,12 +53,6 @@ class AndroidThingsPolicy : public Policy { UpdateDownloadParams* result, UpdateState update_state) const override; - // Always returns |EvalStatus::kSucceeded| - EvalStatus UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const override; - // P2P is always disabled. 
Returns |result|==|false| and // |EvalStatus::kSucceeded| EvalStatus P2PEnabled(EvaluationContext* ec, diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc index 4c651b7d..262987e5 100644 --- a/update_manager/chromeos_policy.cc +++ b/update_manager/chromeos_policy.cc @@ -465,88 +465,6 @@ EvalStatus ChromeOSPolicy::UpdateCanStart( return EvalStatus::kSucceeded; } -// TODO(garnold) Logic in this method is based on -// ConnectionManager::IsUpdateAllowedOver(); be sure to deprecate the latter. -// -// TODO(garnold) The current logic generally treats the list of allowed -// connections coming from the device policy as an allowlist, meaning that it -// can only be used for enabling connections, but not disable them. Further, -// certain connection types cannot be enabled even by policy. -// In effect, the only thing that device policy can change is to enable -// updates over a cellular network (disabled by default). We may want to -// revisit this semantics, allowing greater flexibility in defining specific -// permissions over all types of networks. -EvalStatus ChromeOSPolicy::UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - string* error, - bool* result) const { - // Get the current connection type. - ShillProvider* const shill_provider = state->shill_provider(); - const ConnectionType* conn_type_p = - ec->GetValue(shill_provider->var_conn_type()); - POLICY_CHECK_VALUE_AND_FAIL(conn_type_p, error); - ConnectionType conn_type = *conn_type_p; - - // If we're tethering, treat it as a cellular connection. - if (conn_type != ConnectionType::kCellular) { - const ConnectionTethering* conn_tethering_p = - ec->GetValue(shill_provider->var_conn_tethering()); - POLICY_CHECK_VALUE_AND_FAIL(conn_tethering_p, error); - if (*conn_tethering_p == ConnectionTethering::kConfirmed) - conn_type = ConnectionType::kCellular; - } - - // By default, we allow updates for all connection types, with exceptions as - // noted below. This also determines whether a device policy can override the - // default. - *result = true; - bool device_policy_can_override = false; - switch (conn_type) { - case ConnectionType::kCellular: - *result = false; - device_policy_can_override = true; - break; - - case ConnectionType::kUnknown: - if (error) - *error = "Unknown connection type"; - return EvalStatus::kFailed; - - default: - break; // Nothing to do. - } - - // If update is allowed, we're done. - if (*result) - return EvalStatus::kSucceeded; - - // Check whether the device policy specifically allows this connection. - if (device_policy_can_override) { - DevicePolicyProvider* const dp_provider = state->device_policy_provider(); - const bool* device_policy_is_loaded_p = - ec->GetValue(dp_provider->var_device_policy_is_loaded()); - if (device_policy_is_loaded_p && *device_policy_is_loaded_p) { - const set* allowed_conn_types_p = - ec->GetValue(dp_provider->var_allowed_connection_types_for_update()); - if (allowed_conn_types_p) { - if (allowed_conn_types_p->count(conn_type)) { - *result = true; - return EvalStatus::kSucceeded; - } - } else if (conn_type == ConnectionType::kCellular) { - // Local user settings can allow updates over cellular iff a policy was - // loaded but no allowed connections were specified in it. - const bool* update_over_cellular_allowed_p = - ec->GetValue(state->updater_provider()->var_cellular_enabled()); - if (update_over_cellular_allowed_p && *update_over_cellular_allowed_p) - *result = true; - } - } - } - - return (*result ? 
EvalStatus::kSucceeded : EvalStatus::kAskMeAgainLater); -} - EvalStatus ChromeOSPolicy::P2PEnabled(EvaluationContext* ec, State* state, string* error, diff --git a/update_manager/chromeos_policy.h b/update_manager/chromeos_policy.h index ded51646..3c196dac 100644 --- a/update_manager/chromeos_policy.h +++ b/update_manager/chromeos_policy.h @@ -72,11 +72,6 @@ class ChromeOSPolicy : public Policy { UpdateDownloadParams* result, UpdateState update_state) const override; - EvalStatus UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const override; - EvalStatus P2PEnabled(EvaluationContext* ec, State* state, std::string* error, diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc index 996db2bf..53e8a52f 100644 --- a/update_manager/chromeos_policy_unittest.cc +++ b/update_manager/chromeos_policy_unittest.cc @@ -1406,107 +1406,6 @@ TEST_F(UmChromeOSPolicyTest, EXPECT_FALSE(result.do_increment_failures); } -TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedEthernetDefault) { - // Ethernet is always allowed. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kEthernet)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result); - EXPECT_TRUE(result); -} - -TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiDefault) { - // Wifi is allowed if not tethered. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kWifi)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result); - EXPECT_TRUE(result); -} - -TEST_F(UmChromeOSPolicyTest, - UpdateCurrentConnectionNotAllowedWifiTetheredDefault) { - // Tethered wifi is not allowed by default. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kWifi)); - fake_state_.shill_provider()->var_conn_tethering()->reset( - new ConnectionTethering(ConnectionTethering::kConfirmed)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result); -} - -TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWifiTetheredPolicyOverride) { - // Tethered wifi can be allowed by policy. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kWifi)); - fake_state_.shill_provider()->var_conn_tethering()->reset( - new ConnectionTethering(ConnectionTethering::kConfirmed)); - set allowed_connections; - allowed_connections.insert(ConnectionType::kCellular); - fake_state_.device_policy_provider() - ->var_allowed_connection_types_for_update() - ->reset(new set(allowed_connections)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result); - EXPECT_TRUE(result); -} - -TEST_F(UmChromeOSPolicyTest, UpdateCurrentConnectionNotAllowedCellularDefault) { - // Cellular is not allowed by default. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kCellular)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result); -} - -TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedCellularPolicyOverride) { - // Update over cellular can be enabled by policy. 
- - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kCellular)); - set allowed_connections; - allowed_connections.insert(ConnectionType::kCellular); - fake_state_.device_policy_provider() - ->var_allowed_connection_types_for_update() - ->reset(new set(allowed_connections)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result); - EXPECT_TRUE(result); -} - -TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedCellularUserOverride) { - // Update over cellular can be enabled by user settings, but only if policy - // is present and does not determine allowed connections. - - fake_state_.shill_provider()->var_conn_type()->reset( - new ConnectionType(ConnectionType::kCellular)); - set allowed_connections; - allowed_connections.insert(ConnectionType::kCellular); - fake_state_.updater_provider()->var_cellular_enabled()->reset(new bool(true)); - - bool result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result); - EXPECT_TRUE(result); -} - TEST_F(UmChromeOSPolicyTest, UpdateCanStartAllowedScatteringSupressedDueToP2P) { // The UpdateCanStart policy returns true; scattering should have applied, but // P2P download is allowed. Scattering values are nonetheless returned, and so diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc index 7ca414b1..5a381800 100644 --- a/update_manager/default_policy.cc +++ b/update_manager/default_policy.cc @@ -89,14 +89,6 @@ EvalStatus DefaultPolicy::UpdateCanStart(EvaluationContext* ec, return EvalStatus::kSucceeded; } -EvalStatus DefaultPolicy::UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const { - *result = true; - return EvalStatus::kSucceeded; -} - EvalStatus DefaultPolicy::P2PEnabled(EvaluationContext* ec, State* state, std::string* error, diff --git a/update_manager/default_policy.h b/update_manager/default_policy.h index 1b284f41..006bcb7b 100644 --- a/update_manager/default_policy.h +++ b/update_manager/default_policy.h @@ -83,11 +83,6 @@ class DefaultPolicy : public Policy { UpdateDownloadParams* result, UpdateState update_state) const override; - EvalStatus UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const override; - EvalStatus P2PEnabled(EvaluationContext* ec, State* state, std::string* error, diff --git a/update_manager/mock_policy.h b/update_manager/mock_policy.h index 46b6c789..183130fb 100644 --- a/update_manager/mock_policy.h +++ b/update_manager/mock_policy.h @@ -46,11 +46,6 @@ class MockPolicy : public Policy { testing::_, testing::_, testing::_, testing::_, testing::_)) .WillByDefault( testing::Invoke(&default_policy_, &DefaultPolicy::UpdateCanStart)); - ON_CALL( - *this, - UpdateDownloadAllowed(testing::_, testing::_, testing::_, testing::_)) - .WillByDefault(testing::Invoke(&default_policy_, - &DefaultPolicy::UpdateDownloadAllowed)); ON_CALL(*this, P2PEnabled(testing::_, testing::_, testing::_, testing::_)) .WillByDefault( testing::Invoke(&default_policy_, &DefaultPolicy::P2PEnabled)); diff --git a/update_manager/policy.h b/update_manager/policy.h index ad6994c6..7543ea9f 100644 --- a/update_manager/policy.h +++ b/update_manager/policy.h @@ -227,9 +227,6 @@ class Policy { if (reinterpret_cast(policy_method) == &Policy::UpdateCanStart) return class_name + "UpdateCanStart"; - if (reinterpret_cast( - policy_method) == &Policy::UpdateDownloadAllowed) - return class_name + 
"UpdateDownloadAllowed"; if (reinterpret_cast(policy_method) == &Policy::P2PEnabled) return class_name + "P2PEnabled"; @@ -278,17 +275,6 @@ class Policy { UpdateDownloadParams* result, UpdateState update_state) const = 0; - // Checks whether downloading of an update is allowed; currently, this checks - // whether the network connection type is suitable for updating over. May - // consult the shill provider as well as the device policy (if available). - // Returns |EvalStatus::kSucceeded|, setting |result| according to whether or - // not the current connection can be used; on error, returns - // |EvalStatus::kFailed| and sets |error| accordingly. - virtual EvalStatus UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const = 0; - // Checks whether P2P is enabled. This may consult device policy and other // global settings. virtual EvalStatus P2PEnabled(EvaluationContext* ec, diff --git a/update_manager/policy_utils.h b/update_manager/policy_utils.h index dc606f21..aedb90cb 100644 --- a/update_manager/policy_utils.h +++ b/update_manager/policy_utils.h @@ -92,13 +92,6 @@ class PolicyImplBase : public Policy { return EvalStatus::kContinue; }; - EvalStatus UpdateDownloadAllowed(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const override { - return EvalStatus::kContinue; - }; - EvalStatus P2PEnabled(EvaluationContext* ec, State* state, std::string* error, From 7d43024dfe2bf06aef98828c010c71beb97feca7 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Tue, 3 Nov 2020 14:20:56 -0800 Subject: [PATCH 442/624] update_engine: Delete android things policy Android things policy not used. BUG=b:171829801 TEST=none Change-Id: Ibf14e72ac9061dcad7c4b291474834529681c395 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2518384 Tested-by: Jae Hoon Kim Commit-Queue: Amin Hassani Reviewed-by: Amin Hassani --- update_manager/android_things_policy.cc | 181 ----------------- update_manager/android_things_policy.h | 86 -------- .../android_things_policy_unittest.cc | 188 ------------------ update_manager/policy.h | 4 +- 4 files changed, 1 insertion(+), 458 deletions(-) delete mode 100644 update_manager/android_things_policy.cc delete mode 100644 update_manager/android_things_policy.h delete mode 100644 update_manager/android_things_policy_unittest.cc diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc deleted file mode 100644 index 57591455..00000000 --- a/update_manager/android_things_policy.cc +++ /dev/null @@ -1,181 +0,0 @@ -// -// Copyright (C) 2017 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "update_engine/update_manager/android_things_policy.h" - -#include -#include -#include - -#include -#include - -#include "update_engine/update_manager/api_restricted_downloads_policy_impl.h" -#include "update_engine/update_manager/enough_slots_ab_updates_policy_impl.h" -#include "update_engine/update_manager/interactive_update_policy_impl.h" -#include "update_engine/update_manager/official_build_check_policy_impl.h" - -using base::Time; -using chromeos_update_engine::ErrorCode; -using std::string; -using std::unique_ptr; -using std::vector; - -namespace chromeos_update_manager { - -unique_ptr GetSystemPolicy() { - return std::make_unique(); -} - -const NextUpdateCheckPolicyConstants - AndroidThingsPolicy::kNextUpdateCheckPolicyConstants = { - .timeout_initial_interval = 7 * 60, - .timeout_periodic_interval = 5 * 60 * 60, - .timeout_max_backoff_interval = 26 * 60 * 60, - .timeout_regular_fuzz = 10 * 60, - .attempt_backoff_max_interval_in_days = 16, - .attempt_backoff_fuzz_in_hours = 12, -}; - -EvalStatus AndroidThingsPolicy::UpdateCheckAllowed( - EvaluationContext* ec, - State* state, - string* error, - UpdateCheckParams* result) const { - // Set the default return values. - result->updates_enabled = true; - result->target_channel.clear(); - result->lts_tag.clear(); - result->target_version_prefix.clear(); - result->rollback_allowed = false; - result->rollback_data_save_requested = false; - result->rollback_allowed_milestones = -1; - result->rollback_on_channel_downgrade = false; - result->interactive = false; - - // Build a list of policies to consult. Note that each policy may modify the - // result structure, even if it signals kContinue. - EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy; - OnlyUpdateOfficialBuildsPolicyImpl only_update_official_builds_policy; - InteractiveUpdatePolicyImpl interactive_update_policy; - NextUpdateCheckTimePolicyImpl next_update_check_time_policy( - kNextUpdateCheckPolicyConstants); - - vector policies_to_consult = { - // Do not perform any updates if there are not enough slots to do - // A/B updates - &enough_slots_ab_updates_policy, - - // Check to see if an interactive update was requested. - &interactive_update_policy, - - // Unofficial builds should not perform periodic update checks. - &only_update_official_builds_policy, - - // Ensure that periodic update checks are timed properly. - &next_update_check_time_policy, - }; - - // Now that the list of policy implementations, and the order to consult them, - // as been setup, do that. If none of the policies make a definitive - // decisions about whether or not to check for updates, then allow the update - // check to happen. - EvalStatus status = ConsultPolicies(policies_to_consult, - &Policy::UpdateCheckAllowed, - ec, - state, - error, - result); - if (status != EvalStatus::kContinue) { - return status; - } else { - // It is time to check for an update. - LOG(INFO) << "Allowing update check."; - return EvalStatus::kSucceeded; - } -} - -// Uses the |UpdateRestrictions| to determine if the download and apply can -// occur at this time. -EvalStatus AndroidThingsPolicy::UpdateCanBeApplied( - EvaluationContext* ec, - State* state, - string* error, - ErrorCode* result, - chromeos_update_engine::InstallPlan* install_plan) const { - // Build a list of policies to consult. Note that each policy may modify the - // result structure, even if it signals kContinue. 
- ApiRestrictedDownloadsPolicyImpl api_restricted_downloads_policy; - - vector policies_to_consult = { - // Do not apply the update if all updates are restricted by the API. - &api_restricted_downloads_policy, - }; - - // Now that the list of policy implementations, and the order to consult them, - // as been setup, do that. If none of the policies make a definitive - // decisions about whether or not to check for updates, then allow the update - // check to happen. - EvalStatus status = ConsultPolicies(policies_to_consult, - &Policy::UpdateCanBeApplied, - ec, - state, - error, - result, - install_plan); - if (EvalStatus::kContinue != status) { - return status; - } else { - // The update can proceed. - LOG(INFO) << "Allowing update to be applied."; - *result = ErrorCode::kSuccess; - return EvalStatus::kSucceeded; - } -} - -// Always returns |EvalStatus::kSucceeded| -EvalStatus AndroidThingsPolicy::UpdateCanStart(EvaluationContext* ec, - State* state, - string* error, - UpdateDownloadParams* result, - UpdateState update_state) const { - // Update is good to go. - result->update_can_start = true; - return EvalStatus::kSucceeded; -} - -// P2P is always disabled. Returns |result|==|false| and -// |EvalStatus::kSucceeded| -EvalStatus AndroidThingsPolicy::P2PEnabled(EvaluationContext* ec, - State* state, - string* error, - bool* result) const { - *result = false; - return EvalStatus::kSucceeded; -} - -// This will return immediately with |EvalStatus::kSucceeded| and set -// |result|==|false| -EvalStatus AndroidThingsPolicy::P2PEnabledChanged(EvaluationContext* ec, - State* state, - string* error, - bool* result, - bool prev_result) const { - *result = false; - return EvalStatus::kSucceeded; -} - -} // namespace chromeos_update_manager diff --git a/update_manager/android_things_policy.h b/update_manager/android_things_policy.h deleted file mode 100644 index 3b273ca8..00000000 --- a/update_manager/android_things_policy.h +++ /dev/null @@ -1,86 +0,0 @@ -// -// Copyright (C) 2017 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_ -#define UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_ - -#include - -#include "update_engine/update_manager/next_update_check_policy_impl.h" -#include "update_engine/update_manager/policy_utils.h" - -namespace chromeos_update_manager { - -// AndroidThingsPolicy implements the policy-related logic used in -// AndroidThings. -class AndroidThingsPolicy : public Policy { - public: - AndroidThingsPolicy() = default; - ~AndroidThingsPolicy() override = default; - - // Policy overrides. - EvalStatus UpdateCheckAllowed(EvaluationContext* ec, - State* state, - std::string* error, - UpdateCheckParams* result) const override; - - // Uses the |UpdateRestrictions| to determine if the download and apply can - // occur at this time. 
- EvalStatus UpdateCanBeApplied( - EvaluationContext* ec, - State* state, - std::string* error, - chromeos_update_engine::ErrorCode* result, - chromeos_update_engine::InstallPlan* install_plan) const override; - - // Always returns |EvalStatus::kSucceeded| - EvalStatus UpdateCanStart(EvaluationContext* ec, - State* state, - std::string* error, - UpdateDownloadParams* result, - UpdateState update_state) const override; - - // P2P is always disabled. Returns |result|==|false| and - // |EvalStatus::kSucceeded| - EvalStatus P2PEnabled(EvaluationContext* ec, - State* state, - std::string* error, - bool* result) const override; - - // This will return immediately with |EvalStatus::kSucceeded| and set - // |result|==|false| - EvalStatus P2PEnabledChanged(EvaluationContext* ec, - State* state, - std::string* error, - bool* result, - bool prev_result) const override; - - protected: - // Policy override. - std::string PolicyName() const override { return "AndroidThingsPolicy"; } - - private: - friend class UmAndroidThingsPolicyTest; - FRIEND_TEST(UmAndroidThingsPolicyTest, UpdateCheckAllowedWaitsForTheTimeout); - - static const NextUpdateCheckPolicyConstants kNextUpdateCheckPolicyConstants; - - DISALLOW_COPY_AND_ASSIGN(AndroidThingsPolicy); -}; - -} // namespace chromeos_update_manager - -#endif // UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_ diff --git a/update_manager/android_things_policy_unittest.cc b/update_manager/android_things_policy_unittest.cc deleted file mode 100644 index 6961efcb..00000000 --- a/update_manager/android_things_policy_unittest.cc +++ /dev/null @@ -1,188 +0,0 @@ -// -// Copyright (C) 2017 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "update_engine/update_manager/android_things_policy.h" - -#include - -#include "update_engine/update_manager/next_update_check_policy_impl.h" -#include "update_engine/update_manager/policy_test_utils.h" - -using base::Time; -using base::TimeDelta; -using chromeos_update_engine::ErrorCode; -using chromeos_update_engine::InstallPlan; - -namespace chromeos_update_manager { - -class UmAndroidThingsPolicyTest : public UmPolicyTestBase { - protected: - UmAndroidThingsPolicyTest() { - policy_ = std::make_unique(); - } - - void SetUpDefaultState() override { - UmPolicyTestBase::SetUpDefaultState(); - - // For the purpose of the tests, this is an official build - fake_state_.system_provider()->var_is_official_build()->reset( - new bool(true)); - // NOLINTNEXTLINE(readability/casting) - fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(2)); - } - - // Configures the policy to return a desired value from UpdateCheckAllowed by - // faking the current wall clock time as needed. Restores the default state. - // This is used when testing policies that depend on this one. 
- virtual void SetUpdateCheckAllowed(bool allow_check) { - Time next_update_check; - CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime, - &next_update_check, - AndroidThingsPolicy::kNextUpdateCheckPolicyConstants); - SetUpDefaultState(); - Time curr_time = next_update_check; - if (allow_check) - curr_time += TimeDelta::FromSeconds(1); - else - curr_time -= TimeDelta::FromSeconds(1); - fake_clock_.SetWallclockTime(curr_time); - } -}; - -TEST_F(UmAndroidThingsPolicyTest, UpdateCheckAllowedWaitsForTheTimeout) { - // We get the next update_check timestamp from the policy's private method - // and then we check the public method respects that value on the normal - // case. - Time next_update_check; - Time last_checked_time = - fake_clock_.GetWallclockTime() + TimeDelta::FromMinutes(1234); - - LOG(INFO) << "last_checked_time: " << last_checked_time; - fake_state_.updater_provider()->var_last_checked_time()->reset( - new Time(last_checked_time)); - CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime, - &next_update_check, - AndroidThingsPolicy::kNextUpdateCheckPolicyConstants); - LOG(INFO) << "Next check allowed at: " << next_update_check; - - // Check that the policy blocks until the next_update_check is reached. - SetUpDefaultClock(); - SetUpDefaultState(); - fake_state_.updater_provider()->var_last_checked_time()->reset( - new Time(last_checked_time)); - fake_clock_.SetWallclockTime(next_update_check - TimeDelta::FromSeconds(1)); - - UpdateCheckParams result; - ExpectPolicyStatus( - EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result); - - SetUpDefaultClock(); - SetUpDefaultState(); - fake_state_.updater_provider()->var_last_checked_time()->reset( - new Time(last_checked_time)); - fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1)); - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result); - EXPECT_TRUE(result.updates_enabled); - EXPECT_FALSE(result.interactive); -} - -TEST_F(UmAndroidThingsPolicyTest, - UpdateCheckAllowedUpdatesDisabledForUnofficialBuilds) { - // UpdateCheckAllowed should return kAskMeAgainLater if this is an unofficial - // build; we don't want periodic update checks on developer images. - - fake_state_.system_provider()->var_is_official_build()->reset( - new bool(false)); - - UpdateCheckParams result; - ExpectPolicyStatus( - EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result); -} - -TEST_F(UmAndroidThingsPolicyTest, - UpdateCheckAllowedUpdatesDisabledWhenNotEnoughSlotsAbUpdates) { - // UpdateCheckAllowed should return false (kSucceeded) if the image booted - // without enough slots to do A/B updates. - - // NOLINTNEXTLINE(readability/casting) - fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(1)); - - UpdateCheckParams result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result); - EXPECT_FALSE(result.updates_enabled); -} - -TEST_F(UmAndroidThingsPolicyTest, - UpdateCheckAllowedForcedUpdateRequestedInteractive) { - // UpdateCheckAllowed should return true because a forced update request was - // signaled for an interactive update. 
- - SetUpdateCheckAllowed(true); - fake_state_.updater_provider()->var_forced_update_requested()->reset( - new UpdateRequestStatus(UpdateRequestStatus::kInteractive)); - - UpdateCheckParams result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result); - EXPECT_TRUE(result.updates_enabled); - EXPECT_TRUE(result.interactive); -} - -TEST_F(UmAndroidThingsPolicyTest, - UpdateCheckAllowedForcedUpdateRequestedPeriodic) { - // UpdateCheckAllowed should return true because a forced update request was - // signaled for a periodic check. - - SetUpdateCheckAllowed(true); - fake_state_.updater_provider()->var_forced_update_requested()->reset( - new UpdateRequestStatus(UpdateRequestStatus::kPeriodic)); - - UpdateCheckParams result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result); - EXPECT_TRUE(result.updates_enabled); - EXPECT_FALSE(result.interactive); -} - -TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedOk) { - // UpdateCanBeApplied should return kSucceeded in the base case - - InstallPlan plan; - ErrorCode result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateCanBeApplied, &result, &plan); - - EXPECT_EQ(ErrorCode::kSuccess, result); -} - -TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedRestricted) { - // UpdateCanBeApplied should return kOmahaUpdateDeferredPerPolicy in - // when the restricted flag is set in the Updater. - - fake_state_.updater_provider()->var_update_restrictions()->reset( - new UpdateRestrictions(UpdateRestrictions::kRestrictDownloading)); - - InstallPlan plan; - ErrorCode result; - ExpectPolicyStatus( - EvalStatus::kSucceeded, &Policy::UpdateCanBeApplied, &result, &plan); - - EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, result); -} - -} // namespace chromeos_update_manager diff --git a/update_manager/policy.h b/update_manager/policy.h index 7543ea9f..595fb7cf 100644 --- a/update_manager/policy.h +++ b/update_manager/policy.h @@ -303,9 +303,7 @@ class Policy { DISALLOW_COPY_AND_ASSIGN(Policy); }; -// Get system dependent (Chrome OS vs. Android) policy -// implementation. Implementations can be found in chromeos_policy.cc and -// android_things_policy.cc. +// Get system dependent policy implementation. std::unique_ptr GetSystemPolicy(); } // namespace chromeos_update_manager From 4796ea84c10f04f4c5238c13523f9e9c707d5eb1 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 2 Nov 2020 10:54:07 -0500 Subject: [PATCH 443/624] Avoid conflict of self-overlapping ops When generating OTA, self-overlapping SOURCE_COPY operations are allowed.(such as [20-30] -> [25-35]) These operations might cause conflicts during merge, handle them in ConvertToCowOperations. Test: treehugger Change-Id: I4df9ce9e54f8512cae0aa0f4bcf9bcf8c01fd254 --- common/cow_operation_convert.cc | 28 ++++++++++++++++++++---- common/cow_operation_convert_unittest.cc | 18 ++++++++++++++- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/common/cow_operation_convert.cc b/common/cow_operation_convert.cc index db17b5fd..6b64a9c7 100644 --- a/common/cow_operation_convert.cc +++ b/common/cow_operation_convert.cc @@ -30,6 +30,7 @@ std::vector ConvertToCowOperations( merge_operations) { ExtentRanges merge_extents; std::vector converted; + ExtentRanges modified_extents; // We want all CowCopy ops to be done first, before any COW_REPLACE happen. // Therefore we add these ops in 2 separate loops. 
This is because during @@ -42,10 +43,29 @@ std::vector ConvertToCowOperations( merge_extents.AddExtent(merge_op.dst_extent()); const auto& src_extent = merge_op.src_extent(); const auto& dst_extent = merge_op.dst_extent(); - for (uint64_t i = 0; i < src_extent.num_blocks(); i++) { - converted.push_back({CowOperation::CowCopy, - src_extent.start_block() + i, - dst_extent.start_block() + i}); + // Add blocks in reverse order to avoid merge conflicts on self-overlapping + // Ops. + // For example: SOURCE_COPY [20 - 30] -> [25 - 35] If blocks are added in + // forward order, then 20->25 is performed first, destroying block 25, which + // is neede by a later operation. + if (src_extent.start_block() < dst_extent.start_block()) { + for (uint64_t i = src_extent.num_blocks(); i > 0; i--) { + auto src_block = src_extent.start_block() + i - 1; + auto dst_block = dst_extent.start_block() + i - 1; + CHECK(!modified_extents.ContainsBlock(src_block)) + << "block " << src_block << " is modified by previous CowCopy"; + converted.push_back({CowOperation::CowCopy, src_block, dst_block}); + modified_extents.AddBlock(dst_block); + } + } else { + for (uint64_t i = 0; i < src_extent.num_blocks(); i++) { + auto src_block = src_extent.start_block() + i; + auto dst_block = dst_extent.start_block() + i; + CHECK(!modified_extents.ContainsBlock(src_block)) + << "block " << src_block << " is modified by previous CowCopy"; + converted.push_back({CowOperation::CowCopy, src_block, dst_block}); + modified_extents.AddBlock(dst_block); + } } } // COW_REPLACE are added after COW_COPY, because replace might modify blocks diff --git a/common/cow_operation_convert_unittest.cc b/common/cow_operation_convert_unittest.cc index b70dcdfa..93173fe8 100644 --- a/common/cow_operation_convert_unittest.cc +++ b/common/cow_operation_convert_unittest.cc @@ -68,7 +68,6 @@ class CowOperationConvertTest : public testing::Test { EXPECT_FALSE(modified_extents.ContainsBlock(cow_op.src_block)) << "SOURCE_COPY operation " << cow_op << " read from a modified block"; - src_extent_set.SubtractExtent(ExtentForRange(cow_op.src_block, 1)); } EXPECT_TRUE(dst_extent_set.ContainsBlock(cow_op.dst_block)); dst_extent_set.SubtractExtent(ExtentForRange(cow_op.dst_block, 1)); @@ -217,4 +216,21 @@ TEST_F(CowOperationConvertTest, InterleavingSrcExtent) { })); VerifyCowMergeOp(cow_ops); } + +TEST_F(CowOperationConvertTest, SelfOverlappingOperation) { + AddOperation( + &operations_, InstallOperation::SOURCE_COPY, {{20, 10}}, {{25, 10}}); + + AddMergeOperation( + &merge_operations_, CowMergeOperation::COW_COPY, {20, 10}, {25, 10}); + + auto cow_ops = ConvertToCowOperations(operations_, merge_operations_); + // Expect 10 COW_COPY + ASSERT_EQ(cow_ops.size(), 10UL); + ASSERT_TRUE(std::all_of(cow_ops.begin(), cow_ops.end(), [](auto&& cow_op) { + return cow_op.op == CowOperation::CowCopy; + })); + VerifyCowMergeOp(cow_ops); +} + } // namespace chromeos_update_engine From 24599af599acf74b71a555a8eeb827bedcd672b5 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Tue, 27 Oct 2020 13:44:25 -0400 Subject: [PATCH 444/624] Handle resume of VABC updates by emitting labels To support resuming an update with Virtual AB Compression, we emit labels in between operations. Before writing any data to CowWriter, we emit label 0. After writing all SOURCE_COPY, we emit label 1. Each time we finished writing an InstallOp, we emit a label incremented by 1. When resuming, we pass the label to CowWriter. 
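A simplified sketch of that resume decision, condensed from the VABCPartitionWriter::Init() change in this patch (error handling and the pref bookkeeping are trimmed; names are as in the diff below):

```cpp
// |next_op| < 0 means no per-partition checkpoint has been written yet.
int64_t next_op = -1;
prefs_->GetInt64(kPrefsUpdateStatePartitionNextOperation, &next_op);
if (install_plan->is_resume && next_op >= 0) {
  // Keep the COW data written up to label |next_op|: label 0 follows the
  // SOURCE_COPY ops, and label k+1 follows InstallOperation k.
  cow_writer_->InitializeAppend(next_op);
} else {
  // Fresh install, or nothing was checkpointed before the interruption.
  cow_writer_->Initialize();
}
```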
Test: treehugger Change-Id: I126efc406286a30a6090a9ce14c65a7d273aee56 --- common/constants.cc | 2 + common/constants.h | 1 + payload_consumer/delta_performer.cc | 11 +++ .../delta_performer_integration_test.cc | 2 + payload_consumer/partition_writer.cc | 4 +- payload_consumer/partition_writer.h | 10 +++ .../partition_writer_factory_android.cc | 4 ++ .../partition_writer_factory_chromeos.cc | 2 + payload_consumer/partition_writer_unittest.cc | 8 ++- payload_consumer/vabc_partition_writer.cc | 68 ++++++++++++++++++- payload_consumer/vabc_partition_writer.h | 1 + 11 files changed, 108 insertions(+), 5 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index 8883668a..16485960 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -98,6 +98,8 @@ const char kPrefsUpdateServerCertificate[] = "update-server-cert"; const char kPrefsUpdateStateNextDataLength[] = "update-state-next-data-length"; const char kPrefsUpdateStateNextDataOffset[] = "update-state-next-data-offset"; const char kPrefsUpdateStateNextOperation[] = "update-state-next-operation"; +const char kPrefsUpdateStatePartitionNextOperation[] = + "update-state-partition-next-operation"; const char kPrefsUpdateStatePayloadIndex[] = "update-state-payload-index"; const char kPrefsUpdateStateSHA256Context[] = "update-state-sha-256-context"; const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob"; diff --git a/common/constants.h b/common/constants.h index 36851020..2a2a62aa 100644 --- a/common/constants.h +++ b/common/constants.h @@ -95,6 +95,7 @@ extern const char kPrefsUpdateServerCertificate[]; extern const char kPrefsUpdateStateNextDataLength[]; extern const char kPrefsUpdateStateNextDataOffset[]; extern const char kPrefsUpdateStateNextOperation[]; +extern const char kPrefsUpdateStatePartitionNextOperation[]; extern const char kPrefsUpdateStatePayloadIndex[]; extern const char kPrefsUpdateStateSHA256Context[]; extern const char kPrefsUpdateStateSignatureBlob[]; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index 30bd1ef5..b75c8cfd 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -48,6 +48,7 @@ #include "update_engine/common/prefs_interface.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" +#include "update_engine/common/utils.h" #include "update_engine/payload_consumer/bzip_extent_writer.h" #include "update_engine/payload_consumer/cached_file_descriptor.h" #include "update_engine/payload_consumer/certificate_parser_interface.h" @@ -247,6 +248,7 @@ bool DeltaPerformer::OpenCurrentPartition() { install_part, dynamic_control, block_size_, + prefs_, interactive_, IsDynamicPartition(install_part.name)); // Open source fds if we have a delta payload, or for partitions in the @@ -1335,6 +1337,13 @@ bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs, next_operation != kUpdateStateOperationInvalid && next_operation > 0)) return false; + int64_t partition_next_op = -1; + if (!(prefs->GetInt64(kPrefsUpdateStatePartitionNextOperation, + &partition_next_op) && + partition_next_op >= 0)) { + return false; + } + string interrupted_hash; if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) && !interrupted_hash.empty() && @@ -1387,6 +1396,7 @@ bool DeltaPerformer::ResetUpdateProgress( prefs->SetString(kPrefsUpdateStateSignatureBlob, ""); prefs->SetInt64(kPrefsManifestMetadataSize, -1); prefs->SetInt64(kPrefsManifestSignatureSize, -1); + 
prefs->SetInt64(kPrefsUpdateStatePartitionNextOperation, -1); prefs->SetInt64(kPrefsResumedUpdateFailures, 0); prefs->Delete(kPrefsPostInstallSucceeded); prefs->Delete(kPrefsVerityWritten); @@ -1431,6 +1441,7 @@ bool DeltaPerformer::CheckpointUpdateProgress(bool force) { partitions_[partition_index].operations(partition_operation_num); TEST_AND_RETURN_FALSE( prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length())); + partition_writer_->CheckpointUpdateProgress(partition_operation_num); } else { TEST_AND_RETURN_FALSE( prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0)); diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index 374131ef..e83a20d0 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -718,6 +718,8 @@ static void ApplyDeltaFile(bool full_kernel, .WillOnce(Return(true)); EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _)) .WillRepeatedly(Return(true)); + EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStatePartitionNextOperation, _)) + .WillRepeatedly(Return(true)); EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextOperation, _)) .WillOnce(Return(false)); EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextDataOffset, _)) diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc index b4b869cd..bdc60158 100644 --- a/payload_consumer/partition_writer.cc +++ b/payload_consumer/partition_writer.cc @@ -242,12 +242,14 @@ PartitionWriter::PartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, + PrefsInterface* prefs, bool is_interactive) : partition_update_(partition_update), install_part_(install_part), dynamic_control_(dynamic_control), interactive_(is_interactive), - block_size_(block_size) {} + block_size_(block_size), + prefs_(prefs) {} PartitionWriter::~PartitionWriter() { Close(); diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h index 1acbddcb..a67339ec 100644 --- a/payload_consumer/partition_writer.h +++ b/payload_consumer/partition_writer.h @@ -25,6 +25,7 @@ #include #include "update_engine/common/dynamic_partition_control_interface.h" +#include "update_engine/common/prefs_interface.h" #include "update_engine/payload_consumer/extent_writer.h" #include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/install_plan.h" @@ -36,6 +37,7 @@ class PartitionWriter { const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, + PrefsInterface* prefs, bool is_interactive); virtual ~PartitionWriter(); static bool ValidateSourceHash(const brillo::Blob& calculated_hash, @@ -48,6 +50,11 @@ class PartitionWriter { [[nodiscard]] virtual bool Init(const InstallPlan* install_plan, bool source_may_exist); + // This will be called by DeltaPerformer after applying an InstallOp. + // |next_op_index| is index of next operation that should be applied. + // |next_op_index-1| is the last operation that is already applied. + virtual void CheckpointUpdateProgress(size_t next_op_index) {} + int Close(); // These perform a specific type of operation and return true on success. @@ -111,6 +118,8 @@ class PartitionWriter { // Used to avoid re-opening the same source partition if it is not actually // error corrected. 
bool source_ecc_open_failure_{false}; + + PrefsInterface* prefs_; }; namespace partition_writer { @@ -121,6 +130,7 @@ std::unique_ptr CreatePartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, + PrefsInterface* prefs, bool is_interactive, bool is_dynamic_partition); } // namespace partition_writer diff --git a/payload_consumer/partition_writer_factory_android.cc b/payload_consumer/partition_writer_factory_android.cc index 0c9f7ea2..5960d9b2 100644 --- a/payload_consumer/partition_writer_factory_android.cc +++ b/payload_consumer/partition_writer_factory_android.cc @@ -19,6 +19,7 @@ #include +#include "update_engine/common/prefs_interface.h" #include "update_engine/payload_consumer/vabc_partition_writer.h" namespace chromeos_update_engine::partition_writer { @@ -28,6 +29,7 @@ std::unique_ptr CreatePartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, + PrefsInterface* prefs, bool is_interactive, bool is_dynamic_partition) { if (dynamic_control && @@ -40,6 +42,7 @@ std::unique_ptr CreatePartitionWriter( install_part, dynamic_control, block_size, + prefs, is_interactive); } else { LOG(INFO) << "Virtual AB Compression disabled, using Partition Writer for `" @@ -48,6 +51,7 @@ std::unique_ptr CreatePartitionWriter( install_part, dynamic_control, block_size, + prefs, is_interactive); } } diff --git a/payload_consumer/partition_writer_factory_chromeos.cc b/payload_consumer/partition_writer_factory_chromeos.cc index 609f0431..d89beb70 100644 --- a/payload_consumer/partition_writer_factory_chromeos.cc +++ b/payload_consumer/partition_writer_factory_chromeos.cc @@ -27,12 +27,14 @@ std::unique_ptr CreatePartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, + PrefsInterface* prefs, bool is_interactive, bool is_dynamic_partition) { return std::make_unique(partition_update, install_part, dynamic_control, block_size, + prefs, is_interactive); } } // namespace chromeos_update_engine::partition_writer diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc index 1ef4783b..ca2ef413 100644 --- a/payload_consumer/partition_writer_unittest.cc +++ b/payload_consumer/partition_writer_unittest.cc @@ -112,8 +112,12 @@ class PartitionWriterTest : public testing::Test { DeltaArchiveManifest manifest_{}; PartitionUpdate partition_update_{}; InstallPlan::Partition install_part_{}; - PartitionWriter writer_{ - partition_update_, install_part_, &dynamic_control_, kBlockSize, false}; + PartitionWriter writer_{partition_update_, + install_part_, + &dynamic_control_, + kBlockSize, + &prefs_, + false}; }; // Test that the error-corrected file descriptor is used to read a partition // when no hash is available for SOURCE_COPY but it falls back to the normal diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc index 1578f29e..9e4d9b8a 100644 --- a/payload_consumer/vabc_partition_writer.cc +++ b/payload_consumer/vabc_partition_writer.cc @@ -29,6 +29,30 @@ #include "update_engine/payload_consumer/snapshot_extent_writer.h" namespace chromeos_update_engine { +// Expected layout of COW file: +// === Beginning of Cow Image === +// All Source Copy Operations +// ========== Label 0 ========== +// Operation 0 in PartitionUpdate +// ========== Label 1 ========== +// Operation 1 in PartitionUpdate +// ========== 
label 2 ========== +// Operation 2 in PartitionUpdate +// ========== label 3 ========== +// . +// . +// . + +// When resuming, pass |kPrefsUpdateStatePartitionNextOperation| as label to +// |InitializeWithAppend|. +// For example, suppose we finished writing SOURCE_COPY, and we finished writing +// operation 2 completely. Update is suspended when we are half way through +// operation 3. +// |kPrefsUpdateStatePartitionNextOperation| would be 3, so we pass 3 as +// label to |InitializeWithAppend|. The CowWriter will retain all data before +// label 3, Which contains all operation 2's data, but none of operation 3's +// data. + bool VABCPartitionWriter::Init(const InstallPlan* install_plan, bool source_may_exist) { TEST_AND_RETURN_FALSE(install_plan != nullptr); @@ -37,18 +61,37 @@ bool VABCPartitionWriter::Init(const InstallPlan* install_plan, install_part_.name, install_part_.source_path, install_plan->is_resume); TEST_AND_RETURN_FALSE(cow_writer_ != nullptr); - // TODO(zhangkelvin) Emit a label before writing SOURCE_COPY. When resuming, + // Emit a label before writing SOURCE_COPY. When resuming, // use pref or CowWriter::GetLastLabel to determine if the SOURCE_COPY ops are // written. No need to handle SOURCE_COPY operations when resuming. // ===== Resume case handling code goes here ==== + if (install_plan->is_resume) { + int64_t next_op = 0; + if (!prefs_->GetInt64(kPrefsUpdateStatePartitionNextOperation, &next_op)) { + LOG(ERROR) + << "Resuming an update but can't fetch |next_op| from saved prefs."; + return false; + } + if (next_op < 0) { + TEST_AND_RETURN_FALSE(cow_writer_->Initialize()); + } else { + TEST_AND_RETURN_FALSE(cow_writer_->InitializeAppend(next_op)); + return true; + } + } else { + TEST_AND_RETURN_FALSE(cow_writer_->Initialize()); + } // ============================================== + TEST_AND_RETURN_FALSE( + prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, -1)); // TODO(zhangkelvin) Rewrite this in C++20 coroutine once that's available. auto converted = ConvertToCowOperations(partition_update_.operations(), partition_update_.merge_operations()); std::vector buffer(block_size_); + for (const auto& cow_op : converted) { switch (cow_op.op) { case CowOperation::CowCopy: @@ -71,6 +114,11 @@ bool VABCPartitionWriter::Init(const InstallPlan* install_plan, break; } } + + // Emit label 0 to mark end of all SOURCE_COPY operations + cow_writer_->AddLabel(0); + TEST_AND_RETURN_FALSE( + prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, 0)); return true; } @@ -95,11 +143,27 @@ std::unique_ptr VABCPartitionWriter::CreateBaseExtentWriter() { } bool VABCPartitionWriter::Flush() { - // No need to do anything, as CowWriter automatically flushes every OP added. + // No need to call fsync/sync, as CowWriter flushes after a label is added + // added. + int64_t next_op = 0; + // |kPrefsUpdateStatePartitionNextOperation| will be maintained and set by + // CheckpointUpdateProgress() + TEST_AND_RETURN_FALSE( + prefs_->GetInt64(kPrefsUpdateStatePartitionNextOperation, &next_op)); + // +1 because label 0 is reserved for SOURCE_COPY. See beginning of this + // file for explanation for cow format. + cow_writer_->AddLabel(next_op + 1); return true; } +void VABCPartitionWriter::CheckpointUpdateProgress(size_t next_op_index) { + prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, next_op_index); +} + VABCPartitionWriter::~VABCPartitionWriter() { + // Reset |kPrefsUpdateStatePartitionNextOperation| once we finished a + // partition. 
+ prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, -1); cow_writer_->Finalize(); } diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h index d65ac4a5..3fc97ce8 100644 --- a/payload_consumer/vabc_partition_writer.h +++ b/payload_consumer/vabc_partition_writer.h @@ -42,6 +42,7 @@ class VABCPartitionWriter final : public PartitionWriter { [[nodiscard]] bool PerformSourceCopyOperation( const InstallOperation& operation, ErrorCode* error) override; [[nodiscard]] bool Flush() override; + void CheckpointUpdateProgress(size_t next_op_index) override; private: std::unique_ptr cow_writer_; From 7a2657510c0ff24afe282d8e061916ebac78cf27 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Thu, 29 Oct 2020 15:51:35 -0400 Subject: [PATCH 445/624] Estimate COW image size during OTA generation Estimate COW image size and put the estimation in OTA metadata. Then VAB could use this to allocate disk space and prompt the user if more space required. Test: create an OTA package Change-Id: Iaedafcf39af2d1a4d9cae9cd1a642a3cd3a4815c --- Android.bp | 1 + BUILD.gn | 1 + payload_consumer/delta_performer_unittest.cc | 4 +- payload_consumer/vabc_partition_writer.cc | 36 +++--- payload_consumer/vabc_partition_writer.h | 7 ++ payload_generator/cow_size_estimator.cc | 110 ++++++++++++++++++ payload_generator/cow_size_estimator.h | 36 ++++++ payload_generator/cow_size_estimator_stub.cc | 31 +++++ payload_generator/delta_diff_generator.cc | 62 +++++++++- payload_generator/payload_file.cc | 7 +- payload_generator/payload_file.h | 4 +- .../payload_properties_unittest.cc | 2 +- scripts/payload_info.py | 5 +- 13 files changed, 281 insertions(+), 25 deletions(-) create mode 100644 payload_generator/cow_size_estimator.cc create mode 100644 payload_generator/cow_size_estimator.h create mode 100644 payload_generator/cow_size_estimator_stub.cc diff --git a/Android.bp b/Android.bp index d5213a8b..ed704ffc 100644 --- a/Android.bp +++ b/Android.bp @@ -533,6 +533,7 @@ cc_library_static { "payload_generator/block_mapping.cc", "payload_generator/boot_img_filesystem.cc", "payload_generator/bzip.cc", + "payload_generator/cow_size_estimator.cc", "payload_generator/deflate_utils.cc", "payload_generator/delta_diff_generator.cc", "payload_generator/delta_diff_utils.cc", diff --git a/BUILD.gn b/BUILD.gn index 30ba9770..6e282f5d 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -344,6 +344,7 @@ static_library("libpayload_generator") { "payload_generator/block_mapping.cc", "payload_generator/boot_img_filesystem_stub.cc", "payload_generator/bzip.cc", + "payload_generator/cow_size_estimator_stub.cc", "payload_generator/deflate_utils.cc", "payload_generator/delta_diff_generator.cc", "payload_generator/delta_diff_utils.cc", diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc index 2f308c01..74f17d27 100644 --- a/payload_consumer/delta_performer_unittest.cc +++ b/payload_consumer/delta_performer_unittest.cc @@ -228,13 +228,13 @@ class DeltaPerformerTest : public ::testing::Test { new_part.path = "/dev/zero"; new_part.size = 1234; - payload.AddPartition(*old_part, new_part, aops, {}); + payload.AddPartition(*old_part, new_part, aops, {}, 0); // We include a kernel partition without operations. 
old_part->name = kPartitionNameKernel; new_part.name = kPartitionNameKernel; new_part.size = 0; - payload.AddPartition(*old_part, new_part, {}, {}); + payload.AddPartition(*old_part, new_part, {}, {}, 0); ScopedTempFile payload_file("Payload-XXXXXX"); string private_key = diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc index 9e4d9b8a..73bf413f 100644 --- a/payload_consumer/vabc_partition_writer.cc +++ b/payload_consumer/vabc_partition_writer.cc @@ -24,6 +24,7 @@ #include "update_engine/common/cow_operation_convert.h" #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/extent_writer.h" +#include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/partition_writer.h" #include "update_engine/payload_consumer/snapshot_extent_writer.h" @@ -90,35 +91,44 @@ bool VABCPartitionWriter::Init(const InstallPlan* install_plan, // TODO(zhangkelvin) Rewrite this in C++20 coroutine once that's available. auto converted = ConvertToCowOperations(partition_update_.operations(), partition_update_.merge_operations()); - std::vector buffer(block_size_); + + WriteAllCowOps(block_size_, converted, cow_writer_.get(), source_fd_); + // Emit label 0 to mark end of all SOURCE_COPY operations + cow_writer_->AddLabel(0); + TEST_AND_RETURN_FALSE( + prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, 0)); + return true; +} + +bool VABCPartitionWriter::WriteAllCowOps( + size_t block_size, + const std::vector& converted, + android::snapshot::ICowWriter* cow_writer, + FileDescriptorPtr source_fd) { + std::vector buffer(block_size); for (const auto& cow_op : converted) { switch (cow_op.op) { case CowOperation::CowCopy: TEST_AND_RETURN_FALSE( - cow_writer_->AddCopy(cow_op.dst_block, cow_op.src_block)); + cow_writer->AddCopy(cow_op.dst_block, cow_op.src_block)); break; case CowOperation::CowReplace: ssize_t bytes_read = 0; - TEST_AND_RETURN_FALSE(utils::PReadAll(source_fd_, + TEST_AND_RETURN_FALSE(utils::PReadAll(source_fd, buffer.data(), - block_size_, - cow_op.src_block * block_size_, + block_size, + cow_op.src_block * block_size, &bytes_read)); - if (bytes_read <= 0 || static_cast(bytes_read) != block_size_) { + if (bytes_read <= 0 || static_cast(bytes_read) != block_size) { LOG(ERROR) << "source_fd->Read failed: " << bytes_read; return false; } - TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks( - cow_op.dst_block, buffer.data(), block_size_)); + TEST_AND_RETURN_FALSE(cow_writer->AddRawBlocks( + cow_op.dst_block, buffer.data(), block_size)); break; } } - - // Emit label 0 to mark end of all SOURCE_COPY operations - cow_writer_->AddLabel(0); - TEST_AND_RETURN_FALSE( - prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, 0)); return true; } diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h index 3fc97ce8..ddade704 100644 --- a/payload_consumer/vabc_partition_writer.h +++ b/payload_consumer/vabc_partition_writer.h @@ -18,9 +18,11 @@ #define UPDATE_ENGINE_VABC_PARTITION_WRITER_H_ #include +#include #include +#include "update_engine/common/cow_operation_convert.h" #include "update_engine/payload_consumer/install_plan.h" #include "update_engine/payload_consumer/partition_writer.h" @@ -44,6 +46,11 @@ class VABCPartitionWriter final : public PartitionWriter { [[nodiscard]] bool Flush() override; void CheckpointUpdateProgress(size_t next_op_index) override; + static bool WriteAllCowOps(size_t 
block_size, + const std::vector& converted, + android::snapshot::ICowWriter* cow_writer, + FileDescriptorPtr source_fd); + private: std::unique_ptr cow_writer_; }; diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc new file mode 100644 index 00000000..662a3ca1 --- /dev/null +++ b/payload_generator/cow_size_estimator.cc @@ -0,0 +1,110 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_generator/cow_size_estimator.h" + +#include +#include + +#include + +#include "android-base/unique_fd.h" +#include "update_engine/common/cow_operation_convert.h" +#include "update_engine/payload_consumer/vabc_partition_writer.h" +#include "update_engine/update_metadata.pb.h" + +namespace chromeos_update_engine { +using android::snapshot::CowWriter; + +void PerformReplaceOp(const InstallOperation& op, + CowWriter* writer, + FileDescriptorPtr target_fd, + size_t block_size) { + std::vector buffer; + for (const auto& extent : op.dst_extents()) { + buffer.resize(extent.num_blocks() * block_size); + // No need to read from payload.bin then decompress, just read from target + // directly. + ssize_t bytes_read = 0; + auto success = utils::PReadAll(target_fd, + buffer.data(), + buffer.size(), + extent.start_block() * block_size, + &bytes_read); + CHECK(success); + CHECK_EQ(static_cast(bytes_read), buffer.size()); + writer->AddRawBlocks(extent.start_block(), buffer.data(), buffer.size()); + } +} + +void PerformZeroOp(const InstallOperation& op, + CowWriter* writer, + size_t block_size) { + for (const auto& extent : op.dst_extents()) { + writer->AddZeroBlocks(extent.start_block(), extent.num_blocks()); + } +} + +size_t EstimateCowSize( + FileDescriptorPtr source_fd, + FileDescriptorPtr target_fd, + const google::protobuf::RepeatedPtrField& operations, + const google::protobuf::RepeatedPtrField& + merge_operations, + size_t block_size) { + android::snapshot::CowWriter cow_writer{ + {.block_size = static_cast(block_size), .compression = "gz"}}; + // CowWriter treats -1 as special value, will discard all the data but still + // reports Cow size. 
Good for estimation purposes + cow_writer.Initialize(android::base::borrowed_fd{-1}); + + const auto converted = ConvertToCowOperations(operations, merge_operations); + VABCPartitionWriter::WriteAllCowOps( + block_size, converted, &cow_writer, source_fd); + cow_writer.AddLabel(0); + for (const auto& op : operations) { + switch (op.type()) { + case InstallOperation::REPLACE: + case InstallOperation::REPLACE_BZ: + case InstallOperation::REPLACE_XZ: + PerformReplaceOp(op, &cow_writer, target_fd, block_size); + break; + case InstallOperation::ZERO: + case InstallOperation::DISCARD: + PerformZeroOp(op, &cow_writer, block_size); + break; + case InstallOperation::SOURCE_COPY: + case InstallOperation::MOVE: + // Already handeled by WriteAllCowOps, + break; + case InstallOperation::SOURCE_BSDIFF: + case InstallOperation::BROTLI_BSDIFF: + case InstallOperation::PUFFDIFF: + case InstallOperation::BSDIFF: + // We might do something special by adding CowBsdiff to CowWriter. + // For now proceed the same way as normal REPLACE operation. + PerformReplaceOp(op, &cow_writer, target_fd, block_size); + break; + } + // Arbitrary label number, we won't be resuming use these labels here. + // They are emitted just to keep size estimates accurate. As update_engine + // emits 1 label for every op. + cow_writer.AddLabel(2); + } + // TODO(zhangkelvin) Take FEC extents into account once VABC stabilizes + return cow_writer.GetCowSize(); +} +} // namespace chromeos_update_engine diff --git a/payload_generator/cow_size_estimator.h b/payload_generator/cow_size_estimator.h new file mode 100644 index 00000000..cba89b54 --- /dev/null +++ b/payload_generator/cow_size_estimator.h @@ -0,0 +1,36 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include + +#include + +#include "update_engine/payload_consumer/file_descriptor.h" + +namespace chromeos_update_engine { +// Given file descriptor to the source image, target image, and list of +// operations, estimate the size of COW image if the operations are applied on +// Virtual AB Compression enabled device. This is intended to be used by update +// generators to put an estimate cow size in OTA payload. When installing an OTA +// update, libsnapshot will take this estimate as a hint to allocate spaces. +size_t EstimateCowSize( + FileDescriptorPtr source_fd, + FileDescriptorPtr target_fd, + const google::protobuf::RepeatedPtrField& operations, + const google::protobuf::RepeatedPtrField& + merge_operations, + size_t block_size); + +} // namespace chromeos_update_engine diff --git a/payload_generator/cow_size_estimator_stub.cc b/payload_generator/cow_size_estimator_stub.cc new file mode 100644 index 00000000..9d94d63b --- /dev/null +++ b/payload_generator/cow_size_estimator_stub.cc @@ -0,0 +1,31 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "update_engine/payload_generator/cow_size_estimator.h" + +namespace chromeos_update_engine { + +size_t EstimateCowSize( + FileDescriptorPtr source_fd, + FileDescriptorPtr target_fd, + const google::protobuf::RepeatedPtrField& operations, + const google::protobuf::RepeatedPtrField& + merge_operations, + size_t block_size) { + return 0; +} + +} // namespace chromeos_update_engine diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc index ff8b0dae..47c92e02 100644 --- a/payload_generator/delta_diff_generator.cc +++ b/payload_generator/delta_diff_generator.cc @@ -33,14 +33,17 @@ #include "update_engine/common/utils.h" #include "update_engine/payload_consumer/delta_performer.h" +#include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/payload_constants.h" #include "update_engine/payload_generator/ab_generator.h" #include "update_engine/payload_generator/annotated_operation.h" #include "update_engine/payload_generator/blob_file_writer.h" +#include "update_engine/payload_generator/cow_size_estimator.h" #include "update_engine/payload_generator/delta_diff_utils.h" #include "update_engine/payload_generator/full_update_generator.h" #include "update_engine/payload_generator/merge_sequence_generator.h" #include "update_engine/payload_generator/payload_file.h" +#include "update_engine/update_metadata.pb.h" using std::string; using std::unique_ptr; @@ -53,6 +56,18 @@ const size_t kRootFSPartitionSize = static_cast(2) * 1024 * 1024 * 1024; const size_t kBlockSize = 4096; // bytes class PartitionProcessor : public base::DelegateSimpleThread::Delegate { + bool IsDynamicPartition(const std::string& partition_name) { + for (const auto& group : + config_.target.dynamic_partition_metadata->groups()) { + const auto& names = group.partition_names(); + if (std::find(names.begin(), names.end(), partition_name) != + names.end()) { + return true; + } + } + return false; + } + public: explicit PartitionProcessor( const PayloadGenerationConfig& config, @@ -61,6 +76,7 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { BlobFileWriter* file_writer, std::vector* aops, std::vector* cow_merge_sequence, + size_t* cow_size, std::unique_ptr strategy) : config_(config), old_part_(old_part), @@ -68,11 +84,13 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { file_writer_(file_writer), aops_(aops), cow_merge_sequence_(cow_merge_sequence), + cow_size_(cow_size), strategy_(std::move(strategy)) {} PartitionProcessor(PartitionProcessor&&) noexcept = default; + void Run() override { LOG(INFO) << "Started an async task to process partition " - << old_part_.name; + << new_part_.name; bool success = strategy_->GenerateOperations( config_, old_part_, new_part_, file_writer_, aops_); if (!success) { @@ -85,13 +103,38 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { bool snapshot_enabled = config_.target.dynamic_partition_metadata && config_.target.dynamic_partition_metadata->snapshot_enabled(); - if (old_part_.path.empty() || 
!snapshot_enabled) { + if (!snapshot_enabled || !IsDynamicPartition(new_part_.name)) { return; } - auto generator = MergeSequenceGenerator::Create(*aops_); - if (!generator || !generator->Generate(cow_merge_sequence_)) { - LOG(FATAL) << "Failed to generate merge sequence"; + if (!old_part_.path.empty()) { + auto generator = MergeSequenceGenerator::Create(*aops_); + if (!generator || !generator->Generate(cow_merge_sequence_)) { + LOG(FATAL) << "Failed to generate merge sequence"; + } } + + LOG(INFO) << "Estimating COW size for partition: " << new_part_.name; + // Need the contents of source/target image bytes when doing + // dry run. + FileDescriptorPtr source_fd{new EintrSafeFileDescriptor()}; + source_fd->Open(old_part_.path.c_str(), O_RDONLY); + + auto target_fd = std::make_unique(); + target_fd->Open(new_part_.path.c_str(), O_RDONLY); + + google::protobuf::RepeatedPtrField operations; + + for (const AnnotatedOperation& aop : *aops_) { + *operations.Add() = aop.op; + } + *cow_size_ = EstimateCowSize( + source_fd, + std::move(target_fd), + operations, + {cow_merge_sequence_->begin(), cow_merge_sequence_->end()}, + config_.block_size); + LOG(INFO) << "Estimated COW size for partition: " << new_part_.name << " " + << *cow_size_; } private: @@ -101,6 +144,7 @@ class PartitionProcessor : public base::DelegateSimpleThread::Delegate { BlobFileWriter* file_writer_; std::vector* aops_; std::vector* cow_merge_sequence_; + size_t* cow_size_; std::unique_ptr strategy_; DISALLOW_COPY_AND_ASSIGN(PartitionProcessor); }; @@ -130,8 +174,12 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, PartitionConfig empty_part(""); std::vector> all_aops; all_aops.resize(config.target.partitions.size()); + std::vector> all_merge_sequences; all_merge_sequences.resize(config.target.partitions.size()); + + std::vector all_cow_sizes(config.target.partitions.size(), 0); + std::vector partition_tasks{}; auto thread_count = std::min(diff_utils::GetMaxThreads(), config.target.partitions.size()); @@ -163,6 +211,7 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, &blob_file, &all_aops[i], &all_merge_sequences[i], + &all_cow_sizes[i], std::move(strategy))); } thread_pool.Start(); @@ -179,7 +228,8 @@ bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config, payload.AddPartition(old_part, new_part, std::move(all_aops[i]), - std::move(all_merge_sequences[i]))); + std::move(all_merge_sequences[i]), + all_cow_sizes[i])); } } data_file.CloseFd(); diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc index 74423d12..33c07493 100644 --- a/payload_generator/payload_file.cc +++ b/payload_generator/payload_file.cc @@ -80,8 +80,10 @@ bool PayloadFile::Init(const PayloadGenerationConfig& config) { bool PayloadFile::AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, vector aops, - vector merge_sequence) { + vector merge_sequence, + size_t cow_size) { Partition part; + part.cow_size = cow_size; part.name = new_conf.name; part.aops = std::move(aops); part.cow_merge_sequence = std::move(merge_sequence); @@ -129,6 +131,9 @@ bool PayloadFile::WritePayload(const string& payload_file, if (!part.version.empty()) { partition->set_version(part.version); } + if (part.cow_size > 0) { + partition->set_estimate_cow_size(part.cow_size); + } if (part.postinstall.run) { partition->set_run_postinstall(true); if (!part.postinstall.path.empty()) diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h index 
8b179569..3a457930 100644 --- a/payload_generator/payload_file.h +++ b/payload_generator/payload_file.h @@ -44,7 +44,8 @@ class PayloadFile { bool AddPartition(const PartitionConfig& old_conf, const PartitionConfig& new_conf, std::vector aops, - std::vector merge_sequence); + std::vector merge_sequence, + size_t cow_size); // Write the payload to the |payload_file| file. The operations reference // blobs in the |data_blobs_path| file and the blobs will be reordered in the @@ -100,6 +101,7 @@ class PayloadFile { VerityConfig verity; // Per partition timestamp. std::string version; + size_t cow_size; }; std::vector part_vec_; diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc index ed936ff7..0ff364fc 100644 --- a/payload_generator/payload_properties_unittest.cc +++ b/payload_generator/payload_properties_unittest.cc @@ -88,7 +88,7 @@ class PayloadPropertiesTest : public ::testing::Test { EXPECT_TRUE(strategy->GenerateOperations( config, old_part, new_part, &blob_file_writer, &aops)); - payload.AddPartition(old_part, new_part, aops, {}); + payload.AddPartition(old_part, new_part, aops, {}, 0); uint64_t metadata_size; EXPECT_TRUE(payload.WritePayload( diff --git a/scripts/payload_info.py b/scripts/payload_info.py index 7625ee8e..8343d212 100755 --- a/scripts/payload_info.py +++ b/scripts/payload_info.py @@ -75,8 +75,11 @@ def _DisplayManifest(self): DisplayValue(' Number of "%s" ops' % partition.partition_name, len(partition.operations)) for partition in manifest.partitions: - DisplayValue("Timestamp for " + + DisplayValue(" Timestamp for " + partition.partition_name, partition.version) + for partition in manifest.partitions: + DisplayValue(" COW Size for " + + partition.partition_name, partition.estimate_cow_size) DisplayValue('Block size', manifest.block_size) DisplayValue('Minor version', manifest.minor_version) From 4b28024b7b3a628830ce11319b4cc6a075a7262c Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 6 Nov 2020 16:07:45 -0500 Subject: [PATCH 446/624] Rename PReadAll/PWriteAll for file descriptor ptr to ReadAll/WriteAll Utils.cc contain 2 implementation of PReadAll: 1 for unix fd, and 1 for FileDescriptorPtr. However, the implementation for unix fd calls pread syscall under the hood, which does not change file offset. The implementation for FileDescriptorPtr DOES change file offset, making code inconsistent. For now we rename inconsistent functions to ReadAll/WriteAll. The next CL adds PReadAll/PWriteAll implementation for FileDescriptorPtr. 
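A rough sketch of the offset behaviour the rename documents (illustrative only; |raw_fd| is assumed to be an open plain-int descriptor, |fd| an open FileDescriptorPtr):

```cpp
char buf[4096];
size_t count = sizeof(buf);
off_t offset = 0;
ssize_t bytes_read = 0;
// Plain-int overload: backed by pread(2), so the file offset is untouched.
utils::PReadAll(raw_fd, buf, count, offset, &bytes_read);
// FileDescriptorPtr overload (now named ReadAll): it seeks to |offset| first,
// so the descriptor's position ends up at offset + bytes_read afterwards.
utils::ReadAll(fd, buf, count, offset, &bytes_read);
```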
Test: treehugger Change-Id: I2781b294f0f8e866275e1649e9b45d565d4cd5b8 --- common/utils.cc | 18 +++++++++--------- common/utils.h | 21 ++++++++++++--------- payload_consumer/extent_reader.cc | 2 +- payload_consumer/mount_history.cc | 2 +- payload_consumer/partition_writer.cc | 2 +- payload_consumer/vabc_partition_writer.cc | 10 +++++----- payload_generator/cow_size_estimator.cc | 10 +++++----- 7 files changed, 34 insertions(+), 31 deletions(-) diff --git a/common/utils.cc b/common/utils.cc index c8924b1e..66fd12d5 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -192,10 +192,10 @@ bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count) { return true; } -bool PWriteAll(const FileDescriptorPtr& fd, - const void* buf, - size_t count, - off_t offset) { +bool WriteAll(const FileDescriptorPtr& fd, + const void* buf, + size_t count, + off_t offset) { TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) != static_cast(-1)); return WriteAll(fd, buf, count); @@ -218,11 +218,11 @@ bool PReadAll( return true; } -bool PReadAll(const FileDescriptorPtr& fd, - void* buf, - size_t count, - off_t offset, - ssize_t* out_bytes_read) { +bool ReadAll(const FileDescriptorPtr& fd, + void* buf, + size_t count, + off_t offset, + ssize_t* out_bytes_read) { TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) != static_cast(-1)); char* c_buf = static_cast(buf); diff --git a/common/utils.h b/common/utils.h index 05a92be8..0762796f 100644 --- a/common/utils.h +++ b/common/utils.h @@ -18,6 +18,7 @@ #define UPDATE_ENGINE_COMMON_UTILS_H_ #include +#include #include #include @@ -63,10 +64,11 @@ bool WriteAll(int fd, const void* buf, size_t count); bool PWriteAll(int fd, const void* buf, size_t count, off_t offset); bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count); -bool PWriteAll(const FileDescriptorPtr& fd, - const void* buf, - size_t count, - off_t offset); +// WriteAll writes data at specified offset, but it modifies file position. +bool WriteAll(const FileDescriptorPtr& fd, + const void* buf, + size_t count, + off_t off); // Calls read() repeatedly until |count| bytes are read or EOF or EWOULDBLOCK // is reached. Returns whether all read() calls succeeded (including EWOULDBLOCK @@ -81,11 +83,12 @@ bool ReadAll( bool PReadAll( int fd, void* buf, size_t count, off_t offset, ssize_t* out_bytes_read); -bool PReadAll(const FileDescriptorPtr& fd, - void* buf, - size_t count, - off_t offset, - ssize_t* out_bytes_read); +// Reads data at specified offset, this function does change file position. +bool ReadAll(const FileDescriptorPtr& fd, + void* buf, + size_t count, + off_t offset, + ssize_t* out_bytes_read); // Opens |path| for reading and appends its entire content to the container // pointed to by |out_p|. 
Returns true upon successfully reading all of the diff --git a/payload_consumer/extent_reader.cc b/payload_consumer/extent_reader.cc index ad983ae1..3c7329dc 100644 --- a/payload_consumer/extent_reader.cc +++ b/payload_consumer/extent_reader.cc @@ -77,7 +77,7 @@ bool DirectExtentReader::Read(void* buffer, size_t count) { std::min(count - bytes_read, cur_extent_bytes_left); ssize_t out_bytes_read; - TEST_AND_RETURN_FALSE(utils::PReadAll( + TEST_AND_RETURN_FALSE(utils::ReadAll( fd_, bytes + bytes_read, bytes_to_read, diff --git a/payload_consumer/mount_history.cc b/payload_consumer/mount_history.cc index 43a75b34..1d2ec769 100644 --- a/payload_consumer/mount_history.cc +++ b/payload_consumer/mount_history.cc @@ -37,7 +37,7 @@ void LogMountHistory(const FileDescriptorPtr blockdevice_fd) { brillo::Blob block0_buffer(kBlockSize); ssize_t bytes_read; - if (!utils::PReadAll( + if (!utils::ReadAll( blockdevice_fd, block0_buffer.data(), kBlockSize, 0, &bytes_read)) { LOG(WARNING) << "PReadAll failed"; return; diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc index bdc60158..12b206e4 100644 --- a/payload_consumer/partition_writer.cc +++ b/payload_consumer/partition_writer.cc @@ -351,7 +351,7 @@ bool PartitionWriter::PerformZeroOrDiscardOperation( for (uint64_t offset = 0; offset < length; offset += zeros.size()) { uint64_t chunk_length = std::min(length - offset, static_cast(zeros.size())); - TEST_AND_RETURN_FALSE(utils::PWriteAll( + TEST_AND_RETURN_FALSE(utils::WriteAll( target_fd_, zeros.data(), chunk_length, start + offset)); } } diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc index 73bf413f..e8994b40 100644 --- a/payload_consumer/vabc_partition_writer.cc +++ b/payload_consumer/vabc_partition_writer.cc @@ -115,11 +115,11 @@ bool VABCPartitionWriter::WriteAllCowOps( break; case CowOperation::CowReplace: ssize_t bytes_read = 0; - TEST_AND_RETURN_FALSE(utils::PReadAll(source_fd, - buffer.data(), - block_size, - cow_op.src_block * block_size, - &bytes_read)); + TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd, + buffer.data(), + block_size, + cow_op.src_block * block_size, + &bytes_read)); if (bytes_read <= 0 || static_cast(bytes_read) != block_size) { LOG(ERROR) << "source_fd->Read failed: " << bytes_read; return false; diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc index 662a3ca1..3eb0acac 100644 --- a/payload_generator/cow_size_estimator.cc +++ b/payload_generator/cow_size_estimator.cc @@ -39,11 +39,11 @@ void PerformReplaceOp(const InstallOperation& op, // No need to read from payload.bin then decompress, just read from target // directly. ssize_t bytes_read = 0; - auto success = utils::PReadAll(target_fd, - buffer.data(), - buffer.size(), - extent.start_block() * block_size, - &bytes_read); + auto success = utils::ReadAll(target_fd, + buffer.data(), + buffer.size(), + extent.start_block() * block_size, + &bytes_read); CHECK(success); CHECK_EQ(static_cast(bytes_read), buffer.size()); writer->AddRawBlocks(extent.start_block(), buffer.data(), buffer.size()); From c3c0a3ab46e5907272f73287b54baf65db3bb555 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 9 Nov 2020 16:08:13 -0500 Subject: [PATCH 447/624] Add PReadAll/PWriteAll for filedescriptor ptr the newly added functions will read/write to a specified offset without modifying file offset. 
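For illustration, a hypothetical caller that benefits from the new helpers (PeekBlock and the 4096-byte block size are made up for this sketch; only the utils:: functions come from this change):

```cpp
// Peek at a block elsewhere in the image without disturbing the sequential
// read position of |fd|.
bool PeekBlock(const FileDescriptorPtr& fd, off_t offset, brillo::Blob* out) {
  ssize_t bytes_read = 0;
  out->resize(4096);
  // PReadAll saves the current offset, seeks, reads, then restores the offset.
  return utils::PReadAll(fd, out->data(), out->size(), offset, &bytes_read) &&
         bytes_read == static_cast<ssize_t>(out->size());
}
```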
Test: treehugger Change-Id: If0e7e3f2b9d2371838c3a7f27fe4250400ba3133 --- common/utils.cc | 25 +++++++++++++++++++++++++ common/utils.h | 17 +++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/common/utils.cc b/common/utils.cc index 66fd12d5..1ac42dcd 100644 --- a/common/utils.cc +++ b/common/utils.cc @@ -239,6 +239,31 @@ bool ReadAll(const FileDescriptorPtr& fd, return true; } +bool PReadAll(const FileDescriptorPtr& fd, + void* buf, + size_t count, + off_t offset, + ssize_t* out_bytes_read) { + auto old_off = fd->Seek(0, SEEK_CUR); + TEST_AND_RETURN_FALSE_ERRNO(old_off >= 0); + + auto success = ReadAll(fd, buf, count, offset, out_bytes_read); + TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(old_off, SEEK_SET) == old_off); + return success; +} + +bool PWriteAll(const FileDescriptorPtr& fd, + const void* buf, + size_t count, + off_t offset) { + auto old_off = fd->Seek(0, SEEK_CUR); + TEST_AND_RETURN_FALSE_ERRNO(old_off >= 0); + + auto success = WriteAll(fd, buf, count, offset); + TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(old_off, SEEK_SET) == old_off); + return success; +} + // Append |nbytes| of content from |buf| to the vector pointed to by either // |vec_p| or |str_p|. static void AppendBytes(const uint8_t* buf, diff --git a/common/utils.h b/common/utils.h index 0762796f..616de065 100644 --- a/common/utils.h +++ b/common/utils.h @@ -70,6 +70,14 @@ bool WriteAll(const FileDescriptorPtr& fd, size_t count, off_t off); +// https://man7.org/linux/man-pages/man2/pread.2.html +// PWriteAll writes data at specified offset, but it DOES NOT modify file +// position. Behaves similar to linux' pwrite syscall. +bool PWriteAll(const FileDescriptorPtr& fd, + const void* buf, + size_t count, + off_t offset); + // Calls read() repeatedly until |count| bytes are read or EOF or EWOULDBLOCK // is reached. Returns whether all read() calls succeeded (including EWOULDBLOCK // as a success case), sets |eof| to whether the eof was reached and sets @@ -90,6 +98,15 @@ bool ReadAll(const FileDescriptorPtr& fd, off_t offset, ssize_t* out_bytes_read); +// https://man7.org/linux/man-pages/man2/pread.2.html +// Reads data at specified offset, this function DOES NOT change file position. +// Behavior is similar to linux's pread syscall. +bool PReadAll(const FileDescriptorPtr& fd, + void* buf, + size_t count, + off_t offset, + ssize_t* out_bytes_read); + // Opens |path| for reading and appends its entire content to the container // pointed to by |out_p|. Returns true upon successfully reading all of the // file's content, false otherwise, in which case the state of the output From 5611cd3e418bc1e7451d4247daef836077efa9a0 Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Fri, 6 Nov 2020 12:08:04 -0800 Subject: [PATCH 448/624] update_engine: Rename const test variables Use snake case over camel case for local constants. 
BUG=b:171829801 TEST=FEATURES=test emerge-$B update_engine Change-Id: Icac43ca7080cd3e2e585afba5dae4ad172f0378c Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2523668 Tested-by: Jae Hoon Kim Auto-Submit: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- cros/omaha_request_builder_xml_unittest.cc | 80 +++++++++++----------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/cros/omaha_request_builder_xml_unittest.cc b/cros/omaha_request_builder_xml_unittest.cc index 11d808bb..055d0f3d 100644 --- a/cros/omaha_request_builder_xml_unittest.cc +++ b/cros/omaha_request_builder_xml_unittest.cc @@ -144,10 +144,10 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { 0, fake_system_state_.prefs(), ""}; - const string kRequestXml = omaha_request.GetRequest(); + const string request_xml = omaha_request.GetRequest(); const string key = "requestid"; const string request_id = - FindAttributeKeyValueInXml(kRequestXml, key, kGuidSize); + FindAttributeKeyValueInXml(request_xml, key, kGuidSize); // A valid |request_id| is either a GUID version 4 or empty string. if (!request_id.empty()) EXPECT_TRUE(base::IsValidGUID(request_id)); @@ -165,10 +165,10 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { 0, fake_system_state_.prefs(), gen_session_id}; - const string kRequestXml = omaha_request.GetRequest(); + const string request_xml = omaha_request.GetRequest(); const string key = "sessionid"; const string session_id = - FindAttributeKeyValueInXml(kRequestXml, key, kGuidSize); + FindAttributeKeyValueInXml(request_xml, key, kGuidSize); // A valid |session_id| is either a GUID version 4 or empty string. if (!session_id.empty()) { EXPECT_TRUE(base::IsValidGUID(session_id)); @@ -187,9 +187,9 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) { 0, fake_system_state_.prefs(), ""}; - const string kRequestXml = omaha_request.GetRequest(); - EXPECT_EQ(1, CountSubstringInString(kRequestXml, " size_t { - return kRequestXml.find(" size_t { + return request_xml.find("")) - << kRequestXml; + request_xml, "")) + << request_xml; } TEST_F(OmahaRequestBuilderXmlTest, @@ -354,18 +354,18 @@ TEST_F(OmahaRequestBuilderXmlTest, 0, fake_system_state_.prefs(), ""}; - const string kRequestXml = omaha_request.GetRequest(); + const string request_xml = omaha_request.GetRequest(); EXPECT_EQ( 2, CountSubstringInString( - kRequestXml, "")) - << kRequestXml; + request_xml, "")) + << request_xml; EXPECT_EQ( 1, CountSubstringInString( - kRequestXml, + request_xml, "")) - << kRequestXml; + << request_xml; } TEST_F(OmahaRequestBuilderXmlTest, @@ -385,17 +385,17 @@ TEST_F(OmahaRequestBuilderXmlTest, 0, fake_system_state_.prefs(), ""}; - const string kRequestXml = omaha_request.GetRequest(); + const string request_xml = omaha_request.GetRequest(); EXPECT_EQ( 1, CountSubstringInString( - kRequestXml, "")) - << kRequestXml; + request_xml, "")) + << request_xml; EXPECT_EQ( 2, CountSubstringInString( - kRequestXml, + request_xml, "")) - << kRequestXml; + << request_xml; } } // namespace chromeos_update_engine From e2cac6148b9e1357596e545900b40acd9509749c Mon Sep 17 00:00:00 2001 From: Jae Hoon Kim Date: Mon, 2 Nov 2020 18:30:29 -0800 Subject: [PATCH 449/624] update_engine: Support DLC Cohorts UE at the moment doesn't send the correct cohorts to Omaha for DLCs. In fact, the platform cohorts that are persisted are used for all DLCs. This is incorrect and this CL fixes it. ``` Example DLC Response: ... 
cohort="1:7:" cohortname="eve_dlc_pita_canary"... localhost ~ # ls /var/lib/update_engine/prefs/dlc/pita/omaha-cohort* /var/lib/update_engine/prefs/dlc/pita/omaha-cohort /var/lib/update_engine/prefs/dlc/pita/omaha-cohort-name localhost ~ # cat /var/lib/update_engine/prefs/dlc/pita/omaha-cohort 1:7: localhost ~ # cat /var/lib/update_engine/prefs/dlc/pita/omaha-cohort-name eve_dlc_pita_canary ``` BUG=b:162463872 TEST=FEATURES=test emerge-$B update_engine TEST=# cros deploy + comment above Change-Id: Ie503f0a63d3b19a51abb88379cb2e8f85919858b Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2515072 Tested-by: Jae Hoon Kim Reviewed-by: Amin Hassani Commit-Queue: Jae Hoon Kim --- cros/omaha_request_action.cc | 67 +++++++---- cros/omaha_request_action.h | 18 ++- cros/omaha_request_action_unittest.cc | 127 ++++++++++++++++++++- cros/omaha_request_builder_xml.cc | 35 ++++-- cros/omaha_request_builder_xml.h | 6 +- cros/omaha_request_builder_xml_unittest.cc | 76 ++++++++++++ cros/omaha_request_params.cc | 8 ++ cros/omaha_request_params.h | 4 + cros/omaha_request_params_unittest.cc | 29 +++++ 9 files changed, 330 insertions(+), 40 deletions(-) diff --git a/cros/omaha_request_action.cc b/cros/omaha_request_action.cc index 0916f9d3..51c0ac92 100644 --- a/cros/omaha_request_action.cc +++ b/cros/omaha_request_action.cc @@ -763,17 +763,7 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, } // We persist the cohorts sent by omaha even if the status is "noupdate". - for (const auto& app : parser_data->apps) { - if (app.id == params_->GetAppId()) { - if (app.cohort) - PersistCohortData(kPrefsOmahaCohort, app.cohort.value()); - if (app.cohorthint) - PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint.value()); - if (app.cohortname) - PersistCohortData(kPrefsOmahaCohortName, app.cohortname.value()); - break; - } - } + PersistCohorts(*parser_data); PersistEolInfo(parser_data->updatecheck_attrs); @@ -1398,16 +1388,53 @@ bool OmahaRequestAction::PersistInstallDate( return true; } -bool OmahaRequestAction::PersistCohortData(const string& prefs_key, - const string& new_value) { - if (new_value.empty() && system_state_->prefs()->Exists(prefs_key)) { - LOG(INFO) << "Removing stored " << prefs_key << " value."; - return system_state_->prefs()->Delete(prefs_key); - } else if (!new_value.empty()) { - LOG(INFO) << "Storing new setting " << prefs_key << " as " << new_value; - return system_state_->prefs()->SetString(prefs_key, new_value); +void OmahaRequestAction::PersistCohortData(const string& prefs_key, + const Optional& new_value) { + if (!new_value) + return; + const string& value = new_value.value(); + if (value.empty() && system_state_->prefs()->Exists(prefs_key)) { + if (!system_state_->prefs()->Delete(prefs_key)) + LOG(ERROR) << "Failed to remove stored " << prefs_key << "value."; + else + LOG(INFO) << "Removed stored " << prefs_key << " value."; + } else if (!value.empty()) { + if (!system_state_->prefs()->SetString(prefs_key, value)) + LOG(INFO) << "Failed to store new setting " << prefs_key << " as " + << value; + else + LOG(INFO) << "Stored cohort setting " << prefs_key << " as " << value; + } +} + +void OmahaRequestAction::PersistCohorts(const OmahaParserData& parser_data) { + for (const auto& app : parser_data.apps) { + // For platform App ID. 
+ if (app.id == params_->GetAppId()) { + PersistCohortData(kPrefsOmahaCohort, app.cohort); + PersistCohortData(kPrefsOmahaCohortName, app.cohortname); + PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint); + } else if (params_->IsDlcAppId(app.id)) { + string dlc_id; + if (!params_->GetDlcId(app.id, &dlc_id)) { + LOG(WARNING) << "Skip persisting cohorts for DLC App ID=" << app.id + << " as it is not in the request params."; + continue; + } + PrefsInterface* prefs = system_state_->prefs(); + PersistCohortData( + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohort}), + app.cohort); + PersistCohortData( + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortName}), + app.cohortname); + PersistCohortData( + prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortHint}), + app.cohorthint); + } else { + LOG(WARNING) << "Skip persisting cohorts for unknown App ID=" << app.id; + } } - return true; } bool OmahaRequestAction::PersistEolInfo(const map& attrs) { diff --git a/cros/omaha_request_action.h b/cros/omaha_request_action.h index 1a3a912f..9576a056 100644 --- a/cros/omaha_request_action.h +++ b/cros/omaha_request_action.h @@ -28,6 +28,7 @@ #include // for FRIEND_TEST +#include #include #include @@ -176,12 +177,17 @@ class OmahaRequestAction : public Action, int install_date_days, InstallDateProvisioningSource source); - // Persist the new cohort* value received in the XML file in the |prefs_key| - // preference file. If the |new_value| is empty, the currently stored value - // will be deleted. Don't call this function with an empty |new_value| if the - // value was not set in the XML, since that would delete the stored value. - bool PersistCohortData(const std::string& prefs_key, - const std::string& new_value); + // Persist the new cohort value received in the XML file in the |prefs_key| + // preference file. If the |new_value| is empty, do nothing. If the + // |new_value| stores and empty value, the currently stored value will be + // deleted. Don't call this function with an empty |new_value| if the value + // was not set in the XML, since that would delete the stored value. + void PersistCohortData(const std::string& prefs_key, + const base::Optional& new_value); + + // Parses and persists the cohorts sent back in the updatecheck tag + // attributes. + void PersistCohorts(const OmahaParserData& parser_data); // Parses and persists the end-of-life date flag sent back in the updatecheck // tag attributes. The flags will be validated and stored in the Prefs. diff --git a/cros/omaha_request_action_unittest.cc b/cros/omaha_request_action_unittest.cc index c3842b85..8d94195b 100644 --- a/cros/omaha_request_action_unittest.cc +++ b/cros/omaha_request_action_unittest.cc @@ -204,7 +204,13 @@ struct FakeUpdateResponse { : "") + (dlc_app_update ? "" + "\" " + + (include_dlc_cohorts + ? "cohort=\"" + dlc_cohort + "\" cohorthint=\"" + + dlc_cohorthint + "\" cohortname=\"" + + dlc_cohortname + "\" " + : "") + + "status=\"ok\">" "" + +"\" " + + (include_dlc_cohorts + ? "cohort=\"" + dlc_cohort + "\" cohorthint=\"" + + dlc_cohorthint + "\" cohortname=\"" + + dlc_cohortname + "\" " + : "") + + ">" : "") + ""; } @@ -252,6 +264,11 @@ struct FakeUpdateResponse { string cohort = ""; string cohorthint = ""; string cohortname = ""; + // Whether to include Omaha cohorts for DLC apps. + bool include_dlc_cohorts = false; + string dlc_cohort = ""; + string dlc_cohorthint = ""; + string dlc_cohortname = ""; // Whether to include the CrOS in the XML response. 
bool include_entity = false; @@ -1240,8 +1257,16 @@ TEST_F(OmahaRequestActionTest, CohortsArePersisted) { fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = "stable"; + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + fake_update_response_.dlc_app_update = true; + fake_update_response_.include_dlc_cohorts = true; + fake_update_response_.dlc_cohort = "s/154454/8479665/dlc"; + fake_update_response_.dlc_cohorthint = "please-put-me-on-beta-dlc"; + fake_update_response_.dlc_cohortname = "stable-dlc"; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); ASSERT_TRUE(TestUpdateCheck()); string value; @@ -1253,18 +1278,52 @@ TEST_F(OmahaRequestActionTest, CohortsArePersisted) { EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value)); EXPECT_EQ(fake_update_response_.cohortname, value); + + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohort, value); + + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohorthint, value); + + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohortname, value); } TEST_F(OmahaRequestActionTest, CohortsAreUpdated) { EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value")); EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortHint, "old_hint")); EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortName, "old_name")); + const string dlc_cohort_key = + fake_prefs_.CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort}); + const string dlc_cohort_hint_key = fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}); + const string dlc_cohort_name_key = fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}); + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}}); + EXPECT_TRUE(fake_prefs_.SetString(dlc_cohort_key, "old_value_dlc")); + EXPECT_TRUE(fake_prefs_.SetString(dlc_cohort_hint_key, "old_hint_dlc")); + EXPECT_TRUE(fake_prefs_.SetString(dlc_cohort_name_key, "old_name_dlc")); fake_update_response_.include_cohorts = true; fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = ""; + fake_update_response_.dlc_app_update = true; + fake_update_response_.include_dlc_cohorts = true; + fake_update_response_.dlc_cohort = "s/154454/8479665/dlc"; + fake_update_response_.dlc_cohorthint = "please-put-me-on-beta-dlc"; + fake_update_response_.dlc_cohortname = ""; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); ASSERT_TRUE(TestUpdateCheck()); string value; @@ -1275,12 +1334,23 @@ TEST_F(OmahaRequestActionTest, CohortsAreUpdated) { EXPECT_EQ(fake_update_response_.cohorthint, value); EXPECT_FALSE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value)); + + EXPECT_TRUE(fake_prefs_.GetString(dlc_cohort_key, &value)); + EXPECT_EQ(fake_update_response_.dlc_cohort, value); + + 
EXPECT_TRUE(fake_prefs_.GetString(dlc_cohort_hint_key, &value)); + EXPECT_EQ(fake_update_response_.dlc_cohorthint, value); + + EXPECT_FALSE(fake_prefs_.GetString(dlc_cohort_name_key, &value)); } TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) { tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value")); + const string dlc_cohort_key = + fake_prefs_.CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort}); + EXPECT_TRUE(fake_prefs_.SetString(dlc_cohort_key, "old_value_dlc")); ASSERT_TRUE(TestUpdateCheck()); string value; @@ -1289,6 +1359,18 @@ TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) { EXPECT_FALSE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value)); EXPECT_FALSE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value)); + + EXPECT_TRUE(fake_prefs_.GetString(dlc_cohort_key, &value)); + EXPECT_EQ("old_value_dlc", value); + + EXPECT_FALSE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}), + &value)); + EXPECT_FALSE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}), + &value)); } TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) { @@ -1319,8 +1401,18 @@ TEST_F(OmahaRequestActionTest, MultiAppCohortTest) { fake_update_response_.cohort = "s/154454/8479665"; fake_update_response_.cohorthint = "please-put-me-on-beta"; fake_update_response_.cohortname = "stable"; + request_params_.set_dlc_apps_params( + {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}, + {request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}}); + fake_update_response_.dlc_app_update = true; + fake_update_response_.dlc_app_no_update = true; + fake_update_response_.include_dlc_cohorts = true; + fake_update_response_.dlc_cohort = "s/154454/8479665/dlc"; + fake_update_response_.dlc_cohorthint = "please-put-me-on-beta-dlc"; + fake_update_response_.dlc_cohortname = "stable-dlc"; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); + EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false)); ASSERT_TRUE(TestUpdateCheck()); string value; @@ -1332,6 +1424,37 @@ TEST_F(OmahaRequestActionTest, MultiAppCohortTest) { EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value)); EXPECT_EQ(fake_update_response_.cohortname, value); + + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey({kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohort}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohort, value); + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey({kDlcPrefsSubDir, kDlcId2, kPrefsOmahaCohort}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohort, value); + + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortHint}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohorthint, value); + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId2, kPrefsOmahaCohortHint}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohorthint, value); + + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId1, kPrefsOmahaCohortName}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohortname, value); + EXPECT_TRUE(fake_prefs_.GetString( + fake_prefs_.CreateSubKey( + {kDlcPrefsSubDir, kDlcId2, kPrefsOmahaCohortName}), + &value)); + EXPECT_EQ(fake_update_response_.dlc_cohortname, value); } 
TEST_F(OmahaRequestActionTest, NoOutputPipeTest) { diff --git a/cros/omaha_request_builder_xml.cc b/cros/omaha_request_builder_xml.cc index 43ee5486..739abbff 100644 --- a/cros/omaha_request_builder_xml.cc +++ b/cros/omaha_request_builder_xml.cc @@ -215,9 +215,10 @@ string OmahaRequestBuilderXml::GetAppBody(const OmahaAppData& app_data) const { return app_body; } -string OmahaRequestBuilderXml::GetCohortArg(const string arg_name, - const string prefs_key, - const string override_value) const { +string OmahaRequestBuilderXml::GetCohortArg( + const string& arg_name, + const string& prefs_key, + const string& override_value) const { string cohort_value; if (!override_value.empty()) { // |override_value| take precedence over pref value. @@ -296,14 +297,30 @@ string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const { } string app_cohort_args; - app_cohort_args += GetCohortArg("cohort", kPrefsOmahaCohort); - app_cohort_args += GetCohortArg("cohortname", kPrefsOmahaCohortName); + string cohort_key = kPrefsOmahaCohort; + string cohortname_key = kPrefsOmahaCohortName; + string cohorthint_key = kPrefsOmahaCohortHint; + + // Override the cohort keys for DLC App IDs. + const auto& dlc_apps_params = params_->dlc_apps_params(); + auto itr = dlc_apps_params.find(app_data.id); + if (itr != dlc_apps_params.end()) { + auto dlc_id = itr->second.name; + cohort_key = + prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohort}); + cohortname_key = + prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortName}); + cohorthint_key = + prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohortHint}); + } + app_cohort_args += GetCohortArg("cohort", cohort_key); + app_cohort_args += GetCohortArg("cohortname", cohortname_key); // Policy provided value overrides pref. - string autoupdate_token = params_->autoupdate_token(); - app_cohort_args += GetCohortArg("cohorthint", - kPrefsOmahaCohortHint, - autoupdate_token /* override_value */); + app_cohort_args += + GetCohortArg("cohorthint", + cohorthint_key, + params_->autoupdate_token() /* override_value */); string fingerprint_arg; if (!params_->os_build_fingerprint().empty()) { diff --git a/cros/omaha_request_builder_xml.h b/cros/omaha_request_builder_xml.h index 4f860dd2..0aca7f33 100644 --- a/cros/omaha_request_builder_xml.h +++ b/cros/omaha_request_builder_xml.h @@ -168,9 +168,9 @@ class OmahaRequestBuilderXml : OmahaRequestBuilder { // Returns the cohort* argument to include in the tag for the passed // |arg_name| and |prefs_key|, if any. The return value is suitable to // concatenate to the list of arguments and includes a space at the end. - std::string GetCohortArg(const std::string arg_name, - const std::string prefs_key, - const std::string override_value = "") const; + std::string GetCohortArg(const std::string& arg_name, + const std::string& prefs_key, + const std::string& override_value = "") const; // Returns an XML ping element if any of the elapsed days need to be // sent, or an empty string otherwise. 
diff --git a/cros/omaha_request_builder_xml_unittest.cc b/cros/omaha_request_builder_xml_unittest.cc index 055d0f3d..c04c9944 100644 --- a/cros/omaha_request_builder_xml_unittest.cc +++ b/cros/omaha_request_builder_xml_unittest.cc @@ -21,13 +21,19 @@ #include #include +#include #include +#include "update_engine/common/fake_prefs.h" #include "update_engine/cros/fake_system_state.h" using std::pair; using std::string; using std::vector; +using testing::_; +using testing::DoAll; +using testing::Return; +using testing::SetArgPointee; namespace chromeos_update_engine { @@ -398,4 +404,74 @@ TEST_F(OmahaRequestBuilderXmlTest, "")) << request_xml; } + +TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortMissingCheck) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + constexpr char kDlcId[] = "test-dlc-id"; + omaha_request_params.set_dlc_apps_params( + {{omaha_request_params.GetDlcAppId(kDlcId), {.name = kDlcId}}}); + auto* mock_prefs = fake_system_state_.mock_prefs(); + OmahaEvent event(OmahaEvent::kTypeUpdateDownloadStarted); + OmahaRequestBuilderXml omaha_request{ + &event, &omaha_request_params, false, false, 0, 0, 0, mock_prefs, ""}; + // OS App ID Expectations. + EXPECT_CALL(*mock_prefs, Exists(kPrefsOmahaCohort)); + EXPECT_CALL(*mock_prefs, Exists(kPrefsOmahaCohortName)); + EXPECT_CALL(*mock_prefs, Exists(kPrefsOmahaCohortHint)); + // DLC App ID Expectations. + EXPECT_CALL(*mock_prefs, + Exists(PrefsInterface::CreateSubKey( + {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohort}))); + EXPECT_CALL(*mock_prefs, + Exists(PrefsInterface::CreateSubKey( + {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohortName}))); + EXPECT_CALL(*mock_prefs, + Exists(PrefsInterface::CreateSubKey( + {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohortHint}))); + const string request_xml = omaha_request.GetRequest(); + + // Check that no cohorts are in the request. + EXPECT_EQ(0, CountSubstringInString(request_xml, "cohort=")) << request_xml; + EXPECT_EQ(0, CountSubstringInString(request_xml, "cohortname=")) + << request_xml; + EXPECT_EQ(0, CountSubstringInString(request_xml, "cohorthint=")) + << request_xml; +} + +TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortCheck) { + OmahaRequestParams omaha_request_params{&fake_system_state_}; + const string kDlcId = "test-dlc-id"; + omaha_request_params.set_dlc_apps_params( + {{omaha_request_params.GetDlcAppId(kDlcId), {.name = kDlcId}}}); + FakePrefs fake_prefs; + fake_system_state_.set_prefs(&fake_prefs); + OmahaEvent event(OmahaEvent::kTypeUpdateDownloadStarted); + OmahaRequestBuilderXml omaha_request{ + &event, &omaha_request_params, false, false, 0, 0, 0, &fake_prefs, ""}; + // DLC App ID Expectations. 
+ const string dlc_cohort_key = PrefsInterface::CreateSubKey( + {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohort}); + const string kDlcCohortVal = "test-cohort"; + EXPECT_TRUE(fake_prefs.SetString(dlc_cohort_key, kDlcCohortVal)); + const string dlc_cohort_name_key = PrefsInterface::CreateSubKey( + {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohortName}); + const string kDlcCohortNameVal = "test-cohortname"; + EXPECT_TRUE(fake_prefs.SetString(dlc_cohort_name_key, kDlcCohortNameVal)); + const string dlc_cohort_hint_key = PrefsInterface::CreateSubKey( + {kDlcPrefsSubDir, kDlcId, kPrefsOmahaCohortHint}); + const string kDlcCohortHintVal = "test-cohortval"; + EXPECT_TRUE(fake_prefs.SetString(dlc_cohort_hint_key, kDlcCohortHintVal)); + const string request_xml = omaha_request.GetRequest(); + + EXPECT_EQ(1, + CountSubstringInString( + request_xml, + base::StringPrintf( + "cohort=\"%s\" cohortname=\"%s\" cohorthint=\"%s\"", + kDlcCohortVal.c_str(), + kDlcCohortNameVal.c_str(), + kDlcCohortHintVal.c_str()))) + << request_xml; +} + } // namespace chromeos_update_engine diff --git a/cros/omaha_request_params.cc b/cros/omaha_request_params.cc index c814e00f..e7e719bd 100644 --- a/cros/omaha_request_params.cc +++ b/cros/omaha_request_params.cc @@ -274,6 +274,14 @@ bool OmahaRequestParams::IsDlcAppId(const std::string& app_id) const { return dlc_apps_params().find(app_id) != dlc_apps_params().end(); } +bool OmahaRequestParams::GetDlcId(const string& app_id, string* dlc_id) const { + auto itr = dlc_apps_params_.find(app_id); + if (itr == dlc_apps_params_.end()) + return false; + *dlc_id = itr->second.name; + return true; +} + void OmahaRequestParams::SetDlcNoUpdate(const string& app_id) { auto itr = dlc_apps_params_.find(app_id); if (itr == dlc_apps_params_.end()) diff --git a/cros/omaha_request_params.h b/cros/omaha_request_params.h index 26ea1c9a..fa452ce8 100644 --- a/cros/omaha_request_params.h +++ b/cros/omaha_request_params.h @@ -228,6 +228,10 @@ class OmahaRequestParams { // request parameters. virtual bool IsDlcAppId(const std::string& app_id) const; + // Returns the DLC App ID if the given App ID is a DLC that is currently part + // of the request parameters. + virtual bool GetDlcId(const std::string& app_id, std::string* dlc_id) const; + // If the App ID is a DLC App ID will set to no update. 
void SetDlcNoUpdate(const std::string& app_id); diff --git a/cros/omaha_request_params_unittest.cc b/cros/omaha_request_params_unittest.cc index 71f3d4c7..ff52fc2d 100644 --- a/cros/omaha_request_params_unittest.cc +++ b/cros/omaha_request_params_unittest.cc @@ -261,4 +261,33 @@ TEST_F(OmahaRequestParamsTest, RequisitionIsSetTest) { EXPECT_TRUE(params_.Init("", "", {})); EXPECT_EQ("fake_requisition", params_.device_requisition()); } + +TEST_F(OmahaRequestParamsTest, GetMissingDlcId) { + EXPECT_TRUE(params_.Init("", "", {})); + + string dlc_id; + EXPECT_FALSE(params_.GetDlcId("some-dlc-app-id", &dlc_id)); +} + +TEST_F(OmahaRequestParamsTest, GetDlcId) { + EXPECT_TRUE(params_.Init("", "", {})); + const string kExpectedDlcId = "test-dlc"; + const string dlc_app_id = params_.GetDlcAppId(kExpectedDlcId); + params_.set_dlc_apps_params({{dlc_app_id, {.name = kExpectedDlcId}}}); + + string dlc_id; + EXPECT_TRUE(params_.GetDlcId(dlc_app_id, &dlc_id)); + EXPECT_EQ(kExpectedDlcId, dlc_id); +} + +TEST_F(OmahaRequestParamsTest, GetDlcAppId) { + EXPECT_TRUE(params_.Init("", "", {})); + const string kAppId = "test-app-id"; + params_.set_app_id(kAppId); + const string kDlcId = "test-dlc"; + const string expected_dlc_app_id = kAppId + "_" + kDlcId; + + EXPECT_EQ(expected_dlc_app_id, params_.GetDlcAppId(kDlcId)); +} + } // namespace chromeos_update_engine From da35b878076eac28aae4ae6971f439f02d16cacb Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 4 Nov 2020 17:54:44 -0800 Subject: [PATCH 450/624] update_engine: Clean up a bit of logs Remove some logs that show the content of the response. We already log the response in its entirety. There is no need to individually log pieces of it. BUG=b:163075733 TEST=None Change-Id: Iec3fdfd2239c84662af91632c97a2c8722913b2d Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2520223 Commit-Queue: Amin Hassani Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- cros/omaha_request_action.cc | 40 +++++++++++++----------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/cros/omaha_request_action.cc b/cros/omaha_request_action.cc index 51c0ac92..faa7ddef 100644 --- a/cros/omaha_request_action.cc +++ b/cros/omaha_request_action.cc @@ -334,7 +334,7 @@ bool OmahaRequestAction::ShouldPing() const { } if (system_state_->hardware()->GetFirstActiveOmahaPingSent()) { LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because " - << "the first_active_omaha_ping_sent is true"; + << "the first_active_omaha_ping_sent is true."; return false; } return true; @@ -401,7 +401,7 @@ int OmahaRequestAction::GetInstallDate(SystemState* system_state) { return -1; LOG(INFO) << "Set the Omaha InstallDate from OOBE time-stamp to " << num_days - << " days"; + << " days."; return num_days; } @@ -547,16 +547,15 @@ bool ParsePackage(OmahaParserData::App* app, return true; } if (app->packages.empty()) { - LOG(ERROR) << "Omaha Response has no packages"; + LOG(ERROR) << "Omaha Response has no packages."; completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } if (app->url_codebase.empty()) { - LOG(ERROR) << "No Omaha Response URLs"; + LOG(ERROR) << "No Omaha Response URLs."; completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } - LOG(INFO) << "Found " << app->url_codebase.size() << " url(s)"; vector metadata_sizes = base::SplitString(app->action_postinstall_attrs[kAttrMetadataSize], ":", @@ -575,23 +574,23 @@ bool ParsePackage(OmahaParserData::App* app, for (size_t i = 0; i < 
app->packages.size(); i++) { const auto& package = app->packages[i]; if (package.name.empty()) { - LOG(ERROR) << "Omaha Response has empty package name"; + LOG(ERROR) << "Omaha Response has empty package name."; completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } - LOG(INFO) << "Found package " << package.name; OmahaResponse::Package out_package; out_package.app_id = app->id; out_package.can_exclude = can_exclude; for (const string& codebase : app->url_codebase) { if (codebase.empty()) { - LOG(ERROR) << "Omaha Response URL has empty codebase"; + LOG(ERROR) << "Omaha Response URL has empty codebase."; completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } out_package.payload_urls.push_back(codebase + package.name); } + // Parse the payload size. base::StringToUint64(package.size, &out_package.size); if (out_package.size <= 0) { @@ -599,29 +598,22 @@ bool ParsePackage(OmahaParserData::App* app, completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } - LOG(INFO) << "Payload size = " << out_package.size << " bytes"; if (i < metadata_sizes.size()) base::StringToUint64(metadata_sizes[i], &out_package.metadata_size); - LOG(INFO) << "Payload metadata size = " << out_package.metadata_size - << " bytes"; if (i < metadata_signatures.size()) out_package.metadata_signature = metadata_signatures[i]; - LOG(INFO) << "Payload metadata signature = " - << out_package.metadata_signature; out_package.hash = package.hash; if (out_package.hash.empty()) { - LOG(ERROR) << "Omaha Response has empty hash_sha256 value"; + LOG(ERROR) << "Omaha Response has empty hash_sha256 value."; completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } - LOG(INFO) << "Payload hash = " << out_package.hash; if (i < is_delta_payloads.size()) out_package.is_delta = ParseBool(is_delta_payloads[i]); - LOG(INFO) << "Payload is delta = " << utils::ToString(out_package.is_delta); output_object->packages.push_back(std::move(out_package)); } @@ -724,7 +716,6 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } - LOG(INFO) << "Found " << parser_data->apps.size() << " ."; // chromium-os:37289: The PollInterval is not supported by Omaha server // currently. 
But still keeping this existing code in case we ever decide to @@ -758,7 +749,7 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, install_date_days_rounded, kProvisionedFromOmahaResponse)) { LOG(INFO) << "Set the Omaha InstallDate from Omaha Response to " - << install_date_days_rounded << " days"; + << install_date_days_rounded << " days."; } } @@ -859,7 +850,7 @@ bool OmahaRequestAction::ParseParams(OmahaParserData* parser_data, app.manifest_version != params_->app_version()) { LOG(WARNING) << "An app has a different version (" << app.manifest_version << ") that is different than platform app version (" - << params_->app_version() << ")"; + << params_->app_version() << ")."; } if (!app.action_postinstall_attrs.empty() && attrs.empty()) { attrs = app.action_postinstall_attrs; @@ -875,11 +866,8 @@ bool OmahaRequestAction::ParseParams(OmahaParserData* parser_data, return false; } - LOG(INFO) << "Received omaha response to update to version " - << output_object->version; - if (attrs.empty()) { - LOG(ERROR) << "Omaha Response has no postinstall event action"; + LOG(ERROR) << "Omaha Response has no postinstall event action."; completer->set_code(ErrorCode::kOmahaResponseInvalid); return false; } @@ -1090,7 +1078,7 @@ void OmahaRequestAction::CompleteProcessing() { if (payload_state->ShouldBackoffDownload()) { output_object.update_exists = false; LOG(INFO) << "Ignoring Omaha updates in order to backoff our retry " - << "attempts"; + << "attempts."; completer.set_code(ErrorCode::kOmahaUpdateDeferredForBackoff); return; } @@ -1224,7 +1212,7 @@ OmahaRequestAction::IsWallClockBasedWaitingSatisfied( Time update_first_seen_at = LoadOrPersistUpdateFirstSeenAtPref(); if (update_first_seen_at == base::Time()) { LOG(INFO) << "Not scattering as UpdateFirstSeenAt value cannot be read or " - "persisted"; + "persisted."; return kWallClockWaitDoneAndUpdateCheckWaitNotRequired; } @@ -1250,7 +1238,7 @@ OmahaRequestAction::IsWallClockBasedWaitingSatisfied( // previous FSI, which means this update will be applied mostly in OOBE // cases. For these cases, we shouldn't scatter so as to finish the OOBE // quickly. - LOG(INFO) << "Not scattering as deadline flag is set"; + LOG(INFO) << "Not scattering as deadline flag is set."; return kWallClockWaitDoneAndUpdateCheckWaitNotRequired; } From c288bf627fd2fc1d0466b70e4d6fbdb78414309d Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 4 Nov 2020 19:33:43 -0800 Subject: [PATCH 451/624] update_engine: Move DaemonStateInterface implementation to UpdateAttempter It seems like UpdateAttempter is the best option for implementation of DaemonStateInterface. SystemState should only be doing state keeping not doing these startup logics. 
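A condensed sketch of the resulting wiring (illustrative only; the real code is in the daemon_chromeos.cc hunk below, and the helper name here is made up). The point is that the daemon now reaches DaemonStateInterface through the UpdateAttempter owned by SystemState instead of through RealSystemState's pass-throughs.
```
#include <memory>

#include "update_engine/common/daemon_state_interface.h"
#include "update_engine/cros/real_system_state.h"

namespace chromeos_update_engine {

// Hypothetical helper, not real code: |adaptor| stands in for the D-Bus
// adaptor the daemon registers as an observer.
void StartDaemonSketch(ServiceObserverInterface* adaptor) {
  auto system_state = std::make_unique<RealSystemState>();
  if (!system_state->Initialize())
    return;
  // UpdateAttempter is now the DaemonStateInterface implementation.
  DaemonStateInterface* daemon_state = system_state->update_attempter();
  daemon_state->AddObserver(adaptor);
  daemon_state->StartUpdater();
}

}  // namespace chromeos_update_engine
```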
BUG=b:171829801 TEST=unittests TEST=CQ passes Change-Id: I47ec50107ffbeb544e061f39c900a1559f2cdcab Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2519843 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Jae Hoon Kim --- common/daemon_state_interface.h | 3 +- cros/daemon_chromeos.cc | 6 ++-- cros/daemon_chromeos.h | 5 ++-- cros/real_system_state.cc | 49 --------------------------------- cros/real_system_state.h | 18 ++---------- cros/update_attempter.cc | 31 +++++++++++++++++++++ cros/update_attempter.h | 14 ++++++---- 7 files changed, 48 insertions(+), 78 deletions(-) diff --git a/common/daemon_state_interface.h b/common/daemon_state_interface.h index 9509fa2f..831e38bc 100644 --- a/common/daemon_state_interface.h +++ b/common/daemon_state_interface.h @@ -19,7 +19,6 @@ #include "update_engine/common/service_observer_interface.h" -#include #include namespace chromeos_update_engine { @@ -42,6 +41,8 @@ class DaemonStateInterface { protected: DaemonStateInterface() = default; + + DISALLOW_COPY_AND_ASSIGN(DaemonStateInterface); }; } // namespace chromeos_update_engine diff --git a/cros/daemon_chromeos.cc b/cros/daemon_chromeos.cc index a7cad8cc..1e0e6d61 100644 --- a/cros/daemon_chromeos.cc +++ b/cros/daemon_chromeos.cc @@ -46,13 +46,13 @@ int DaemonChromeOS::OnInit() { // avoiding the explicit re-usage of the |bus| instance, shared between // D-Bus service and D-Bus client calls. RealSystemState* real_system_state = new RealSystemState(); - daemon_state_.reset(real_system_state); LOG_IF(ERROR, !real_system_state->Initialize()) << "Failed to initialize system state."; + system_state_.reset(real_system_state); // Create the DBus service. dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state)); - daemon_state_->AddObserver(dbus_adaptor_.get()); + system_state_->update_attempter()->AddObserver(dbus_adaptor_.get()); dbus_adaptor_->RegisterAsync( base::Bind(&DaemonChromeOS::OnDBusRegistered, base::Unretained(this))); @@ -76,7 +76,7 @@ void DaemonChromeOS::OnDBusRegistered(bool succeeded) { QuitWithExitCode(1); return; } - daemon_state_->StartUpdater(); + system_state_->update_attempter()->StartUpdater(); } } // namespace chromeos_update_engine diff --git a/cros/daemon_chromeos.h b/cros/daemon_chromeos.h index 5d568c70..3b9c8dea 100644 --- a/cros/daemon_chromeos.h +++ b/cros/daemon_chromeos.h @@ -47,9 +47,8 @@ class DaemonChromeOS : public DaemonBase { // the main() function. Subprocess subprocess_; - // The daemon state with all the required daemon classes for the configured - // platform. - std::unique_ptr daemon_state_; + // The global context sysetm state. + std::unique_ptr system_state_; DISALLOW_COPY_AND_ASSIGN(DaemonChromeOS); }; diff --git a/cros/real_system_state.cc b/cros/real_system_state.cc index 4f572465..5715a39d 100644 --- a/cros/real_system_state.cc +++ b/cros/real_system_state.cc @@ -39,20 +39,10 @@ #if USE_DBUS #include "update_engine/cros/dbus_connection.h" #endif // USE_DBUS -#include "update_engine/update_boot_flags_action.h" #include "update_engine/update_manager/state_factory.h" -using brillo::MessageLoop; - namespace chromeos_update_engine { -RealSystemState::~RealSystemState() { - // Prevent any DBus communication from UpdateAttempter when shutting down the - // daemon. 
- if (update_attempter_) - update_attempter_->ClearObservers(); -} - bool RealSystemState::Initialize() { boot_control_ = boot_control::CreateBootControl(); if (!boot_control_) { @@ -201,43 +191,4 @@ bool RealSystemState::Initialize() { return true; } -bool RealSystemState::StartUpdater() { - // Initiate update checks. - update_attempter_->ScheduleUpdates(); - - auto update_boot_flags_action = - std::make_unique(boot_control_.get()); - processor_.EnqueueAction(std::move(update_boot_flags_action)); - // Update boot flags after 45 seconds. - MessageLoop::current()->PostDelayedTask( - FROM_HERE, - base::Bind(&ActionProcessor::StartProcessing, - base::Unretained(&processor_)), - base::TimeDelta::FromSeconds(45)); - - // Broadcast the update engine status on startup to ensure consistent system - // state on crashes. - MessageLoop::current()->PostTask( - FROM_HERE, - base::Bind(&UpdateAttempter::BroadcastStatus, - base::Unretained(update_attempter_.get()))); - - // Run the UpdateEngineStarted() method on |update_attempter|. - MessageLoop::current()->PostTask( - FROM_HERE, - base::Bind(&UpdateAttempter::UpdateEngineStarted, - base::Unretained(update_attempter_.get()))); - return true; -} - -void RealSystemState::AddObserver(ServiceObserverInterface* observer) { - CHECK(update_attempter_.get()); - update_attempter_->AddObserver(observer); -} - -void RealSystemState::RemoveObserver(ServiceObserverInterface* observer) { - CHECK(update_attempter_.get()); - update_attempter_->RemoveObserver(observer); -} - } // namespace chromeos_update_engine diff --git a/cros/real_system_state.h b/cros/real_system_state.h index 798fca0d..a93a9b70 100644 --- a/cros/real_system_state.h +++ b/cros/real_system_state.h @@ -48,29 +48,17 @@ namespace chromeos_update_engine { // A real implementation of the SystemStateInterface which is // used by the actual product code. -class RealSystemState : public SystemState, public DaemonStateInterface { +class RealSystemState : public SystemState { public: // Constructs all system objects that do not require separate initialization; // see Initialize() below for the remaining ones. RealSystemState() = default; - ~RealSystemState() override; + ~RealSystemState() = default; // Initializes and sets systems objects that require an initialization // separately from construction. Returns |true| on success. bool Initialize(); - // DaemonStateInterface overrides. - // Start the periodic update attempts. Must be called at the beginning of the - // program to start the periodic update check process. - bool StartUpdater() override; - - void AddObserver(ServiceObserverInterface* observer) override; - void RemoveObserver(ServiceObserverInterface* observer) override; - const std::set& service_observers() override { - CHECK(update_attempter_.get()); - return update_attempter_->service_observers(); - } - // SystemState overrides. inline void set_device_policy( const policy::DevicePolicy* device_policy) override { @@ -193,8 +181,6 @@ class RealSystemState : public SystemState, public DaemonStateInterface { // rebooted. Important for tracking whether you are running instance of the // update engine on first boot or due to a crash/restart. 
bool system_rebooted_{false}; - - ActionProcessor processor_; }; } // namespace chromeos_update_engine diff --git a/cros/update_attempter.cc b/cros/update_attempter.cc index e9098ded..e4174578 100644 --- a/cros/update_attempter.cc +++ b/cros/update_attempter.cc @@ -134,6 +134,10 @@ UpdateAttempter::UpdateAttempter(SystemState* system_state, is_install_(false) {} UpdateAttempter::~UpdateAttempter() { + // Prevent any DBus communication from UpdateAttempter when shutting down the + // daemon. + ClearObservers(); + // CertificateChecker might not be initialized in unittests. if (cert_checker_) cert_checker_->SetObserver(nullptr); @@ -177,6 +181,33 @@ bool UpdateAttempter::ScheduleUpdates() { return true; } +bool UpdateAttempter::StartUpdater() { + // Initiate update checks. + ScheduleUpdates(); + + auto update_boot_flags_action = + std::make_unique(system_state_->boot_control()); + processor_->EnqueueAction(std::move(update_boot_flags_action)); + // Update boot flags after 45 seconds. + MessageLoop::current()->PostDelayedTask( + FROM_HERE, + base::Bind(&ActionProcessor::StartProcessing, + base::Unretained(processor_.get())), + base::TimeDelta::FromSeconds(45)); + + // Broadcast the update engine status on startup to ensure consistent system + // state on crashes. + MessageLoop::current()->PostTask( + FROM_HERE, + base::Bind(&UpdateAttempter::BroadcastStatus, base::Unretained(this))); + + MessageLoop::current()->PostTask( + FROM_HERE, + base::Bind(&UpdateAttempter::UpdateEngineStarted, + base::Unretained(this))); + return true; +} + void UpdateAttempter::CertificateChecked(ServerToCheck server_to_check, CertificateCheckResult result) { system_state_->metrics_reporter()->ReportCertificateCheckMetrics( diff --git a/cros/update_attempter.h b/cros/update_attempter.h index 0f4c9524..bd0aef60 100644 --- a/cros/update_attempter.h +++ b/cros/update_attempter.h @@ -34,6 +34,7 @@ #include "update_engine/client_library/include/update_engine/update_status.h" #include "update_engine/common/action_processor.h" #include "update_engine/common/cpu_limiter.h" +#include "update_engine/common/daemon_state_interface.h" #include "update_engine/common/download_action.h" #include "update_engine/common/excluder_interface.h" #include "update_engine/common/proxy_resolver.h" @@ -59,7 +60,8 @@ namespace chromeos_update_engine { class UpdateAttempter : public ActionProcessorDelegate, public DownloadActionDelegate, public CertificateChecker::Observer, - public PostinstallRunnerAction::DelegateInterface { + public PostinstallRunnerAction::DelegateInterface, + public DaemonStateInterface { public: using UpdateStatus = update_engine::UpdateStatus; using UpdateAttemptFlags = update_engine::UpdateAttemptFlags; @@ -219,15 +221,15 @@ class UpdateAttempter : public ActionProcessorDelegate, // 'cros flash' to function properly). bool IsAnyUpdateSourceAllowed() const; - // Add and remove a service observer. - void AddObserver(ServiceObserverInterface* observer) { + // |DaemonStateInterface| overrides. 
+ bool StartUpdater() override; + void AddObserver(ServiceObserverInterface* observer) override { service_observers_.insert(observer); } - void RemoveObserver(ServiceObserverInterface* observer) { + void RemoveObserver(ServiceObserverInterface* observer) override { service_observers_.erase(observer); } - - const std::set& service_observers() { + const std::set& service_observers() override { return service_observers_; } From 956a89cdd994f834baadef5e465aa2552f3fe348 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 4 Nov 2020 19:59:16 -0800 Subject: [PATCH 452/624] update_engine: Remove inline identifier from RealSystemState RealSystemState is a subclass. Overrided virtual functions by definition cannot be inlined because they do get resolved at runtime if the base class pointer is used (which is almost everywhere). BUG=b:171829801 TEST=unittests Change-Id: I5bdd62d6b3bcde04e859e0e819631fe1e6897fe2 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2521791 Tested-by: Amin Hassani Commit-Queue: Jae Hoon Kim Reviewed-by: Jae Hoon Kim --- cros/real_system_state.h | 43 ++++++++++++++++------------------------ 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/cros/real_system_state.h b/cros/real_system_state.h index a93a9b70..644ba783 100644 --- a/cros/real_system_state.h +++ b/cros/real_system_state.h @@ -60,64 +60,55 @@ class RealSystemState : public SystemState { bool Initialize(); // SystemState overrides. - inline void set_device_policy( - const policy::DevicePolicy* device_policy) override { + void set_device_policy(const policy::DevicePolicy* device_policy) override { device_policy_ = device_policy; } - inline const policy::DevicePolicy* device_policy() override { + const policy::DevicePolicy* device_policy() override { return device_policy_; } - inline BootControlInterface* boot_control() override { - return boot_control_.get(); - } + BootControlInterface* boot_control() override { return boot_control_.get(); } - inline ClockInterface* clock() override { return &clock_; } + ClockInterface* clock() override { return &clock_; } - inline ConnectionManagerInterface* connection_manager() override { + ConnectionManagerInterface* connection_manager() override { return connection_manager_.get(); } - inline HardwareInterface* hardware() override { return hardware_.get(); } + HardwareInterface* hardware() override { return hardware_.get(); } - inline MetricsReporterInterface* metrics_reporter() override { + MetricsReporterInterface* metrics_reporter() override { return &metrics_reporter_; } - inline PrefsInterface* prefs() override { return prefs_.get(); } + PrefsInterface* prefs() override { return prefs_.get(); } - inline PrefsInterface* powerwash_safe_prefs() override { + PrefsInterface* powerwash_safe_prefs() override { return powerwash_safe_prefs_.get(); } - inline PayloadStateInterface* payload_state() override { - return &payload_state_; - } + PayloadStateInterface* payload_state() override { return &payload_state_; } - inline UpdateAttempter* update_attempter() override { + UpdateAttempter* update_attempter() override { return update_attempter_.get(); } - inline OmahaRequestParams* request_params() override { - return &request_params_; - } + OmahaRequestParams* request_params() override { return &request_params_; } - inline P2PManager* p2p_manager() override { return p2p_manager_.get(); } + P2PManager* p2p_manager() override { return p2p_manager_.get(); } - inline chromeos_update_manager::UpdateManager* update_manager() override { 
+ chromeos_update_manager::UpdateManager* update_manager() override { return update_manager_.get(); } - inline PowerManagerInterface* power_manager() override { + PowerManagerInterface* power_manager() override { return power_manager_.get(); } - inline bool system_rebooted() override { return system_rebooted_; } + bool system_rebooted() override { return system_rebooted_; } - inline DlcServiceInterface* dlcservice() override { - return dlcservice_.get(); - } + DlcServiceInterface* dlcservice() override { return dlcservice_.get(); } private: // Real DBus proxies using the DBus connection. From 5ff76a25cd374b0cc55c1efb1929718395ee6fc1 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Thu, 5 Nov 2020 12:55:38 -0800 Subject: [PATCH 453/624] update_engine: Make PayloadStateTests class tests There is already a class in there, we should use it so we can setup common code. BUG=b:171829801 TEST=unittests Change-Id: I5490b56e125fabb84f37d7a1d9814459bf053ba4 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2521635 Tested-by: Amin Hassani Commit-Queue: Jae Hoon Kim Reviewed-by: Jae Hoon Kim --- cros/payload_state_unittest.cc | 92 +++++++++++++++++----------------- 1 file changed, 47 insertions(+), 45 deletions(-) diff --git a/cros/payload_state_unittest.cc b/cros/payload_state_unittest.cc index b48cff48..107c6e2b 100644 --- a/cros/payload_state_unittest.cc +++ b/cros/payload_state_unittest.cc @@ -108,7 +108,7 @@ static void SetupPayloadStateWith2Urls(string hash, class PayloadStateTest : public ::testing::Test {}; -TEST(PayloadStateTest, SetResponseWorksWithEmptyResponse) { +TEST_F(PayloadStateTest, SetResponseWorksWithEmptyResponse) { OmahaResponse response; FakeSystemState fake_system_state; NiceMock* prefs = fake_system_state.mock_prefs(); @@ -146,7 +146,7 @@ TEST(PayloadStateTest, SetResponseWorksWithEmptyResponse) { EXPECT_EQ(1, payload_state.GetNumResponsesSeen()); } -TEST(PayloadStateTest, SetResponseWorksWithSingleUrl) { +TEST_F(PayloadStateTest, SetResponseWorksWithSingleUrl) { OmahaResponse response; response.packages.push_back({.payload_urls = {"https://single.url.test"}, .size = 123456789, @@ -197,7 +197,7 @@ TEST(PayloadStateTest, SetResponseWorksWithSingleUrl) { EXPECT_EQ(1, payload_state.GetNumResponsesSeen()); } -TEST(PayloadStateTest, SetResponseWorksWithMultipleUrls) { +TEST_F(PayloadStateTest, SetResponseWorksWithMultipleUrls) { OmahaResponse response; response.packages.push_back({.payload_urls = {"http://multiple.url.test", "https://multiple.url.test"}, @@ -247,7 +247,7 @@ TEST(PayloadStateTest, SetResponseWorksWithMultipleUrls) { EXPECT_EQ(1, payload_state.GetNumResponsesSeen()); } -TEST(PayloadStateTest, CanAdvanceUrlIndexCorrectly) { +TEST_F(PayloadStateTest, CanAdvanceUrlIndexCorrectly) { OmahaResponse response; FakeSystemState fake_system_state; NiceMock* prefs = fake_system_state.mock_prefs(); @@ -302,7 +302,7 @@ TEST(PayloadStateTest, CanAdvanceUrlIndexCorrectly) { EXPECT_EQ(3U, payload_state.GetUrlSwitchCount()); } -TEST(PayloadStateTest, NewResponseResetsPayloadState) { +TEST_F(PayloadStateTest, NewResponseResetsPayloadState) { OmahaResponse response; FakeSystemState fake_system_state; PayloadState payload_state; @@ -349,7 +349,7 @@ TEST(PayloadStateTest, NewResponseResetsPayloadState) { payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpsServer)); } -TEST(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) { +TEST_F(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) { OmahaResponse 
response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -495,7 +495,8 @@ TEST(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) { EXPECT_FALSE(payload_state.ShouldBackoffDownload()); } -TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) { +TEST_F(PayloadStateTest, + PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -534,7 +535,8 @@ TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) { EXPECT_EQ(0U, payload_state.GetUrlSwitchCount()); } -TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) { +TEST_F(PayloadStateTest, + PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -571,7 +573,7 @@ TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) { EXPECT_EQ(0U, payload_state.GetUrlSwitchCount()); } -TEST(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { +TEST_F(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -625,7 +627,7 @@ TEST(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { EXPECT_EQ(0U, payload_state.GetUrlSwitchCount()); } -TEST(PayloadStateTest, NoBackoffInteractiveChecks) { +TEST_F(PayloadStateTest, NoBackoffInteractiveChecks) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -648,7 +650,7 @@ TEST(PayloadStateTest, NoBackoffInteractiveChecks) { EXPECT_FALSE(payload_state.ShouldBackoffDownload()); } -TEST(PayloadStateTest, NoBackoffForP2PUpdates) { +TEST_F(PayloadStateTest, NoBackoffForP2PUpdates) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -679,7 +681,7 @@ TEST(PayloadStateTest, NoBackoffForP2PUpdates) { EXPECT_TRUE(payload_state.ShouldBackoffDownload()); } -TEST(PayloadStateTest, NoBackoffForDeltaPayloads) { +TEST_F(PayloadStateTest, NoBackoffForDeltaPayloads) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -723,7 +725,7 @@ static void CheckPayloadBackoffState(PayloadState* payload_state, backoff_expiry_time.ToInternalValue()); } -TEST(PayloadStateTest, BackoffPeriodsAreInCorrectRange) { +TEST_F(PayloadStateTest, BackoffPeriodsAreInCorrectRange) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -744,7 +746,7 @@ TEST(PayloadStateTest, BackoffPeriodsAreInCorrectRange) { CheckPayloadBackoffState(&payload_state, 10, TimeDelta::FromDays(16)); } -TEST(PayloadStateTest, BackoffLogicCanBeDisabled) { +TEST_F(PayloadStateTest, BackoffLogicCanBeDisabled) { OmahaResponse response; response.disable_payload_backoff = true; PayloadState payload_state; @@ -771,7 +773,7 @@ TEST(PayloadStateTest, BackoffLogicCanBeDisabled) { EXPECT_FALSE(payload_state.ShouldBackoffDownload()); } -TEST(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) { +TEST_F(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) { OmahaResponse response; response.disable_payload_backoff = true; PayloadState payload_state; @@ -880,7 +882,7 @@ TEST(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) { EXPECT_EQ(0, payload_state.GetNumResponsesSeen()); } -TEST(PayloadStateTest, DownloadSourcesUsedIsCorrect) { +TEST_F(PayloadStateTest, DownloadSourcesUsedIsCorrect) { OmahaResponse response; 
PayloadState payload_state; FakeSystemState fake_system_state; @@ -920,7 +922,7 @@ TEST(PayloadStateTest, DownloadSourcesUsedIsCorrect) { payload_state.UpdateSucceeded(); } -TEST(PayloadStateTest, RestartingUpdateResetsMetrics) { +TEST_F(PayloadStateTest, RestartingUpdateResetsMetrics) { OmahaResponse response; FakeSystemState fake_system_state; PayloadState payload_state; @@ -950,7 +952,7 @@ TEST(PayloadStateTest, RestartingUpdateResetsMetrics) { payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer)); } -TEST(PayloadStateTest, NumRebootsIncrementsCorrectly) { +TEST_F(PayloadStateTest, NumRebootsIncrementsCorrectly) { FakeSystemState fake_system_state; PayloadState payload_state; @@ -978,7 +980,7 @@ TEST(PayloadStateTest, NumRebootsIncrementsCorrectly) { EXPECT_EQ(0U, payload_state.GetNumReboots()); } -TEST(PayloadStateTest, RollbackHappened) { +TEST_F(PayloadStateTest, RollbackHappened) { FakeSystemState fake_system_state; PayloadState payload_state; @@ -1009,7 +1011,7 @@ TEST(PayloadStateTest, RollbackHappened) { EXPECT_TRUE(payload_state.GetRollbackHappened()); } -TEST(PayloadStateTest, RollbackVersion) { +TEST_F(PayloadStateTest, RollbackVersion) { FakeSystemState fake_system_state; PayloadState payload_state; @@ -1052,7 +1054,7 @@ TEST(PayloadStateTest, RollbackVersion) { payload_state.UpdateSucceeded(); } -TEST(PayloadStateTest, DurationsAreCorrect) { +TEST_F(PayloadStateTest, DurationsAreCorrect) { OmahaResponse response; response.packages.resize(1); PayloadState payload_state; @@ -1115,7 +1117,7 @@ TEST(PayloadStateTest, DurationsAreCorrect) { 16000000); } -TEST(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { +TEST_F(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1158,7 +1160,7 @@ TEST(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { EXPECT_FALSE(fake_prefs.Exists(kPrefsSystemUpdatedMarker)); } -TEST(PayloadStateTest, RestartAfterCrash) { +TEST_F(PayloadStateTest, RestartAfterCrash) { PayloadState payload_state; FakeSystemState fake_system_state; testing::StrictMock mock_metrics_reporter; @@ -1183,7 +1185,7 @@ TEST(PayloadStateTest, RestartAfterCrash) { payload_state.UpdateEngineStarted(); } -TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) { +TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) { PayloadState payload_state; FakeSystemState fake_system_state; @@ -1195,7 +1197,7 @@ TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) { payload_state.UpdateEngineStarted(); } -TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) { +TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) { PayloadState payload_state; FakeSystemState fake_system_state; FakePrefs fake_prefs; @@ -1215,7 +1217,7 @@ TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) { EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress)); } -TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) { +TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) { PayloadState payload_state; FakeSystemState fake_system_state; FakePrefs fake_prefs; @@ -1247,7 +1249,7 @@ TEST(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) { EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress)); } -TEST(PayloadStateTest, CandidateUrlsComputedCorrectly) { +TEST_F(PayloadStateTest, CandidateUrlsComputedCorrectly) { OmahaResponse response; FakeSystemState fake_system_state; 
PayloadState payload_state; @@ -1320,7 +1322,7 @@ TEST(PayloadStateTest, CandidateUrlsComputedCorrectly) { EXPECT_EQ(0U, payload_state.GetUrlFailureCount()); } -TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) { +TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1352,7 +1354,7 @@ TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) { payload_state.UpdateSucceeded(); } -TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) { +TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1375,7 +1377,7 @@ TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) { payload_state.UpdateSucceeded(); } -TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) { +TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1399,7 +1401,7 @@ TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) { payload_state.UpdateSucceeded(); } -TEST(PayloadStateTest, RebootAfterUpdateFailedMetric) { +TEST_F(PayloadStateTest, RebootAfterUpdateFailedMetric) { FakeSystemState fake_system_state; OmahaResponse response; PayloadState payload_state; @@ -1439,7 +1441,7 @@ TEST(PayloadStateTest, RebootAfterUpdateFailedMetric) { Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter()); } -TEST(PayloadStateTest, RebootAfterUpdateSucceed) { +TEST_F(PayloadStateTest, RebootAfterUpdateSucceed) { FakeSystemState fake_system_state; OmahaResponse response; PayloadState payload_state; @@ -1472,7 +1474,7 @@ TEST(PayloadStateTest, RebootAfterUpdateSucceed) { payload_state.ReportFailedBootIfNeeded(); } -TEST(PayloadStateTest, RebootAfterCanceledUpdate) { +TEST_F(PayloadStateTest, RebootAfterCanceledUpdate) { FakeSystemState fake_system_state; OmahaResponse response; PayloadState payload_state; @@ -1499,7 +1501,7 @@ TEST(PayloadStateTest, RebootAfterCanceledUpdate) { payload_state.ReportFailedBootIfNeeded(); } -TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs) { +TEST_F(PayloadStateTest, UpdateSuccessWithWipedPrefs) { FakeSystemState fake_system_state; PayloadState payload_state; FakePrefs fake_prefs; @@ -1515,7 +1517,7 @@ TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs) { payload_state.ReportFailedBootIfNeeded(); } -TEST(PayloadStateTest, DisallowP2PAfterTooManyAttempts) { +TEST_F(PayloadStateTest, DisallowP2PAfterTooManyAttempts) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1536,7 +1538,7 @@ TEST(PayloadStateTest, DisallowP2PAfterTooManyAttempts) { EXPECT_FALSE(payload_state.P2PAttemptAllowed()); } -TEST(PayloadStateTest, DisallowP2PAfterDeadline) { +TEST_F(PayloadStateTest, DisallowP2PAfterDeadline) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1582,7 +1584,7 @@ TEST(PayloadStateTest, DisallowP2PAfterDeadline) { EXPECT_FALSE(payload_state.P2PAttemptAllowed()); } -TEST(PayloadStateTest, P2PStateVarsInitialValue) { +TEST_F(PayloadStateTest, P2PStateVarsInitialValue) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1598,7 +1600,7 @@ TEST(PayloadStateTest, P2PStateVarsInitialValue) { EXPECT_EQ(0, payload_state.GetP2PNumAttempts()); } -TEST(PayloadStateTest, P2PStateVarsArePersisted) { +TEST_F(PayloadStateTest, P2PStateVarsArePersisted) { OmahaResponse response; 
PayloadState payload_state; FakeSystemState fake_system_state; @@ -1627,7 +1629,7 @@ TEST(PayloadStateTest, P2PStateVarsArePersisted) { EXPECT_EQ(time, payload_state2.GetP2PFirstAttemptTimestamp()); } -TEST(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { +TEST_F(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { OmahaResponse response; PayloadState payload_state; FakeSystemState fake_system_state; @@ -1659,7 +1661,7 @@ TEST(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { EXPECT_EQ(null_time, payload_state.GetP2PFirstAttemptTimestamp()); } -TEST(PayloadStateTest, NextPayloadResetsUrlIndex) { +TEST_F(PayloadStateTest, NextPayloadResetsUrlIndex) { PayloadState payload_state; FakeSystemState fake_system_state; StrictMock mock_excluder; @@ -1689,7 +1691,7 @@ TEST(PayloadStateTest, NextPayloadResetsUrlIndex) { EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1b"); } -TEST(PayloadStateTest, ExcludeNoopForNonExcludables) { +TEST_F(PayloadStateTest, ExcludeNoopForNonExcludables) { PayloadState payload_state; FakeSystemState fake_system_state; StrictMock mock_excluder; @@ -1711,7 +1713,7 @@ TEST(PayloadStateTest, ExcludeNoopForNonExcludables) { payload_state.ExcludeCurrentPayload(); } -TEST(PayloadStateTest, ExcludeOnlyCanExcludables) { +TEST_F(PayloadStateTest, ExcludeOnlyCanExcludables) { PayloadState payload_state; FakeSystemState fake_system_state; StrictMock mock_excluder; @@ -1734,7 +1736,7 @@ TEST(PayloadStateTest, ExcludeOnlyCanExcludables) { payload_state.ExcludeCurrentPayload(); } -TEST(PayloadStateTest, IncrementFailureExclusionTest) { +TEST_F(PayloadStateTest, IncrementFailureExclusionTest) { PayloadState payload_state; FakeSystemState fake_system_state; StrictMock mock_excluder; @@ -1778,7 +1780,7 @@ TEST(PayloadStateTest, IncrementFailureExclusionTest) { payload_state.IncrementFailureCount(); } -TEST(PayloadStateTest, HaltExclusionPostPayloadExhaustion) { +TEST_F(PayloadStateTest, HaltExclusionPostPayloadExhaustion) { PayloadState payload_state; FakeSystemState fake_system_state; StrictMock mock_excluder; @@ -1809,7 +1811,7 @@ TEST(PayloadStateTest, HaltExclusionPostPayloadExhaustion) { payload_state.ExcludeCurrentPayload(); } -TEST(PayloadStateTest, NonInfinitePayloadIndexIncrement) { +TEST_F(PayloadStateTest, NonInfinitePayloadIndexIncrement) { PayloadState payload_state; FakeSystemState fake_system_state; EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); From 4c5413d0cae0c29c00257000b856890e59bab412 Mon Sep 17 00:00:00 2001 From: Vyshu Khota Date: Wed, 4 Nov 2020 16:17:25 -0800 Subject: [PATCH 454/624] update_engine: Store fingerprint value from Omaha response. Store the unique fp value from response into prefs. Value is later sent to Omaha to determine if there is a subsequent update available while the system is waiting to be rebooted. 
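A rough sketch of how the stored value can be read back later (illustrative only; the code that attaches the fingerprint to the next Omaha request is not part of this change, and |dlc_id| below is a placeholder):

  // |prefs_| is the PrefsInterface instance, as used elsewhere in
  // UpdateAttempter.
  std::string platform_fp;
  prefs_->GetString(kPrefsLastFp, &platform_fp);  // fp of the platform payload

  // The per-DLC copy lives under a sub-key in the DLC prefs namespace.
  auto dlc_key = prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsLastFp});
  std::string dlc_fp;
  prefs_->GetString(dlc_key, &dlc_fp);  // fp of the DLC payload, if any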
BUG=b:161259884 TEST=cros_workon_make --board=hatch --test update_engine Change-Id: Ie37aa5da3cd8a0820e633f5ef426fb50e8a02838 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2491618 Tested-by: Vyshu Khota Commit-Queue: Vyshu Khota Reviewed-by: Amin Hassani --- common/constants.cc | 1 + common/constants.h | 1 + common/fake_prefs.cc | 16 +++ common/fake_prefs.h | 2 + common/mock_prefs.h | 3 + common/prefs.cc | 20 +++- common/prefs.h | 2 + common/prefs_interface.h | 10 ++ common/prefs_unittest.cc | 82 +++++++++++++++ cros/omaha_request_action.cc | 7 +- cros/omaha_request_action_unittest.cc | 34 +++++-- cros/omaha_response.h | 2 + cros/omaha_response_handler_action.cc | 4 +- .../omaha_response_handler_action_unittest.cc | 99 +++++++++++++++---- cros/update_attempter.cc | 24 ++++- cros/update_attempter.h | 5 + payload_consumer/install_plan.cc | 7 +- payload_consumer/install_plan.h | 5 +- 18 files changed, 289 insertions(+), 35 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index 8883668a..a9cf238d 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -71,6 +71,7 @@ const char kPrefsTestUpdateCheckIntervalTimeout[] = const char kPrefsPingActive[] = "active"; const char kPrefsPingLastActive[] = "date_last_active"; const char kPrefsPingLastRollcall[] = "date_last_rollcall"; +const char kPrefsLastFp[] = "last-fp"; const char kPrefsPostInstallSucceeded[] = "post-install-succeeded"; const char kPrefsPreviousVersion[] = "previous-version"; const char kPrefsResumedUpdateFailures[] = "resumed-update-failures"; diff --git a/common/constants.h b/common/constants.h index 36851020..1e972493 100644 --- a/common/constants.h +++ b/common/constants.h @@ -71,6 +71,7 @@ extern const char kPrefsTestUpdateCheckIntervalTimeout[]; extern const char kPrefsPingActive[]; extern const char kPrefsPingLastActive[]; extern const char kPrefsPingLastRollcall[]; +extern const char kPrefsLastFp[]; extern const char kPrefsPostInstallSucceeded[]; extern const char kPrefsPreviousVersion[]; extern const char kPrefsResumedUpdateFailures[]; diff --git a/common/fake_prefs.cc b/common/fake_prefs.cc index 73559c52..275667e3 100644 --- a/common/fake_prefs.cc +++ b/common/fake_prefs.cc @@ -106,6 +106,22 @@ bool FakePrefs::Delete(const string& key) { return true; } +bool FakePrefs::Delete(const string& key, const vector& nss) { + bool success = Delete(key); + for (const auto& ns : nss) { + vector ns_keys; + success = GetSubKeys(ns, &ns_keys) && success; + for (const auto& sub_key : ns_keys) { + auto last_key_seperator = sub_key.find_last_of(kKeySeparator); + if (last_key_seperator != string::npos && + key == sub_key.substr(last_key_seperator + 1)) { + success = Delete(sub_key) && success; + } + } + } + return success; +} + bool FakePrefs::GetSubKeys(const string& ns, vector* keys) const { for (const auto& pr : values_) if (pr.first.compare(0, ns.length(), ns) == 0) diff --git a/common/fake_prefs.h b/common/fake_prefs.h index b24ff4d8..9af2550f 100644 --- a/common/fake_prefs.h +++ b/common/fake_prefs.h @@ -48,6 +48,8 @@ class FakePrefs : public PrefsInterface { bool Exists(const std::string& key) const override; bool Delete(const std::string& key) override; + bool Delete(const std::string& key, + const std::vector& nss) override; bool GetSubKeys(const std::string& ns, std::vector* keys) const override; diff --git a/common/mock_prefs.h b/common/mock_prefs.h index 62417a8c..c91664e4 100644 --- a/common/mock_prefs.h +++ b/common/mock_prefs.h @@ -41,6 +41,9 @@ class 
MockPrefs : public PrefsInterface { MOCK_CONST_METHOD1(Exists, bool(const std::string& key)); MOCK_METHOD1(Delete, bool(const std::string& key)); + MOCK_METHOD2(Delete, + bool(const std::string& key, + const std::vector& nss)); MOCK_CONST_METHOD2(GetSubKeys, bool(const std::string&, std::vector*)); diff --git a/common/prefs.cc b/common/prefs.cc index 615014f4..52a58b71 100644 --- a/common/prefs.cc +++ b/common/prefs.cc @@ -34,8 +34,6 @@ namespace chromeos_update_engine { namespace { -const char kKeySeparator = '/'; - void DeleteEmptyDirectories(const base::FilePath& path) { base::FileEnumerator path_enum( path, false /* recursive */, base::FileEnumerator::DIRECTORIES); @@ -112,6 +110,24 @@ bool PrefsBase::Delete(const string& key) { return true; } +bool PrefsBase::Delete(const string& pref_key, const vector& nss) { + // Delete pref key for platform. + bool success = Delete(pref_key); + // Delete pref key in each namespace. + for (const auto& ns : nss) { + vector namespace_keys; + success = GetSubKeys(ns, &namespace_keys) && success; + for (const auto& key : namespace_keys) { + auto last_key_seperator = key.find_last_of(kKeySeparator); + if (last_key_seperator != string::npos && + pref_key == key.substr(last_key_seperator + 1)) { + success = Delete(key) && success; + } + } + } + return success; +} + bool PrefsBase::GetSubKeys(const string& ns, vector* keys) const { return storage_->GetSubKeys(ns, keys); } diff --git a/common/prefs.h b/common/prefs.h index 3fc1d891..d6ef6683 100644 --- a/common/prefs.h +++ b/common/prefs.h @@ -74,6 +74,8 @@ class PrefsBase : public PrefsInterface { bool Exists(const std::string& key) const override; bool Delete(const std::string& key) override; + bool Delete(const std::string& pref_key, + const std::vector& nss) override; bool GetSubKeys(const std::string& ns, std::vector* keys) const override; diff --git a/common/prefs_interface.h b/common/prefs_interface.h index 1311cb44..866d0ca1 100644 --- a/common/prefs_interface.h +++ b/common/prefs_interface.h @@ -80,6 +80,12 @@ class PrefsInterface { // this key. Calling with non-existent keys does nothing. virtual bool Delete(const std::string& key) = 0; + // Deletes the pref key from platform and given namespace subdirectories. + // Keys are matched against end of pref keys in each namespace. + // Returns true if all deletes were successful. + virtual bool Delete(const std::string& pref_key, + const std::vector& nss) = 0; + // Creates a key which is part of a sub preference. static std::string CreateSubKey(const std::vector& ns_with_key); @@ -98,6 +104,10 @@ class PrefsInterface { // anymore for future Set*() and Delete() method calls. virtual void RemoveObserver(const std::string& key, ObserverInterface* observer) = 0; + + protected: + // Key separator used to create sub key and get file names, + static const char kKeySeparator = '/'; }; } // namespace chromeos_update_engine diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc index 6dd26c09..e8efd8a8 100644 --- a/common/prefs_unittest.cc +++ b/common/prefs_unittest.cc @@ -118,6 +118,23 @@ class BasePrefsTest : public ::testing::Test { for (const auto& key : keys0corner) EXPECT_TRUE(common_prefs_->Delete(key)); EXPECT_FALSE(common_prefs_->Exists(key0corner)); + + // Test sub directory namespace. 
+ const string kDlcPrefsSubDir = "foo-dir"; + key1A = common_prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc1", "keyA"}); + EXPECT_TRUE(common_prefs_->SetString(key1A, "fp_1A")); + key1B = common_prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc1", "keyB"}); + EXPECT_TRUE(common_prefs_->SetString(key1B, "fp_1B")); + auto key2A = common_prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc2", "keyA"}); + EXPECT_TRUE(common_prefs_->SetString(key2A, "fp_A2")); + + vector fpKeys; + EXPECT_TRUE(common_prefs_->GetSubKeys(kDlcPrefsSubDir, &fpKeys)); + EXPECT_EQ(fpKeys.size(), 3); + EXPECT_TRUE(common_prefs_->Delete(fpKeys[0])); + EXPECT_TRUE(common_prefs_->Delete(fpKeys[1])); + EXPECT_TRUE(common_prefs_->Delete(fpKeys[2])); + EXPECT_FALSE(common_prefs_->Exists(key1A)); } PrefsInterface* common_prefs_; @@ -423,6 +440,71 @@ TEST_F(PrefsTest, SetDeleteSubKey) { EXPECT_FALSE(base::PathExists(prefs_dir_.Append(name_space))); } +TEST_F(PrefsTest, DeletePrefs) { + const string kPrefsSubDir = "foo-dir"; + const string kFpKey = "kPrefFp"; + const string kNotFpKey = "NotkPrefFp"; + const string kOtherKey = "kPrefNotFp"; + + EXPECT_TRUE(prefs_.SetString(kFpKey, "3.000")); + EXPECT_TRUE(prefs_.SetString(kOtherKey, "not_fp_val")); + + auto key1_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-1", kFpKey}); + EXPECT_TRUE(prefs_.SetString(key1_fp, "3.7")); + auto key_not_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-1", kOtherKey}); + EXPECT_TRUE(prefs_.SetString(key_not_fp, "not_fp_val")); + auto key2_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-2", kFpKey}); + EXPECT_TRUE(prefs_.SetString(key2_fp, "3.9")); + auto key3_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-3", kFpKey}); + EXPECT_TRUE(prefs_.SetString(key3_fp, "3.45")); + + // Pref key does not match full subkey at end, should not delete. + auto key_middle_fp = prefs_.CreateSubKey({kPrefsSubDir, kFpKey, kOtherKey}); + EXPECT_TRUE(prefs_.SetString(key_middle_fp, "not_fp_val")); + auto key_end_not_fp = prefs_.CreateSubKey({kPrefsSubDir, "id-1", kNotFpKey}); + EXPECT_TRUE(prefs_.SetString(key_end_not_fp, "not_fp_val")); + + // Delete key in platform and one namespace. + prefs_.Delete(kFpKey, {kPrefsSubDir}); + + EXPECT_FALSE(prefs_.Exists(kFpKey)); + EXPECT_FALSE(prefs_.Exists(key1_fp)); + EXPECT_FALSE(prefs_.Exists(key2_fp)); + EXPECT_FALSE(prefs_.Exists(key3_fp)); + + // Check other keys are not deleted. + EXPECT_TRUE(prefs_.Exists(kOtherKey)); + EXPECT_TRUE(prefs_.Exists(key_not_fp)); + EXPECT_TRUE(prefs_.Exists(key_middle_fp)); + EXPECT_TRUE(prefs_.Exists(key_end_not_fp)); +} + +TEST_F(PrefsTest, DeleteMultipleNamespaces) { + const string kFirstSubDir = "foo-dir"; + const string kSecondarySubDir = "bar-dir"; + const string kTertiarySubDir = "ter-dir"; + const string kFpKey = "kPrefFp"; + + EXPECT_TRUE(prefs_.SetString(kFpKey, "3.000")); + // Set pref key in different namespaces. + auto key1_fp = prefs_.CreateSubKey({kFirstSubDir, "id-1", kFpKey}); + EXPECT_TRUE(prefs_.SetString(key1_fp, "3.7")); + auto key2_fp = prefs_.CreateSubKey({kSecondarySubDir, "id-3", kFpKey}); + EXPECT_TRUE(prefs_.SetString(key2_fp, "7.45")); + auto key3_fp = prefs_.CreateSubKey({kTertiarySubDir, "id-3", kFpKey}); + EXPECT_TRUE(prefs_.SetString(key3_fp, "7.45")); + + // Delete key in platform and given namespaces. + prefs_.Delete(kFpKey, {kFirstSubDir, kSecondarySubDir}); + + EXPECT_FALSE(prefs_.Exists(kFpKey)); + EXPECT_FALSE(prefs_.Exists(key1_fp)); + EXPECT_FALSE(prefs_.Exists(key2_fp)); + + // Tertiary namespace not given to delete. Key should still exist. 
+ EXPECT_TRUE(prefs_.Exists(key3_fp)); +} + class MockPrefsObserver : public PrefsInterface::ObserverInterface { public: MOCK_METHOD1(OnPrefSet, void(const string&)); diff --git a/cros/omaha_request_action.cc b/cros/omaha_request_action.cc index faa7ddef..cad0c674 100644 --- a/cros/omaha_request_action.cc +++ b/cros/omaha_request_action.cc @@ -98,6 +98,7 @@ constexpr char kAttrCohortName[] = "cohortname"; constexpr char kAttrElapsedDays[] = "elapsed_days"; constexpr char kAttrElapsedSeconds[] = "elapsed_seconds"; constexpr char kAttrEvent[] = "event"; +constexpr char kAttrFp[] = "fp"; constexpr char kAttrHashSha256[] = "hash_sha256"; // Deprecated: "hash"; Although we still need to pass it from the server for // backward compatibility. @@ -150,6 +151,7 @@ struct OmahaParserData { string name; string size; string hash; + string fp; }; vector packages; }; @@ -214,7 +216,8 @@ void ParserHandlerStart(void* user_data, if (!data->apps.empty()) data->apps.back().packages.push_back({.name = attrs[kAttrName], .size = attrs[kAttrSize], - .hash = attrs[kAttrHashSha256]}); + .hash = attrs[kAttrHashSha256], + .fp = attrs[kAttrFp]}); } else if (data->current_path == "/response/app/updatecheck/manifest") { // Get the version. if (!data->apps.empty()) @@ -612,6 +615,8 @@ bool ParsePackage(OmahaParserData::App* app, return false; } + out_package.fp = package.fp; + if (i < is_delta_payloads.size()) out_package.is_delta = ParseBool(is_delta_payloads[i]); diff --git a/cros/omaha_request_action_unittest.cc b/cros/omaha_request_action_unittest.cc index 8d94195b..9f9c75f4 100644 --- a/cros/omaha_request_action_unittest.cc +++ b/cros/omaha_request_action_unittest.cc @@ -158,10 +158,10 @@ struct FakeUpdateResponse { version + "\">" "" + - (multi_package ? "" + filename + "\" size=\"" + base::NumberToString(size) + "\" fp=\"" + + fp + "\" hash_sha256=\"" + hash + "\"/>" + + (multi_package ? 
"" : "") + "" "" - "" + "" "" - "" + "fp=\"" + + fp2 + + "\" hash_sha256=\"hash3\"/>" + "" "" "" : "") + @@ -248,6 +251,8 @@ struct FakeUpdateResponse { string codebase2 = "http://code/base/2/"; string filename = "file.signed"; string hash = "4841534831323334"; + string fp = "3.98ba213e"; + string fp2 = "3.755aff78e"; uint64_t size = 123; string deadline = ""; string max_days_to_scatter = "7"; @@ -670,6 +675,7 @@ TEST_F(OmahaRequestActionTest, ValidUpdateTest) { EXPECT_EQ(fake_update_response_.more_info_url, response.more_info_url); EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); EXPECT_EQ(fake_update_response_.size, response.packages[0].size); + EXPECT_EQ(fake_update_response_.fp, response.packages[0].fp); EXPECT_EQ(true, response.packages[0].is_delta); EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt); EXPECT_EQ(fake_update_response_.deadline, response.deadline); @@ -695,11 +701,13 @@ TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) { response.packages[1].payload_urls[0]); EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); EXPECT_EQ(fake_update_response_.size, response.packages[0].size); + EXPECT_EQ(fake_update_response_.fp, response.packages[0].fp); EXPECT_EQ(true, response.packages[0].is_delta); EXPECT_EQ(11u, response.packages[0].metadata_size); ASSERT_EQ(2u, response.packages.size()); EXPECT_EQ(string("hash2"), response.packages[1].hash); EXPECT_EQ(222u, response.packages[1].size); + EXPECT_EQ(fake_update_response_.fp2, response.packages[1].fp); EXPECT_EQ(22u, response.packages[1].metadata_size); EXPECT_EQ(false, response.packages[1].is_delta); } @@ -718,11 +726,13 @@ TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) { response.packages[1].payload_urls[0]); EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); EXPECT_EQ(fake_update_response_.size, response.packages[0].size); + EXPECT_EQ(fake_update_response_.fp, response.packages[0].fp); EXPECT_EQ(11u, response.packages[0].metadata_size); EXPECT_EQ(true, response.packages[0].is_delta); ASSERT_EQ(2u, response.packages.size()); EXPECT_EQ(string("hash3"), response.packages[1].hash); EXPECT_EQ(333u, response.packages[1].size); + EXPECT_EQ(fake_update_response_.fp2, response.packages[1].fp); EXPECT_EQ(33u, response.packages[1].metadata_size); EXPECT_EQ(false, response.packages[1].is_delta); } @@ -740,10 +750,12 @@ TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) { response.packages[0].payload_urls[0]); EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); EXPECT_EQ(fake_update_response_.size, response.packages[0].size); + EXPECT_EQ(fake_update_response_.fp, response.packages[0].fp); EXPECT_EQ(11u, response.packages[0].metadata_size); ASSERT_EQ(2u, response.packages.size()); EXPECT_EQ(string("hash3"), response.packages[1].hash); EXPECT_EQ(333u, response.packages[1].size); + EXPECT_EQ(fake_update_response_.fp2, response.packages[1].fp); EXPECT_EQ(33u, response.packages[1].metadata_size); EXPECT_EQ(true, response.packages[1].is_delta); } @@ -765,15 +777,18 @@ TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) { response.packages[2].payload_urls[0]); EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash); EXPECT_EQ(fake_update_response_.size, response.packages[0].size); + EXPECT_EQ(fake_update_response_.fp, response.packages[0].fp); EXPECT_EQ(11u, response.packages[0].metadata_size); EXPECT_EQ(true, response.packages[0].is_delta); ASSERT_EQ(3u, response.packages.size()); EXPECT_EQ(string("hash2"), response.packages[1].hash); 
EXPECT_EQ(222u, response.packages[1].size); + EXPECT_EQ(fake_update_response_.fp2, response.packages[1].fp); EXPECT_EQ(22u, response.packages[1].metadata_size); EXPECT_EQ(false, response.packages[1].is_delta); EXPECT_EQ(string("hash3"), response.packages[2].hash); EXPECT_EQ(333u, response.packages[2].size); + EXPECT_EQ(fake_update_response_.fp2, response.packages[2].fp); EXPECT_EQ(33u, response.packages[2].metadata_size); EXPECT_EQ(false, response.packages[2].is_delta); } @@ -1557,7 +1572,7 @@ TEST_F(OmahaRequestActionTest, MissingFieldTest) { "" "" "" + "size=\"587\" fp=\"3.789\" hash_sha256=\"lkq34j5345\"/>" " packages; diff --git a/cros/omaha_response_handler_action.cc b/cros/omaha_response_handler_action.cc index b6c223f0..52142a34 100644 --- a/cros/omaha_response_handler_action.cc +++ b/cros/omaha_response_handler_action.cc @@ -106,7 +106,9 @@ void OmahaResponseHandlerAction::PerformAction() { .metadata_signature = package.metadata_signature, .hash = raw_hash, .type = package.is_delta ? InstallPayloadType::kDelta - : InstallPayloadType::kFull}); + : InstallPayloadType::kFull, + .fp = package.fp, + .app_id = package.app_id}); update_check_response_hash += package.hash + ":"; } install_plan_.public_key_rsa = response.public_key_rsa; diff --git a/cros/omaha_response_handler_action_unittest.cc b/cros/omaha_response_handler_action_unittest.cc index b05660c4..74f4d048 100644 --- a/cros/omaha_response_handler_action_unittest.cc +++ b/cros/omaha_response_handler_action_unittest.cc @@ -117,6 +117,9 @@ const char* const kLongName = "-the_update_a.b.c.d_DELTA_.tgz"; const char* const kBadVersion = "don't update me"; const char* const kPayloadHashHex = "486173682b"; +const char* const kPayloadFp1 = "1.755aff78ec73dfc7f590893ac"; +const char* const kPayloadFp2 = "1.98ba213e0ccec0d0e8cdc74a5"; +const char* const kPayloadAppId = "test_app_id"; } // namespace bool OmahaResponseHandlerActionTest::DoTest(const OmahaResponse& in, @@ -185,7 +188,9 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { in.packages.push_back( {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; in.prompt = false; in.deadline = "20101020"; @@ -193,6 +198,8 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_EQ(1U, install_plan.target_slot); string deadline; EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline)); @@ -211,7 +218,9 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { in.packages.push_back( {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; in.prompt = true; InstallPlan install_plan; @@ -220,6 +229,8 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, 
install_plan.payloads[0].fp); EXPECT_EQ(0U, install_plan.target_slot); string deadline; EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline) && @@ -230,8 +241,11 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { OmahaResponse in; in.update_exists = true; in.version = "a.b.c.d"; - in.packages.push_back( - {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex}); + in.packages.push_back({.payload_urls = {kLongName}, + .size = 12, + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; in.prompt = true; in.deadline = "some-deadline"; @@ -245,6 +259,8 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_EQ(1U, install_plan.target_slot); string deadline; EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline)); @@ -255,8 +271,11 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { OmahaResponse in; in.update_exists = true; in.version = "a.b.c.d"; - in.packages.push_back( - {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex}); + in.packages.push_back({.payload_urls = {kLongName}, + .size = 12, + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; in.prompt = true; in.deadline = "some-deadline"; @@ -268,6 +287,8 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_EQ(1U, install_plan.target_slot); string deadline; EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline)); @@ -309,10 +330,14 @@ TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) { in.version = "a.b.c.d"; in.packages.push_back({.payload_urls = {"http://package/1"}, .size = 1, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.packages.push_back({.payload_urls = {"http://package/2"}, .size = 2, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp2}); in.more_info_url = "http://more/info"; InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); @@ -322,6 +347,10 @@ TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) { EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[1].app_id, install_plan.payloads[1].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); + EXPECT_EQ(in.packages[1].fp, install_plan.payloads[1].fp); EXPECT_EQ(in.version, install_plan.version); } @@ -332,7 +361,9 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) { in.packages.push_back( {.payload_urls = {"http://test.should/need/hash.checks.signed"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp 
= kPayloadFp1}); in.more_info_url = "http://more/info"; // Hash checks are always skipped for non-official update URLs. EXPECT_CALL(*(fake_system_state_.mock_request_params()), @@ -342,6 +373,8 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) { EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_TRUE(install_plan.hash_checks_mandatory); EXPECT_EQ(in.version, install_plan.version); } @@ -353,7 +386,9 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForUnofficialUpdateUrl) { in.packages.push_back( {.payload_urls = {"http://url.normally/needs/hash.checks.signed"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; EXPECT_CALL(*(fake_system_state_.mock_request_params()), IsUpdateUrlOfficial()) @@ -362,6 +397,8 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForUnofficialUpdateUrl) { EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_FALSE(install_plan.hash_checks_mandatory); EXPECT_EQ(in.version, install_plan.version); } @@ -375,7 +412,9 @@ TEST_F(OmahaResponseHandlerActionTest, in.packages.push_back( {.payload_urls = {"http://url.normally/needs/hash.checks.signed"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; EXPECT_CALL(*(fake_system_state_.mock_request_params()), IsUpdateUrlOfficial()) @@ -385,6 +424,8 @@ TEST_F(OmahaResponseHandlerActionTest, EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_FALSE(install_plan.hash_checks_mandatory); EXPECT_EQ(in.version, install_plan.version); } @@ -396,7 +437,9 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpsTest) { in.packages.push_back( {.payload_urls = {"https://test.should/need/hash.checks.signed"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; EXPECT_CALL(*(fake_system_state_.mock_request_params()), IsUpdateUrlOfficial()) @@ -405,6 +448,8 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpsTest) { EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_TRUE(install_plan.hash_checks_mandatory); EXPECT_EQ(in.version, install_plan.version); } @@ -417,7 +462,9 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForBothHttpAndHttpsTest) { {.payload_urls = {"http://test.should.still/need/hash.checks", "https://test.should.still/need/hash.checks"}, .size = 12, - .hash = 
kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; EXPECT_CALL(*(fake_system_state_.mock_request_params()), IsUpdateUrlOfficial()) @@ -426,6 +473,8 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForBothHttpAndHttpsTest) { EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_TRUE(install_plan.hash_checks_mandatory); EXPECT_EQ(in.version, install_plan.version); } @@ -675,7 +724,9 @@ TEST_F(OmahaResponseHandlerActionTest, P2PUrlIsUsedAndHashChecksMandatory) { in.packages.push_back( {.payload_urls = {"https://would.not/cause/hash/checks"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; OmahaRequestParams params(&fake_system_state_); @@ -698,6 +749,8 @@ TEST_F(OmahaResponseHandlerActionTest, P2PUrlIsUsedAndHashChecksMandatory) { InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_EQ(p2p_url, install_plan.download_url); EXPECT_TRUE(install_plan.hash_checks_mandatory); } @@ -899,10 +952,14 @@ TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) { in.version = "a.b.c.d"; in.packages.push_back({.payload_urls = {"http://package/1"}, .size = 1, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); in.packages.push_back({.payload_urls = {"http://package/2"}, .size = 2, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp2}); in.more_info_url = "http://more/info"; InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); @@ -912,6 +969,10 @@ TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) { EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[1].app_id, install_plan.payloads[1].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); + EXPECT_EQ(in.packages[1].fp, install_plan.payloads[1].fp); EXPECT_EQ(in.version, install_plan.version); } @@ -921,7 +982,9 @@ TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) { in.version = "a.b.c.d"; in.packages.push_back({.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"}, .size = 12, - .hash = kPayloadHashHex}); + .hash = kPayloadHashHex, + .app_id = kPayloadAppId, + .fp = kPayloadFp1}); // Setup the UpdateManager to disallow the update. 
FakeClock fake_clock; MockPolicy* mock_policy = new MockPolicy(&fake_clock); @@ -942,6 +1005,8 @@ TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) { install_plan = *delegate_.response_handler_action_install_plan_; EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); + EXPECT_EQ(in.packages[0].app_id, install_plan.payloads[0].app_id); + EXPECT_EQ(in.packages[0].fp, install_plan.payloads[0].fp); EXPECT_EQ(1U, install_plan.target_slot); EXPECT_EQ(in.version, install_plan.version); } diff --git a/cros/update_attempter.cc b/cros/update_attempter.cc index e4174578..5c21d04e 100644 --- a/cros/update_attempter.cc +++ b/cros/update_attempter.cc @@ -158,10 +158,12 @@ void UpdateAttempter::Init() { // In case of update_engine restart without a reboot we need to restore the // reboot needed state. - if (GetBootTimeAtUpdate(nullptr)) + if (GetBootTimeAtUpdate(nullptr)) { status_ = UpdateStatus::UPDATED_NEED_REBOOT; - else + } else { status_ = UpdateStatus::IDLE; + prefs_->Delete(kPrefsLastFp, {kDlcPrefsSubDir}); + } } bool UpdateAttempter::ScheduleUpdates() { @@ -646,6 +648,20 @@ bool UpdateAttempter::ResetDlcPrefs(const string& dlc_id) { return failures.size() == 0; } +void UpdateAttempter::SetPref(const string& pref_key, + const string& pref_value, + const string& payload_id) { + string dlc_id; + if (!omaha_request_params_->GetDlcId(payload_id, &dlc_id)) { + // Not a DLC ID, set fingerprint in perf for platform ID. + prefs_->SetString(pref_key, pref_value); + } else { + // Set fingerprint in pref for DLC ID. + auto key = prefs_->CreateSubKey({kDlcPrefsSubDir, dlc_id, pref_key}); + prefs_->SetString(key, pref_value); + } +} + bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) { if (dlc_id.empty()) { LOG(ERROR) << "Empty DLC ID passed."; @@ -1198,6 +1214,9 @@ void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor, for (const auto& payload : install_plan_->payloads) { target_version_uid += brillo::data_encoding::Base64Encode(payload.hash) + ":" + payload.metadata_signature + ":"; + // Set fingerprint value for updates only. + if (!is_install_) + SetPref(kPrefsLastFp, payload.fp, payload.app_id); } // If we just downloaded a rollback image, we should preserve this fact @@ -1419,6 +1438,7 @@ bool UpdateAttempter::ResetStatus() { // UpdateStatus::UPDATED_NEED_REBOOT state. ret_value = prefs_->Delete(kPrefsUpdateCompletedOnBootId) && ret_value; ret_value = prefs_->Delete(kPrefsUpdateCompletedBootTime) && ret_value; + ret_value = prefs_->Delete(kPrefsLastFp, {kDlcPrefsSubDir}) && ret_value; // Update the boot flags so the current slot has higher priority. BootControlInterface* boot_control = system_state_->boot_control(); diff --git a/cros/update_attempter.h b/cros/update_attempter.h index bd0aef60..a201acfc 100644 --- a/cros/update_attempter.h +++ b/cros/update_attempter.h @@ -433,6 +433,11 @@ class UpdateAttempter : public ActionProcessorDelegate, // Resets all the DLC prefs. bool ResetDlcPrefs(const std::string& dlc_id); + // Sets given pref key for DLC and platform. + void SetPref(const std::string& pref_key, + const std::string& pref_value, + const std::string& payload_id); + // Get the integer values from the DLC metadata for |kPrefsPingLastActive| // or |kPrefsPingLastRollcall|. // The value is equal to -2 when the value cannot be read or is not numeric. 
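For reference, a worked example of the pref key layout used by SetPref() and the namespaced Delete() above (the concrete value of kDlcPrefsSubDir and the fingerprint strings are illustrative, not taken from this change):

  // kPrefsLastFp is "last-fp"; sub-keys are joined with kKeySeparator ('/').
  auto dlc_key = prefs_->CreateSubKey({kDlcPrefsSubDir, "dlc-a", kPrefsLastFp});
  prefs_->SetString(kPrefsLastFp, "3.98ba213e");  // platform copy
  prefs_->SetString(dlc_key, "1.755aff78e");      // per-DLC copy

  // Deletes the platform copy and every sub-key under kDlcPrefsSubDir whose
  // last path component is "last-fp" (see PrefsBase::Delete above).
  prefs_->Delete(kPrefsLastFp, {kDlcPrefsSubDir});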
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc index 4a37836f..3aae6630 100644 --- a/payload_consumer/install_plan.cc +++ b/payload_consumer/install_plan.cc @@ -72,13 +72,16 @@ void InstallPlan::Dump() const { for (const auto& payload : payloads) { payloads_str += base::StringPrintf( ", payload: (urls: %s, size: %" PRIu64 ", metadata_size: %" PRIu64 - ", metadata signature: %s, hash: %s, payload type: %s)", + ", metadata signature: %s, hash: %s, payload type: %s" + ", fingerprint: %s, app id: %s)", PayloadUrlsToString(payload.payload_urls).c_str(), payload.size, payload.metadata_size, payload.metadata_signature.c_str(), base::HexEncode(payload.hash.data(), payload.hash.size()).c_str(), - InstallPayloadTypeToString(payload.type).c_str()); + InstallPayloadTypeToString(payload.type).c_str(), + payload.fp.c_str(), + payload.app_id.c_str()); } string version_str = base::StringPrintf(", version: %s", version.c_str()); diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h index ee1a72b7..16e5674f 100644 --- a/payload_consumer/install_plan.h +++ b/payload_consumer/install_plan.h @@ -62,6 +62,8 @@ struct InstallPlan { std::string metadata_signature; // signature of the metadata in base64 brillo::Blob hash; // SHA256 hash of the payload InstallPayloadType type{InstallPayloadType::kUnknown}; + std::string fp; // fingerprint value unique to the payload + std::string app_id; // App ID of the payload // Only download manifest and fill in partitions in install plan without // apply the payload if true. Will be set by DownloadAction when resuming // multi-payload. @@ -72,7 +74,8 @@ struct InstallPlan { metadata_size == that.metadata_size && metadata_signature == that.metadata_signature && hash == that.hash && type == that.type && - already_applied == that.already_applied; + already_applied == that.already_applied && fp == that.fp && + app_id == that.app_id; } }; std::vector<Payload> payloads; From 4fec0f26e790dc63afcd5adfbc192233eb1bb0cb Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 9 Nov 2020 16:38:14 -0800 Subject: [PATCH 455/624] update_engine: Turn on Chrome network proxy by default This flag is always true in Chrome OS, and Android does not compile this code, so it can be removed.
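Since the flag was always set on Chrome OS, behavior does not change; GetProxyResolver() simply loses its guards and now reads (shown for context, matching the update_attempter.h hunk below):

  ProxyResolver* GetProxyResolver() {
    if (obeying_proxies_)
      return &chrome_proxy_resolver_;
    return &direct_proxy_resolver_;
  }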
BUG=b:171829801 TEST=cros_workon_make --board reef --test Change-Id: I1e28955ba8fcd7ca395e538275ba68a2997bcc31 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2528757 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- Android.bp | 1 - BUILD.gn | 1 - cros/update_attempter.h | 6 ------ 3 files changed, 8 deletions(-) diff --git a/Android.bp b/Android.bp index 9cf7bd44..41eaa550 100644 --- a/Android.bp +++ b/Android.bp @@ -29,7 +29,6 @@ cc_defaults { cflags: [ "-DBASE_VER=576279", - "-DUSE_CHROME_NETWORK_PROXY=0", "-DUSE_CHROME_KIOSK_APP=0", "-DUSE_HWID_OVERRIDE=0", "-D_FILE_OFFSET_BITS=64", diff --git a/BUILD.gn b/BUILD.gn index 90913cba..c5430f9a 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -82,7 +82,6 @@ pkg_config("target_defaults") { "USE_FEC=0", "USE_HWID_OVERRIDE=${use.hwid_override}", "USE_CHROME_KIOSK_APP=${use.chrome_kiosk_app}", - "USE_CHROME_NETWORK_PROXY=${use.chrome_network_proxy}", "USE_SHILL=1", ] include_dirs = [ diff --git a/cros/update_attempter.h b/cros/update_attempter.h index a201acfc..24c6f54e 100644 --- a/cros/update_attempter.h +++ b/cros/update_attempter.h @@ -40,9 +40,7 @@ #include "update_engine/common/proxy_resolver.h" #include "update_engine/common/service_observer_interface.h" #include "update_engine/common/system_state.h" -#if USE_CHROME_NETWORK_PROXY #include "update_engine/cros/chrome_browser_proxy_resolver.h" -#endif // USE_CHROME_NETWORK_PROXY #include "update_engine/cros/omaha_request_builder_xml.h" #include "update_engine/cros/omaha_request_params.h" #include "update_engine/cros/omaha_response_handler_action.h" @@ -341,10 +339,8 @@ class UpdateAttempter : public ActionProcessorDelegate, void MarkDeltaUpdateFailure(); ProxyResolver* GetProxyResolver() { -#if USE_CHROME_NETWORK_PROXY if (obeying_proxies_) return &chrome_proxy_resolver_; -#endif // USE_CHROME_NETWORK_PROXY return &direct_proxy_resolver_; } @@ -460,9 +456,7 @@ class UpdateAttempter : public ActionProcessorDelegate, // Our two proxy resolvers DirectProxyResolver direct_proxy_resolver_; -#if USE_CHROME_NETWORK_PROXY ChromeBrowserProxyResolver chrome_proxy_resolver_; -#endif // USE_CHROME_NETWORK_PROXY std::unique_ptr processor_; From 64a80160593434d07ee1d00364bf88a56496f694 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Mon, 9 Nov 2020 16:58:17 -0800 Subject: [PATCH 456/624] update_engine: Turn on Chrome Kiosk app by default This flag is always true in Chorme OS and android's code doesn't compile this. So the flag can be removed and be turned on by default. 
BUG=b:171829801 TEST=cros_workon_make --board reef --test Change-Id: Ibb7423196f24793adf85ef5bfe7da7b1edef012a Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2528758 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- Android.bp | 1 - BUILD.gn | 6 +----- cros/real_system_state.cc | 8 ------- cros/real_system_state.h | 5 ----- update_manager/real_system_provider.cc | 4 ---- update_manager/real_system_provider.h | 8 +------ .../real_system_provider_unittest.cc | 21 ++----------------- 7 files changed, 4 insertions(+), 49 deletions(-) diff --git a/Android.bp b/Android.bp index 41eaa550..8f465f63 100644 --- a/Android.bp +++ b/Android.bp @@ -29,7 +29,6 @@ cc_defaults { cflags: [ "-DBASE_VER=576279", - "-DUSE_CHROME_KIOSK_APP=0", "-DUSE_HWID_OVERRIDE=0", "-D_FILE_OFFSET_BITS=64", "-D_POSIX_C_SOURCE=199309L", diff --git a/BUILD.gn b/BUILD.gn index c5430f9a..a6e4b812 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -81,7 +81,6 @@ pkg_config("target_defaults") { "USE_DBUS=${use.dbus}", "USE_FEC=0", "USE_HWID_OVERRIDE=${use.hwid_override}", - "USE_CHROME_KIOSK_APP=${use.chrome_kiosk_app}", "USE_SHILL=1", ] include_dirs = [ @@ -270,6 +269,7 @@ static_library("libupdate_engine") { deps = [ ":libpayload_consumer", ":update_engine-dbus-adaptor", + ":update_engine-dbus-kiosk-app-client", ":update_metadata-protos", ] @@ -281,10 +281,6 @@ static_library("libupdate_engine") { sources += [ "cros/chrome_browser_proxy_resolver.cc" ] } - if (use.chrome_kiosk_app) { - deps += [ ":update_engine-dbus-kiosk-app-client" ] - } - if (use.dlc) { sources += [ "cros/dlcservice_chromeos.cc", diff --git a/cros/real_system_state.cc b/cros/real_system_state.cc index 5715a39d..aff9863d 100644 --- a/cros/real_system_state.cc +++ b/cros/real_system_state.cc @@ -25,9 +25,7 @@ #include #include #include -#if USE_CHROME_KIOSK_APP #include -#endif // USE_CHROME_KIOSK_APP #include "update_engine/common/boot_control.h" #include "update_engine/common/boot_control_stub.h" @@ -57,10 +55,8 @@ bool RealSystemState::Initialize() { return false; } -#if USE_CHROME_KIOSK_APP kiosk_app_proxy_.reset(new org::chromium::KioskAppServiceInterfaceProxy( DBusConnection::Get()->GetDBus(), chromeos::kKioskAppServiceName)); -#endif // USE_CHROME_KIOSK_APP LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode."; LOG_IF(INFO, !hardware_->IsOfficialBuild()) << "Booted non-official build."; @@ -146,11 +142,7 @@ bool RealSystemState::Initialize() { // Initialize the Update Manager using the default state factory. chromeos_update_manager::State* um_state = chromeos_update_manager::DefaultStateFactory(&policy_provider_, -#if USE_CHROME_KIOSK_APP kiosk_app_proxy_.get(), -#else - nullptr, -#endif // USE_CHROME_KIOSK_APP this); if (!um_state) { diff --git a/cros/real_system_state.h b/cros/real_system_state.h index 644ba783..1e45dc1a 100644 --- a/cros/real_system_state.h +++ b/cros/real_system_state.h @@ -23,10 +23,7 @@ #include #include - -#if USE_CHROME_KIOSK_APP #include -#endif // USE_CHROME_KIOSK_APP #include "update_engine/certificate_checker.h" #include "update_engine/common/boot_control_interface.h" @@ -112,10 +109,8 @@ class RealSystemState : public SystemState { private: // Real DBus proxies using the DBus connection. -#if USE_CHROME_KIOSK_APP std::unique_ptr kiosk_app_proxy_; -#endif // USE_CHROME_KIOSK_APP // Interface for the power manager. 
std::unique_ptr power_manager_; diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc index 4e88b07f..8d30f7f1 100644 --- a/update_manager/real_system_provider.cc +++ b/update_manager/real_system_provider.cc @@ -20,9 +20,7 @@ #include #include #include -#if USE_CHROME_KIOSK_APP #include -#endif // USE_CHROME_KIOSK_APP #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/hardware_interface.h" @@ -129,7 +127,6 @@ bool RealSystemProvider::Init() { bool RealSystemProvider::GetKioskAppRequiredPlatformVersion( string* required_platform_version) { -#if USE_CHROME_KIOSK_APP brillo::ErrorPtr error; if (!kiosk_app_proxy_->GetRequiredPlatformVersion(required_platform_version, &error)) { @@ -137,7 +134,6 @@ bool RealSystemProvider::GetKioskAppRequiredPlatformVersion( required_platform_version->clear(); return false; } -#endif // USE_CHROME_KIOSK_APP return true; } diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h index ffa1467b..91fee7f7 100644 --- a/update_manager/real_system_provider.h +++ b/update_manager/real_system_provider.h @@ -39,13 +39,8 @@ class RealSystemProvider : public SystemProvider { RealSystemProvider( chromeos_update_engine::SystemState* system_state, org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy) -#if USE_CHROME_KIOSK_APP : system_state_(system_state), kiosk_app_proxy_(kiosk_app_proxy) { } -#else - system_state_(system_state) { - } -#endif // USE_CHROME_KIOSK_APP // Initializes the provider and returns whether it succeeded. bool Init(); @@ -86,9 +81,8 @@ class RealSystemProvider : public SystemProvider { std::unique_ptr> var_chromeos_version_; chromeos_update_engine::SystemState* const system_state_; -#if USE_CHROME_KIOSK_APP + org::chromium::KioskAppServiceInterfaceProxyInterface* const kiosk_app_proxy_; -#endif // USE_CHROME_KIOSK_APP DISALLOW_COPY_AND_ASSIGN(RealSystemProvider); }; diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc index 8add6906..3c77ac75 100644 --- a/update_manager/real_system_provider_unittest.cc +++ b/update_manager/real_system_provider_unittest.cc @@ -21,35 +21,30 @@ #include #include #include +#include +#include #include "update_engine/common/fake_boot_control.h" #include "update_engine/common/fake_hardware.h" #include "update_engine/cros/fake_system_state.h" #include "update_engine/update_manager/umtest_utils.h" -#if USE_CHROME_KIOSK_APP -#include "kiosk-app/dbus-proxies.h" -#include "kiosk-app/dbus-proxy-mocks.h" using org::chromium::KioskAppServiceInterfaceProxyMock; -#endif // USE_CHROME_KIOSK_APP using std::unique_ptr; using testing::_; using testing::DoAll; using testing::Return; using testing::SetArgPointee; -#if USE_CHROME_KIOSK_APP namespace { const char kRequiredPlatformVersion[] = "1234.0.0"; } // namespace -#endif // USE_CHROME_KIOSK_APP namespace chromeos_update_manager { class UmRealSystemProviderTest : public ::testing::Test { protected: void SetUp() override { -#if USE_CHROME_KIOSK_APP kiosk_app_proxy_mock_.reset(new KioskAppServiceInterfaceProxyMock()); ON_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _)) .WillByDefault( @@ -57,18 +52,13 @@ class UmRealSystemProviderTest : public ::testing::Test { provider_.reset(new RealSystemProvider(&fake_system_state_, kiosk_app_proxy_mock_.get())); -#else - provider_.reset(new RealSystemProvider(&fake_system_state, nullptr)); -#endif // USE_CHROME_KIOSK_APP EXPECT_TRUE(provider_->Init()); } 
chromeos_update_engine::FakeSystemState fake_system_state_; unique_ptr provider_; -#if USE_CHROME_KIOSK_APP unique_ptr kiosk_app_proxy_mock_; -#endif // USE_CHROME_KIOSK_APP }; TEST_F(UmRealSystemProviderTest, InitTest) { @@ -99,7 +89,6 @@ TEST_F(UmRealSystemProviderTest, VersionFromRequestParams) { provider_->var_chromeos_version()); } -#if USE_CHROME_KIOSK_APP TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) { UmTestUtils::ExpectVariableHasValue( std::string(kRequiredPlatformVersion), @@ -145,11 +134,5 @@ TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersionRepeatedFailure) { UmTestUtils::ExpectVariableHasValue( std::string(""), provider_->var_kiosk_required_platform_version()); } -#else -TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) { - UmTestUtils::ExpectVariableHasValue( - std::string(), provider_->var_kiosk_required_platform_version()); -} -#endif // USE_CHROME_KIOSK_APP } // namespace chromeos_update_manager From f36873504703bb43d785589ef797c3368b58b34c Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Tue, 10 Nov 2020 10:41:40 -0800 Subject: [PATCH 457/624] update_engine: Turn on Shill by default This flag is always true in Chorme OS and android's code doesn't compile this. So the flag can be removed and be turned on by default. BUG=b:171829801 TEST=cros_workon_make --board reef --test Change-Id: I3520cd4313bf3bff0d6f3f3775e35f8cf29f6322 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2529870 Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim Commit-Queue: Amin Hassani --- BUILD.gn | 1 - update_manager/state_factory.cc | 12 ++---------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index a6e4b812..f7296ac6 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -81,7 +81,6 @@ pkg_config("target_defaults") { "USE_DBUS=${use.dbus}", "USE_FEC=0", "USE_HWID_OVERRIDE=${use.hwid_override}", - "USE_SHILL=1", ] include_dirs = [ # We need this include dir because we include all the local code as diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc index a95a5a8f..21689744 100644 --- a/update_manager/state_factory.cc +++ b/update_manager/state_factory.cc @@ -27,18 +27,16 @@ #if USE_DBUS #include "update_engine/cros/dbus_connection.h" #endif // USE_DBUS +#include "update_engine/cros/shill_proxy.h" #include "update_engine/update_manager/fake_shill_provider.h" #include "update_engine/update_manager/real_config_provider.h" #include "update_engine/update_manager/real_device_policy_provider.h" #include "update_engine/update_manager/real_random_provider.h" +#include "update_engine/update_manager/real_shill_provider.h" #include "update_engine/update_manager/real_state.h" #include "update_engine/update_manager/real_system_provider.h" #include "update_engine/update_manager/real_time_provider.h" #include "update_engine/update_manager/real_updater_provider.h" -#if USE_SHILL -#include "update_engine/cros/shill_proxy.h" -#include "update_engine/update_manager/real_shill_provider.h" -#endif // USE_SHILL using std::unique_ptr; @@ -62,12 +60,8 @@ State* DefaultStateFactory( unique_ptr device_policy_provider( new RealDevicePolicyProvider(policy_provider)); #endif // USE_DBUS -#if USE_SHILL unique_ptr shill_provider( new RealShillProvider(new chromeos_update_engine::ShillProxy(), clock)); -#else - unique_ptr shill_provider(new FakeShillProvider()); -#endif // USE_SHILL unique_ptr random_provider(new RealRandomProvider()); unique_ptr system_provider( new 
RealSystemProvider(system_state, kiosk_app_proxy)); @@ -78,9 +72,7 @@ State* DefaultStateFactory( if (!(config_provider->Init() && device_policy_provider->Init() && random_provider->Init() && -#if USE_SHILL shill_provider->Init() && -#endif // USE_SHILL system_provider->Init() && time_provider->Init() && updater_provider->Init())) { LOG(ERROR) << "Error initializing providers"; From 59928f18f768caea1c5f3d504fa1cca6ac7a6edf Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Wed, 11 Nov 2020 21:21:27 +0000 Subject: [PATCH 458/624] Revert "Handle resume of VABC updates by emitting labels" This reverts commit 24599af599acf74b71a555a8eeb827bedcd672b5. Reason for revert: b/173009837 Test: 1. update_device.py ota.zip --extra-headers="SWITCH_SLOT_ON_REBOOT=0" 2. update_device.py ota.zip 3. Verify that 2 did not re-start the entire update; only fs verification and postinstall may re-run. Bug: 173009837 Change-Id: Ia31025ebc68a5e6a72d7a0919994d614213270d1 --- common/constants.cc | 2 - common/constants.h | 1 - payload_consumer/delta_performer.cc | 11 ---- .../delta_performer_integration_test.cc | 2 - payload_consumer/partition_writer.cc | 4 +- payload_consumer/partition_writer.h | 10 --- .../partition_writer_factory_android.cc | 4 -- .../partition_writer_factory_chromeos.cc | 2 - payload_consumer/partition_writer_unittest.cc | 8 +-- payload_consumer/vabc_partition_writer.cc | 66 +------------------ payload_consumer/vabc_partition_writer.h | 1 - 11 files changed, 5 insertions(+), 106 deletions(-) diff --git a/common/constants.cc b/common/constants.cc index 16485960..8883668a 100644 --- a/common/constants.cc +++ b/common/constants.cc @@ -98,8 +98,6 @@ const char kPrefsUpdateServerCertificate[] = "update-server-cert"; const char kPrefsUpdateStateNextDataLength[] = "update-state-next-data-length"; const char kPrefsUpdateStateNextDataOffset[] = "update-state-next-data-offset"; const char kPrefsUpdateStateNextOperation[] = "update-state-next-operation"; -const char kPrefsUpdateStatePartitionNextOperation[] = - "update-state-partition-next-operation"; const char kPrefsUpdateStatePayloadIndex[] = "update-state-payload-index"; const char kPrefsUpdateStateSHA256Context[] = "update-state-sha-256-context"; const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob"; diff --git a/common/constants.h b/common/constants.h index 2a2a62aa..36851020 100644 --- a/common/constants.h +++ b/common/constants.h @@ -95,7 +95,6 @@ extern const char kPrefsUpdateServerCertificate[]; extern const char kPrefsUpdateStateNextDataLength[]; extern const char kPrefsUpdateStateNextDataOffset[]; extern const char kPrefsUpdateStateNextOperation[]; -extern const char kPrefsUpdateStatePartitionNextOperation[]; extern const char kPrefsUpdateStatePayloadIndex[]; extern const char kPrefsUpdateStateSHA256Context[]; extern const char kPrefsUpdateStateSignatureBlob[]; diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc index b75c8cfd..30bd1ef5 100644 --- a/payload_consumer/delta_performer.cc +++ b/payload_consumer/delta_performer.cc @@ -48,7 +48,6 @@ #include "update_engine/common/prefs_interface.h" #include "update_engine/common/subprocess.h" #include "update_engine/common/terminator.h" -#include "update_engine/common/utils.h" #include "update_engine/payload_consumer/bzip_extent_writer.h" #include "update_engine/payload_consumer/cached_file_descriptor.h" #include "update_engine/payload_consumer/certificate_parser_interface.h" @@ -248,7 +247,6 @@ bool DeltaPerformer::OpenCurrentPartition() {
install_part, dynamic_control, block_size_, - prefs_, interactive_, IsDynamicPartition(install_part.name)); // Open source fds if we have a delta payload, or for partitions in the @@ -1337,13 +1335,6 @@ bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs, next_operation != kUpdateStateOperationInvalid && next_operation > 0)) return false; - int64_t partition_next_op = -1; - if (!(prefs->GetInt64(kPrefsUpdateStatePartitionNextOperation, - &partition_next_op) && - partition_next_op >= 0)) { - return false; - } - string interrupted_hash; if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) && !interrupted_hash.empty() && @@ -1396,7 +1387,6 @@ bool DeltaPerformer::ResetUpdateProgress( prefs->SetString(kPrefsUpdateStateSignatureBlob, ""); prefs->SetInt64(kPrefsManifestMetadataSize, -1); prefs->SetInt64(kPrefsManifestSignatureSize, -1); - prefs->SetInt64(kPrefsUpdateStatePartitionNextOperation, -1); prefs->SetInt64(kPrefsResumedUpdateFailures, 0); prefs->Delete(kPrefsPostInstallSucceeded); prefs->Delete(kPrefsVerityWritten); @@ -1441,7 +1431,6 @@ bool DeltaPerformer::CheckpointUpdateProgress(bool force) { partitions_[partition_index].operations(partition_operation_num); TEST_AND_RETURN_FALSE( prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length())); - partition_writer_->CheckpointUpdateProgress(partition_operation_num); } else { TEST_AND_RETURN_FALSE( prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0)); diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc index e83a20d0..374131ef 100644 --- a/payload_consumer/delta_performer_integration_test.cc +++ b/payload_consumer/delta_performer_integration_test.cc @@ -718,8 +718,6 @@ static void ApplyDeltaFile(bool full_kernel, .WillOnce(Return(true)); EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _)) .WillRepeatedly(Return(true)); - EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStatePartitionNextOperation, _)) - .WillRepeatedly(Return(true)); EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextOperation, _)) .WillOnce(Return(false)); EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextDataOffset, _)) diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc index 12b206e4..ec36d069 100644 --- a/payload_consumer/partition_writer.cc +++ b/payload_consumer/partition_writer.cc @@ -242,14 +242,12 @@ PartitionWriter::PartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, - PrefsInterface* prefs, bool is_interactive) : partition_update_(partition_update), install_part_(install_part), dynamic_control_(dynamic_control), interactive_(is_interactive), - block_size_(block_size), - prefs_(prefs) {} + block_size_(block_size) {} PartitionWriter::~PartitionWriter() { Close(); diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h index a67339ec..1acbddcb 100644 --- a/payload_consumer/partition_writer.h +++ b/payload_consumer/partition_writer.h @@ -25,7 +25,6 @@ #include #include "update_engine/common/dynamic_partition_control_interface.h" -#include "update_engine/common/prefs_interface.h" #include "update_engine/payload_consumer/extent_writer.h" #include "update_engine/payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/install_plan.h" @@ -37,7 +36,6 @@ class PartitionWriter { const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, - 
PrefsInterface* prefs, bool is_interactive); virtual ~PartitionWriter(); static bool ValidateSourceHash(const brillo::Blob& calculated_hash, @@ -50,11 +48,6 @@ class PartitionWriter { [[nodiscard]] virtual bool Init(const InstallPlan* install_plan, bool source_may_exist); - // This will be called by DeltaPerformer after applying an InstallOp. - // |next_op_index| is index of next operation that should be applied. - // |next_op_index-1| is the last operation that is already applied. - virtual void CheckpointUpdateProgress(size_t next_op_index) {} - int Close(); // These perform a specific type of operation and return true on success. @@ -118,8 +111,6 @@ class PartitionWriter { // Used to avoid re-opening the same source partition if it is not actually // error corrected. bool source_ecc_open_failure_{false}; - - PrefsInterface* prefs_; }; namespace partition_writer { @@ -130,7 +121,6 @@ std::unique_ptr CreatePartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, - PrefsInterface* prefs, bool is_interactive, bool is_dynamic_partition); } // namespace partition_writer diff --git a/payload_consumer/partition_writer_factory_android.cc b/payload_consumer/partition_writer_factory_android.cc index 5960d9b2..0c9f7ea2 100644 --- a/payload_consumer/partition_writer_factory_android.cc +++ b/payload_consumer/partition_writer_factory_android.cc @@ -19,7 +19,6 @@ #include -#include "update_engine/common/prefs_interface.h" #include "update_engine/payload_consumer/vabc_partition_writer.h" namespace chromeos_update_engine::partition_writer { @@ -29,7 +28,6 @@ std::unique_ptr CreatePartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, - PrefsInterface* prefs, bool is_interactive, bool is_dynamic_partition) { if (dynamic_control && @@ -42,7 +40,6 @@ std::unique_ptr CreatePartitionWriter( install_part, dynamic_control, block_size, - prefs, is_interactive); } else { LOG(INFO) << "Virtual AB Compression disabled, using Partition Writer for `" @@ -51,7 +48,6 @@ std::unique_ptr CreatePartitionWriter( install_part, dynamic_control, block_size, - prefs, is_interactive); } } diff --git a/payload_consumer/partition_writer_factory_chromeos.cc b/payload_consumer/partition_writer_factory_chromeos.cc index d89beb70..609f0431 100644 --- a/payload_consumer/partition_writer_factory_chromeos.cc +++ b/payload_consumer/partition_writer_factory_chromeos.cc @@ -27,14 +27,12 @@ std::unique_ptr CreatePartitionWriter( const InstallPlan::Partition& install_part, DynamicPartitionControlInterface* dynamic_control, size_t block_size, - PrefsInterface* prefs, bool is_interactive, bool is_dynamic_partition) { return std::make_unique(partition_update, install_part, dynamic_control, block_size, - prefs, is_interactive); } } // namespace chromeos_update_engine::partition_writer diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc index ca2ef413..1ef4783b 100644 --- a/payload_consumer/partition_writer_unittest.cc +++ b/payload_consumer/partition_writer_unittest.cc @@ -112,12 +112,8 @@ class PartitionWriterTest : public testing::Test { DeltaArchiveManifest manifest_{}; PartitionUpdate partition_update_{}; InstallPlan::Partition install_part_{}; - PartitionWriter writer_{partition_update_, - install_part_, - &dynamic_control_, - kBlockSize, - &prefs_, - false}; + PartitionWriter writer_{ + partition_update_, install_part_, &dynamic_control_, 
kBlockSize, false}; }; // Test that the error-corrected file descriptor is used to read a partition // when no hash is available for SOURCE_COPY but it falls back to the normal diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc index e8994b40..d95103b9 100644 --- a/payload_consumer/vabc_partition_writer.cc +++ b/payload_consumer/vabc_partition_writer.cc @@ -30,30 +30,6 @@ #include "update_engine/payload_consumer/snapshot_extent_writer.h" namespace chromeos_update_engine { -// Expected layout of COW file: -// === Beginning of Cow Image === -// All Source Copy Operations -// ========== Label 0 ========== -// Operation 0 in PartitionUpdate -// ========== Label 1 ========== -// Operation 1 in PartitionUpdate -// ========== label 2 ========== -// Operation 2 in PartitionUpdate -// ========== label 3 ========== -// . -// . -// . - -// When resuming, pass |kPrefsUpdateStatePartitionNextOperation| as label to -// |InitializeWithAppend|. -// For example, suppose we finished writing SOURCE_COPY, and we finished writing -// operation 2 completely. Update is suspended when we are half way through -// operation 3. -// |kPrefsUpdateStatePartitionNextOperation| would be 3, so we pass 3 as -// label to |InitializeWithAppend|. The CowWriter will retain all data before -// label 3, Which contains all operation 2's data, but none of operation 3's -// data. - bool VABCPartitionWriter::Init(const InstallPlan* install_plan, bool source_may_exist) { TEST_AND_RETURN_FALSE(install_plan != nullptr); @@ -62,41 +38,19 @@ bool VABCPartitionWriter::Init(const InstallPlan* install_plan, install_part_.name, install_part_.source_path, install_plan->is_resume); TEST_AND_RETURN_FALSE(cow_writer_ != nullptr); - // Emit a label before writing SOURCE_COPY. When resuming, + // TODO(zhangkelvin) Emit a label before writing SOURCE_COPY. When resuming, // use pref or CowWriter::GetLastLabel to determine if the SOURCE_COPY ops are // written. No need to handle SOURCE_COPY operations when resuming. // ===== Resume case handling code goes here ==== - if (install_plan->is_resume) { - int64_t next_op = 0; - if (!prefs_->GetInt64(kPrefsUpdateStatePartitionNextOperation, &next_op)) { - LOG(ERROR) - << "Resuming an update but can't fetch |next_op| from saved prefs."; - return false; - } - if (next_op < 0) { - TEST_AND_RETURN_FALSE(cow_writer_->Initialize()); - } else { - TEST_AND_RETURN_FALSE(cow_writer_->InitializeAppend(next_op)); - return true; - } - } else { - TEST_AND_RETURN_FALSE(cow_writer_->Initialize()); - } // ============================================== - TEST_AND_RETURN_FALSE( - prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, -1)); // TODO(zhangkelvin) Rewrite this in C++20 coroutine once that's available. auto converted = ConvertToCowOperations(partition_update_.operations(), partition_update_.merge_operations()); WriteAllCowOps(block_size_, converted, cow_writer_.get(), source_fd_); - // Emit label 0 to mark end of all SOURCE_COPY operations - cow_writer_->AddLabel(0); - TEST_AND_RETURN_FALSE( - prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, 0)); return true; } @@ -153,27 +107,11 @@ std::unique_ptr VABCPartitionWriter::CreateBaseExtentWriter() { } bool VABCPartitionWriter::Flush() { - // No need to call fsync/sync, as CowWriter flushes after a label is added - // added. 
- int64_t next_op = 0; - // |kPrefsUpdateStatePartitionNextOperation| will be maintained and set by - // CheckpointUpdateProgress() - TEST_AND_RETURN_FALSE( - prefs_->GetInt64(kPrefsUpdateStatePartitionNextOperation, &next_op)); - // +1 because label 0 is reserved for SOURCE_COPY. See beginning of this - // file for explanation for cow format. - cow_writer_->AddLabel(next_op + 1); + // No need to do anything, as CowWriter automatically flushes every OP added. return true; } -void VABCPartitionWriter::CheckpointUpdateProgress(size_t next_op_index) { - prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, next_op_index); -} - VABCPartitionWriter::~VABCPartitionWriter() { - // Reset |kPrefsUpdateStatePartitionNextOperation| once we finished a - // partition. - prefs_->SetInt64(kPrefsUpdateStatePartitionNextOperation, -1); cow_writer_->Finalize(); } diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h index ddade704..7657cb44 100644 --- a/payload_consumer/vabc_partition_writer.h +++ b/payload_consumer/vabc_partition_writer.h @@ -44,7 +44,6 @@ class VABCPartitionWriter final : public PartitionWriter { [[nodiscard]] bool PerformSourceCopyOperation( const InstallOperation& operation, ErrorCode* error) override; [[nodiscard]] bool Flush() override; - void CheckpointUpdateProgress(size_t next_op_index) override; static bool WriteAllCowOps(size_t block_size, const std::vector& converted, From 79fec2da41db55983cd45036deb17a2f76f934d8 Mon Sep 17 00:00:00 2001 From: Saurabh Nijhara Date: Tue, 10 Nov 2020 23:22:58 +0100 Subject: [PATCH 459/624] update_engine: Add minimum version policy impl This CL adds minimum version policy handler which checks if the current Chrome OS version is less than the highest version specified in the DeviceMinimumVersion policy. The intent is to consult this policy in a later CL in ChromeOSPolicy::UpdateCanBeApplied as download time restrictions will not be applied if current Chrome OS version is less than version provided by DeviceMinimumVersion policy. 
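For illustration (values taken from the new unit test below): if the device is on 13315.60.12 and DeviceMinimumVersion specifies 13315.60.15, the handler returns EvalStatus::kSucceeded and sets the result to ErrorCode::kSuccess, so the update is applied regardless of download-time restrictions; if either version is missing or invalid, or the current version is already at or above the minimum, it returns EvalStatus::kContinue and defers to the rest of the policy chain.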
BUG=chromium:1117450 TEST=FEATURES=test emerge-${BOARD} update_engine Change-Id: I06ce66c4c85ac2718d9256c944160d63a6ac7e31 Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2530630 Commit-Queue: Saurabh Nijhara Tested-by: Saurabh Nijhara Reviewed-by: Jae Hoon Kim Reviewed-by: Amin Hassani --- BUILD.gn | 2 + update_manager/minimum_version_policy_impl.cc | 56 +++++++++ update_manager/minimum_version_policy_impl.h | 54 +++++++++ .../minimum_version_policy_impl_unittest.cc | 111 ++++++++++++++++++ 4 files changed, 223 insertions(+) create mode 100644 update_manager/minimum_version_policy_impl.cc create mode 100644 update_manager/minimum_version_policy_impl.h create mode 100644 update_manager/minimum_version_policy_impl_unittest.cc diff --git a/BUILD.gn b/BUILD.gn index f7296ac6..1f5dc7f8 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -226,6 +226,7 @@ static_library("libupdate_engine") { "update_manager/enterprise_device_policy_impl.cc", "update_manager/evaluation_context.cc", "update_manager/interactive_update_policy_impl.cc", + "update_manager/minimum_version_policy_impl.cc", "update_manager/next_update_check_policy_impl.cc", "update_manager/official_build_check_policy_impl.cc", "update_manager/out_of_box_experience_policy_impl.cc", @@ -528,6 +529,7 @@ if (use.test) { "update_manager/enterprise_device_policy_impl_unittest.cc", "update_manager/evaluation_context_unittest.cc", "update_manager/generic_variables_unittest.cc", + "update_manager/minimum_version_policy_impl_unittest.cc", "update_manager/prng_unittest.cc", "update_manager/real_device_policy_provider_unittest.cc", "update_manager/real_random_provider_unittest.cc", diff --git a/update_manager/minimum_version_policy_impl.cc b/update_manager/minimum_version_policy_impl.cc new file mode 100644 index 00000000..fb94ee49 --- /dev/null +++ b/update_manager/minimum_version_policy_impl.cc @@ -0,0 +1,56 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/update_manager/minimum_version_policy_impl.h" + +#include + +using chromeos_update_engine::ErrorCode; +using chromeos_update_engine::InstallPlan; + +namespace chromeos_update_manager { + +EvalStatus MinimumVersionPolicyImpl::UpdateCanBeApplied( + EvaluationContext* ec, + State* state, + std::string* error, + ErrorCode* result, + InstallPlan* install_plan) const { + const base::Version* current_version( + ec->GetValue(state->system_provider()->var_chromeos_version())); + if (current_version == nullptr || !current_version->IsValid()) { + LOG(WARNING) << "Unable to access current version"; + return EvalStatus::kContinue; + } + + const base::Version* minimum_version = ec->GetValue( + state->device_policy_provider()->var_device_minimum_version()); + if (minimum_version == nullptr || !minimum_version->IsValid()) { + LOG(WARNING) << "Unable to access minimum version"; + return EvalStatus::kContinue; + } + + if (*current_version < *minimum_version) { + LOG(INFO) << "Updating from version less than minimum required" + ", allowing update to be applied."; + *result = ErrorCode::kSuccess; + return EvalStatus::kSucceeded; + } + + return EvalStatus::kContinue; +} + +} // namespace chromeos_update_manager diff --git a/update_manager/minimum_version_policy_impl.h b/update_manager/minimum_version_policy_impl.h new file mode 100644 index 00000000..600d6244 --- /dev/null +++ b/update_manager/minimum_version_policy_impl.h @@ -0,0 +1,54 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef UPDATE_ENGINE_UPDATE_MANAGER_MINIMUM_VERSION_POLICY_IMPL_H_ +#define UPDATE_ENGINE_UPDATE_MANAGER_MINIMUM_VERSION_POLICY_IMPL_H_ + +#include + +#include "update_engine/common/error_code.h" +#include "update_engine/payload_consumer/install_plan.h" +#include "update_engine/update_manager/policy_utils.h" + +namespace chromeos_update_manager { + +// Check to see if an update happens from a version less than the minimum +// required one. +class MinimumVersionPolicyImpl : public PolicyImplBase { + public: + MinimumVersionPolicyImpl() = default; + ~MinimumVersionPolicyImpl() override = default; + + // If current version is less than the minimum required one, then this should + // not block the update to be applied. 
+ EvalStatus UpdateCanBeApplied( + EvaluationContext* ec, + State* state, + std::string* error, + chromeos_update_engine::ErrorCode* result, + chromeos_update_engine::InstallPlan* install_plan) const override; + + protected: + std::string PolicyName() const override { return "MinimumVersionPolicyImpl"; } + + private: + MinimumVersionPolicyImpl(const MinimumVersionPolicyImpl&) = delete; + MinimumVersionPolicyImpl& operator=(const MinimumVersionPolicyImpl&) = delete; +}; + +} // namespace chromeos_update_manager + +#endif // UPDATE_ENGINE_UPDATE_MANAGER_MINIMUM_VERSION_POLICY_IMPL_H_ diff --git a/update_manager/minimum_version_policy_impl_unittest.cc b/update_manager/minimum_version_policy_impl_unittest.cc new file mode 100644 index 00000000..8e4dba55 --- /dev/null +++ b/update_manager/minimum_version_policy_impl_unittest.cc @@ -0,0 +1,111 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +#include "update_engine/update_manager/minimum_version_policy_impl.h" +#include "update_engine/update_manager/policy_test_utils.h" + +using chromeos_update_engine::ErrorCode; +using chromeos_update_engine::InstallPlan; + +namespace { + +const char* kInvalidVersion = "13315.woops.12"; +const char* kOldVersion = "13315.60.12"; +const char* kNewVersion = "13315.60.15"; + +} // namespace + +namespace chromeos_update_manager { + +class UmMinimumVersionPolicyImplTest : public UmPolicyTestBase { + protected: + UmMinimumVersionPolicyImplTest() { + policy_ = std::make_unique(); + } + + void SetCurrentVersion(const std::string& version) { + fake_state_.system_provider()->var_chromeos_version()->reset( + new base::Version(version)); + } + + void SetMinimumVersion(const std::string& version) { + fake_state_.device_policy_provider()->var_device_minimum_version()->reset( + new base::Version(version)); + } + + void TestPolicy(const EvalStatus& expected_status) { + InstallPlan install_plan; + ErrorCode result; + ExpectPolicyStatus( + expected_status, &Policy::UpdateCanBeApplied, &result, &install_plan); + if (expected_status == EvalStatus::kSucceeded) + EXPECT_EQ(result, ErrorCode::kSuccess); + } +}; + +TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenCurrentVersionIsNotSet) { + SetMinimumVersion(kNewVersion); + + TestPolicy(EvalStatus::kContinue); +} + +TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenCurrentVersionIsInvalid) { + SetCurrentVersion(kInvalidVersion); + SetMinimumVersion(kNewVersion); + + TestPolicy(EvalStatus::kContinue); +} + +TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenMinumumVersionIsNotSet) { + SetCurrentVersion(kOldVersion); + + TestPolicy(EvalStatus::kContinue); +} + +TEST_F(UmMinimumVersionPolicyImplTest, ContinueWhenMinumumVersionIsInvalid) { + SetCurrentVersion(kOldVersion); + SetMinimumVersion(kInvalidVersion); + + TestPolicy(EvalStatus::kContinue); +} + +TEST_F(UmMinimumVersionPolicyImplTest, + ContinueWhenCurrentVersionIsGreaterThanMinimumVersion) { + SetCurrentVersion(kNewVersion); + 
SetMinimumVersion(kOldVersion); + + TestPolicy(EvalStatus::kContinue); +} + +TEST_F(UmMinimumVersionPolicyImplTest, + ContinueWhenCurrentVersionIsEqualToMinimumVersion) { + SetCurrentVersion(kNewVersion); + SetMinimumVersion(kNewVersion); + + TestPolicy(EvalStatus::kContinue); +} + +TEST_F(UmMinimumVersionPolicyImplTest, + SuccessWhenCurrentVersionIsLessThanMinimumVersion) { + SetCurrentVersion(kOldVersion); + SetMinimumVersion(kNewVersion); + + TestPolicy(EvalStatus::kSucceeded); +} + +} // namespace chromeos_update_manager From b65868ccc045f9fcf5a6e6b2a7534d409792e9c5 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 2 Nov 2020 15:06:53 -0500 Subject: [PATCH 460/624] Add small testcase for merge sequence generator Test: treehugger Change-Id: Ibea610fcc0d166a44f9425c9fc2e6d17cf877322 --- .../merge_sequence_generator_unittest.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/payload_generator/merge_sequence_generator_unittest.cc b/payload_generator/merge_sequence_generator_unittest.cc index 567ede1e..1f0c2ea8 100644 --- a/payload_generator/merge_sequence_generator_unittest.cc +++ b/payload_generator/merge_sequence_generator_unittest.cc @@ -116,6 +116,20 @@ TEST_F(MergeSequenceGeneratorTest, FindDependency) { merge_after.at(transfers[2])); } +TEST_F(MergeSequenceGeneratorTest, FindDependencyEdgeCase) { + std::vector transfers = { + CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)), + CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(50, 10)), + CreateCowMergeOperation(ExtentForRange(59, 10), ExtentForRange(60, 10)), + }; + + std::map> merge_after; + FindDependency(transfers, &merge_after); + ASSERT_EQ(std::set(), merge_after.at(transfers[0])); + ASSERT_EQ(std::set(), merge_after.at(transfers[1])); + ASSERT_EQ(merge_after[transfers[2]].size(), 1U); +} + TEST_F(MergeSequenceGeneratorTest, FindDependency_ReusedSourceBlocks) { std::vector transfers = { CreateCowMergeOperation(ExtentForRange(5, 10), ExtentForRange(15, 10)), From b138ab5e1d2b1e26803ed68dbc301004ef267fbf Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Fri, 6 Nov 2020 15:56:41 -0500 Subject: [PATCH 461/624] Add FileDescriptorPtr interface to verity writer In VABC, reads and writes of the target partition are no longer done through a raw unix file descriptor. Instead they go through CowWriter and FileDescriptorPtr, so the verity writer needs a FileDescriptorPtr-based abstraction as well.
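As an illustration only (a hypothetical caller, not part of this change), the fd-based overload is meant to be driven with an explicit descriptor instead of a path; WriteVerity and its arguments below are made up for the sketch:

    // Sketch: hand the verity writer the same FileDescriptorPtr the partition
    // is already being written through (e.g. a COW-backed descriptor).
    bool WriteVerity(const InstallPlan::Partition& partition,
                     FileDescriptorPtr fd,
                     uint64_t offset,
                     const brillo::Blob& data) {
      auto verity_writer = verity_writer::CreateVerityWriter();
      TEST_AND_RETURN_FALSE(verity_writer->Init(partition, fd, fd));
      return verity_writer->Update(offset, data.data(), data.size());
    }

The existing path-based Init()/EncodeFEC() entry points remain as thin wrappers that open an EintrSafeFileDescriptor and forward to the fd-based versions.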
Test: treehugger Change-Id: Id638b4e5a2cea4ab97927a6e7089170a3e257dee --- payload_consumer/verity_writer_android.cc | 84 +++++++++++++++------- payload_consumer/verity_writer_android.h | 17 ++++- payload_consumer/verity_writer_interface.h | 4 ++ payload_consumer/verity_writer_stub.cc | 4 +- payload_consumer/verity_writer_stub.h | 4 +- 5 files changed, 83 insertions(+), 30 deletions(-) diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc index d5437b64..864d9a1d 100644 --- a/payload_consumer/verity_writer_android.cc +++ b/payload_consumer/verity_writer_android.cc @@ -29,6 +29,7 @@ extern "C" { } #include "update_engine/common/utils.h" +#include "update_engine/payload_consumer/file_descriptor.h" namespace chromeos_update_engine { @@ -39,7 +40,16 @@ std::unique_ptr CreateVerityWriter() { } // namespace verity_writer bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) { + auto read_fd = FileDescriptorPtr(new EintrSafeFileDescriptor()); + TEST_AND_RETURN_FALSE(read_fd->Open(partition.target_path.c_str(), O_RDWR)); + return Init(partition, read_fd, read_fd); +} +bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition, + FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd) { partition_ = &partition; + read_fd_ = read_fd; + write_fd_ = write_fd; if (partition_->hash_tree_size != 0 || partition_->fec_size != 0) { utils::SetBlockDeviceReadOnly(partition_->target_path, false); @@ -82,18 +92,18 @@ bool VerityWriterAndroid::Update(uint64_t offset, if (end_offset == hash_tree_data_end) { // All hash tree data blocks has been hashed, write hash tree to disk. - int fd = HANDLE_EINTR(open(partition_->target_path.c_str(), O_WRONLY)); - if (fd < 0) { - PLOG(ERROR) << "Failed to open " << partition_->target_path - << " to write hash tree."; - return false; - } - ScopedFdCloser fd_closer(&fd); - LOG(INFO) << "Writing verity hash tree to " << partition_->target_path; TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree()); - TEST_AND_RETURN_FALSE(hash_tree_builder_->WriteHashTreeToFd( - fd, partition_->hash_tree_offset)); + TEST_AND_RETURN_FALSE_ERRNO( + write_fd_->Seek(partition_->hash_tree_offset, SEEK_SET)); + auto success = hash_tree_builder_->WriteHashTree( + [write_fd_(this->write_fd_)](auto data, auto size) { + return utils::WriteAll(write_fd_, data, size); + }); + // hashtree builder already prints error messages. + if (!success) { + return false; + } hash_tree_builder_.reset(); } } @@ -103,7 +113,8 @@ bool VerityWriterAndroid::Update(uint64_t offset, partition_->fec_data_offset + partition_->fec_data_size; if (offset < fec_data_end && offset + size >= fec_data_end) { LOG(INFO) << "Writing verity FEC to " << partition_->target_path; - TEST_AND_RETURN_FALSE(EncodeFEC(partition_->target_path, + TEST_AND_RETURN_FALSE(EncodeFEC(read_fd_, + write_fd_, partition_->fec_data_offset, partition_->fec_data_size, partition_->fec_offset, @@ -116,7 +127,8 @@ bool VerityWriterAndroid::Update(uint64_t offset, return true; } -bool VerityWriterAndroid::EncodeFEC(const std::string& path, +bool VerityWriterAndroid::EncodeFEC(FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd, uint64_t data_offset, uint64_t data_size, uint64_t fec_offset, @@ -135,13 +147,6 @@ bool VerityWriterAndroid::EncodeFEC(const std::string& path, init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char); TEST_AND_RETURN_FALSE(rs_char != nullptr); - int fd = HANDLE_EINTR(open(path.c_str(), verify_mode ? 
O_RDONLY : O_RDWR)); - if (fd < 0) { - PLOG(ERROR) << "Failed to open " << path << " to write FEC."; - return false; - } - ScopedFdCloser fd_closer(&fd); - for (size_t i = 0; i < rounds; i++) { // Encodes |block_size| number of rs blocks each round so that we can read // one block each time instead of 1 byte to increase random read @@ -154,13 +159,13 @@ bool VerityWriterAndroid::EncodeFEC(const std::string& path, // Don't read past |data_size|, treat them as 0. if (offset < data_size) { ssize_t bytes_read = 0; - TEST_AND_RETURN_FALSE(utils::PReadAll(fd, + TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd, buffer.data(), buffer.size(), data_offset + offset, &bytes_read)); - TEST_AND_RETURN_FALSE(bytes_read == - static_cast(buffer.size())); + TEST_AND_RETURN_FALSE(bytes_read >= 0); + TEST_AND_RETURN_FALSE(static_cast(bytes_read) == buffer.size()); } for (size_t k = 0; k < buffer.size(); k++) { rs_blocks[k * rs_n + j] = buffer[k]; @@ -179,17 +184,42 @@ bool VerityWriterAndroid::EncodeFEC(const std::string& path, brillo::Blob fec_read(fec.size()); ssize_t bytes_read = 0; TEST_AND_RETURN_FALSE(utils::PReadAll( - fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read)); - TEST_AND_RETURN_FALSE(bytes_read == - static_cast(fec_read.size())); + read_fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read)); + TEST_AND_RETURN_FALSE(bytes_read >= 0); + TEST_AND_RETURN_FALSE(static_cast(bytes_read) == fec_read.size()); TEST_AND_RETURN_FALSE(fec == fec_read); } else { - TEST_AND_RETURN_FALSE( - utils::PWriteAll(fd, fec.data(), fec.size(), fec_offset)); + CHECK(write_fd); + if (!utils::PWriteAll(write_fd, fec.data(), fec.size(), fec_offset)) { + PLOG(ERROR) << "EncodeFEC write() failed"; + return false; + } } fec_offset += fec.size(); } return true; } + +bool VerityWriterAndroid::EncodeFEC(const std::string& path, + uint64_t data_offset, + uint64_t data_size, + uint64_t fec_offset, + uint64_t fec_size, + uint32_t fec_roots, + uint32_t block_size, + bool verify_mode) { + FileDescriptorPtr fd(new EintrSafeFileDescriptor()); + TEST_AND_RETURN_FALSE( + fd->Open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR)); + return EncodeFEC(fd, + fd, + data_offset, + data_size, + fec_offset, + fec_size, + fec_roots, + block_size, + verify_mode); +} } // namespace chromeos_update_engine diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h index 05a58566..7dfac0fe 100644 --- a/payload_consumer/verity_writer_android.h +++ b/payload_consumer/verity_writer_android.h @@ -22,6 +22,7 @@ #include +#include "payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/verity_writer_interface.h" namespace chromeos_update_engine { @@ -31,7 +32,10 @@ class VerityWriterAndroid : public VerityWriterInterface { VerityWriterAndroid() = default; ~VerityWriterAndroid() override = default; - bool Init(const InstallPlan::Partition& partition) override; + bool Init(const InstallPlan::Partition& partition, + FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd) override; + bool Init(const InstallPlan::Partition& partition); bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override; // Read [data_offset : data_offset + data_size) from |path| and encode FEC @@ -40,6 +44,15 @@ class VerityWriterAndroid : public VerityWriterInterface { // in each Update() like hash tree, because for every rs block, its data are // spreaded across entire |data_size|, unless we can cache all data in // memory, we have to re-read them from disk. 
+ static bool EncodeFEC(FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd, + uint64_t data_offset, + uint64_t data_size, + uint64_t fec_offset, + uint64_t fec_size, + uint32_t fec_roots, + uint32_t block_size, + bool verify_mode); static bool EncodeFEC(const std::string& path, uint64_t data_offset, uint64_t data_size, @@ -52,6 +65,8 @@ class VerityWriterAndroid : public VerityWriterInterface { private: const InstallPlan::Partition* partition_ = nullptr; + FileDescriptorPtr read_fd_; + FileDescriptorPtr write_fd_; std::unique_ptr hash_tree_builder_; DISALLOW_COPY_AND_ASSIGN(VerityWriterAndroid); diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h index a3ecef3c..db7988e9 100644 --- a/payload_consumer/verity_writer_interface.h +++ b/payload_consumer/verity_writer_interface.h @@ -22,6 +22,7 @@ #include +#include "payload_consumer/file_descriptor.h" #include "update_engine/payload_consumer/install_plan.h" namespace chromeos_update_engine { @@ -30,6 +31,9 @@ class VerityWriterInterface { public: virtual ~VerityWriterInterface() = default; + virtual bool Init(const InstallPlan::Partition& partition, + FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd) = 0; virtual bool Init(const InstallPlan::Partition& partition) = 0; // Update partition data at [offset : offset + size) stored in |buffer|. // Data not in |hash_tree_data_extent| or |fec_data_extent| is ignored. diff --git a/payload_consumer/verity_writer_stub.cc b/payload_consumer/verity_writer_stub.cc index a0e24673..314ec7ee 100644 --- a/payload_consumer/verity_writer_stub.cc +++ b/payload_consumer/verity_writer_stub.cc @@ -26,7 +26,9 @@ std::unique_ptr CreateVerityWriter() { } } // namespace verity_writer -bool VerityWriterStub::Init(const InstallPlan::Partition& partition) { +bool VerityWriterStub::Init(const InstallPlan::Partition& partition, + FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd) { return partition.hash_tree_size == 0 && partition.fec_size == 0; } diff --git a/payload_consumer/verity_writer_stub.h b/payload_consumer/verity_writer_stub.h index ea5e5749..f8d68ca8 100644 --- a/payload_consumer/verity_writer_stub.h +++ b/payload_consumer/verity_writer_stub.h @@ -26,7 +26,9 @@ class VerityWriterStub : public VerityWriterInterface { VerityWriterStub() = default; ~VerityWriterStub() override = default; - bool Init(const InstallPlan::Partition& partition) override; + bool Init(const InstallPlan::Partition& partition, + FileDescriptorPtr read_fd, + FileDescriptorPtr write_fd) override; bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override; private: From 877ddbe3a71705c5bb0aa248b5a58f0239137a07 Mon Sep 17 00:00:00 2001 From: Kelvin Zhang Date: Mon, 9 Nov 2020 13:37:56 -0500 Subject: [PATCH 462/624] Add timeout to OpenCowWriter Without a timeout, update_engine might observe race conditions where a device is mapped but it doesn't appear on filesystem yet. 
Bug: 168554689 Test: treehugger Change-Id: I91f7cd82f5a10a8d60bdb157a57bfcbbc5c9f964 --- aosp/dynamic_partition_control_android.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aosp/dynamic_partition_control_android.cc b/aosp/dynamic_partition_control_android.cc index d4618073..d296c53a 100644 --- a/aosp/dynamic_partition_control_android.cc +++ b/aosp/dynamic_partition_control_android.cc @@ -1250,11 +1250,11 @@ DynamicPartitionControlAndroid::OpenCowWriter( .metadata_slot = target_slot_, .partition_name = partition_name + suffix, .force_writable = true, - }; + .timeout_ms = kMapSnapshotTimeout}; // TODO(zhangkelvin) Open an APPEND mode CowWriter once there's an API to do // it. return snapshot_->OpenSnapshotWriter(params, std::move(source_path)); -} +} // namespace chromeos_update_engine std::optional DynamicPartitionControlAndroid::GetSuperDevice() { std::string device_dir_str; @@ -1268,7 +1268,7 @@ std::optional DynamicPartitionControlAndroid::GetSuperDevice() { } bool DynamicPartitionControlAndroid::MapAllPartitions() { - return snapshot_->MapAllSnapshots(); + return snapshot_->MapAllSnapshots(kMapSnapshotTimeout); } } // namespace chromeos_update_engine From 538bd590de3245349050d38f9ca197deb3eaa2a4 Mon Sep 17 00:00:00 2001 From: Amin Hassani Date: Wed, 4 Nov 2020 20:46:08 -0800 Subject: [PATCH 463/624] update_engine: Make SystemState accessible from everywhere SystemState is supposed to be a global context and is used almost everywhere. So instead of passing it to functions and keeping multiple pointers to it, it's better to do what we did in dlcservice and make it a singleton class with a getter that can be called from anywhere. BUG=b:171829801 TEST=unittests Change-Id: I3b2de9394b7769b3911195ca52d61dbe49afd4dd Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/2521792 Commit-Queue: Amin Hassani Tested-by: Amin Hassani Reviewed-by: Jae Hoon Kim --- Android.bp | 7 +- BUILD.gn | 6 +- aosp/metrics_reporter_android.cc | 1 - aosp/metrics_reporter_android.h | 4 +- aosp/update_attempter_android.cc | 2 - aosp/update_attempter_android_unittest.cc | 3 +- common/download_action.h | 9 +- common/metrics_reporter_interface.h | 17 +- common/metrics_reporter_stub.h | 4 +- common/mock_metrics_reporter.h | 10 +- common/system_state.cc | 23 ++ common/system_state.h | 18 +- cros/common_service.cc | 68 ++-- cros/common_service.h | 6 +- cros/common_service_unittest.cc | 27 +- cros/connection_manager.cc | 20 +- cros/connection_manager.h | 6 +- cros/connection_manager_interface.h | 5 +- cros/connection_manager_unittest.cc | 20 +- cros/daemon_chromeos.cc | 19 +- cros/daemon_chromeos.h | 3 - cros/dbus_service.cc | 8 +- cros/dbus_service.h | 4 +- cros/fake_system_state.cc | 6 +- cros/fake_system_state.h | 9 +- cros/image_properties.h | 9 +- cros/image_properties_chromeos.cc | 9 +- cros/image_properties_chromeos_unittest.cc | 31 +- cros/metrics_reporter_omaha.cc | 35 +- cros/metrics_reporter_omaha.h | 12 +- cros/metrics_reporter_omaha_unittest.cc | 106 +++--- cros/mock_omaha_request_params.h | 3 +- cros/mock_payload_state.h | 3 +- cros/omaha_request_action.cc | 154 +++++---- cros/omaha_request_action.h | 14 +- cros/omaha_request_action_fuzzer.cc | 3 +- cros/omaha_request_action_unittest.cc | 169 +++++----- cros/omaha_request_builder_xml.h | 1 - cros/omaha_request_builder_xml_unittest.cc | 63 ++-- cros/omaha_request_params.cc | 10 +- cros/omaha_request_params.h | 10 +- cros/omaha_request_params_unittest.cc | 18 +-
cros/omaha_response_handler_action.cc | 52 +-- cros/omaha_response_handler_action.h | 6 +- .../omaha_response_handler_action_unittest.cc | 183 ++++++----- cros/payload_state.cc | 75 ++--- cros/payload_state.h | 5 +- cros/payload_state_unittest.cc | 305 ++++++++---------- cros/real_system_state.cc | 10 +- cros/real_system_state.h | 22 +- cros/update_attempter.cc | 267 ++++++++------- cros/update_attempter.h | 10 +- cros/update_attempter_unittest.cc | 290 +++++++++-------- download_action.cc | 23 +- download_action_android_unittest.cc | 1 - download_action_unittest.cc | 61 ++-- metrics_utils.h | 2 - payload_generator/generate_delta_main.cc | 1 - update_manager/real_system_provider.cc | 13 +- update_manager/real_system_provider.h | 7 +- .../real_system_provider_unittest.cc | 12 +- update_manager/real_updater_provider.cc | 151 ++++----- update_manager/real_updater_provider.h | 7 +- .../real_updater_provider_unittest.cc | 93 +++--- update_manager/staging_utils.cc | 1 - update_manager/state_factory.cc | 15 +- update_manager/state_factory.h | 4 +- 67 files changed, 1220 insertions(+), 1351 deletions(-) create mode 100644 common/system_state.cc diff --git a/Android.bp b/Android.bp index 8f465f63..e5f8c315 100644 --- a/Android.bp +++ b/Android.bp @@ -160,7 +160,6 @@ cc_library_static { "common/subprocess.cc", "common/terminator.cc", "common/utils.cc", - "download_action.cc", "payload_consumer/bzip_extent_writer.cc", "payload_consumer/cached_file_descriptor.cc", "payload_consumer/certificate_parser_android.cc", @@ -297,6 +296,7 @@ cc_library_static { srcs: [ ":libupdate_engine_aidl", + "common/system_state.cc", "aosp/binder_service_android.cc", "aosp/binder_service_stable_android.cc", "aosp/daemon_android.cc", @@ -306,6 +306,7 @@ cc_library_static { "aosp/network_selector_android.cc", "aosp/update_attempter_android.cc", "certificate_checker.cc", + "download_action.cc", "libcurl_http_fetcher.cc", "metrics_utils.cc", "update_boot_flags_action.cc", @@ -360,6 +361,8 @@ cc_binary { "aosp/update_attempter_android.cc", "common/metrics_reporter_stub.cc", "common/network_selector_stub.cc", + "common/system_state.cc", + "download_action.cc", "metrics_utils.cc", "update_boot_flags_action.cc", "update_status_utils.cc", @@ -491,6 +494,8 @@ cc_library_static { host_supported: true, srcs: [ + "common/system_state.cc", + "download_action.cc", "payload_generator/ab_generator.cc", "payload_generator/annotated_operation.cc", "payload_generator/blob_file_writer.cc", diff --git a/BUILD.gn b/BUILD.gn index 1f5dc7f8..5ac0a3fa 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -146,7 +146,6 @@ static_library("libpayload_consumer") { "common/terminator.cc", "common/utils.cc", "cros/platform_constants_chromeos.cc", - "download_action.cc", "payload_consumer/bzip_extent_writer.cc", "payload_consumer/cached_file_descriptor.cc", "payload_consumer/certificate_parser_stub.cc", @@ -194,6 +193,7 @@ static_library("libupdate_engine") { sources = [ "certificate_checker.cc", "common/connection_utils.cc", + "common/system_state.cc", "cros/boot_control_chromeos.cc", "cros/common_service.cc", "cros/connection_manager.cc", @@ -216,6 +216,7 @@ static_library("libupdate_engine") { "cros/requisition_util.cc", "cros/shill_proxy.cc", "cros/update_attempter.cc", + "download_action.cc", "libcurl_http_fetcher.cc", "metrics_utils.cc", "update_boot_flags_action.cc", @@ -331,6 +332,9 @@ executable("update_engine_client") { static_library("libpayload_generator") { sources = [ "common/file_fetcher.cc", + "common/system_state.cc", + "cros/real_system_state.cc", + 
"download_action.cc", "payload_generator/ab_generator.cc", "payload_generator/annotated_operation.cc", "payload_generator/blob_file_writer.cc", diff --git a/aosp/metrics_reporter_android.cc b/aosp/metrics_reporter_android.cc index ea3bb6d2..22ebf0dc 100644 --- a/aosp/metrics_reporter_android.cc +++ b/aosp/metrics_reporter_android.cc @@ -61,7 +61,6 @@ std::unique_ptr CreateMetricsReporter() { } // namespace metrics void MetricsReporterAndroid::ReportUpdateAttemptMetrics( - SystemState* /* system_state */, int attempt_number, PayloadType payload_type, base::TimeDelta duration, diff --git a/aosp/metrics_reporter_android.h b/aosp/metrics_reporter_android.h index 4a173bf3..729542e2 100644 --- a/aosp/metrics_reporter_android.h +++ b/aosp/metrics_reporter_android.h @@ -39,13 +39,11 @@ class MetricsReporterAndroid : public MetricsReporterInterface { void ReportDailyMetrics(base::TimeDelta os_age) override {} void ReportUpdateCheckMetrics( - SystemState* system_state, metrics::CheckResult result, metrics::CheckReaction reaction, metrics::DownloadErrorCode download_error_code) override {} - void ReportUpdateAttemptMetrics(SystemState* system_state, - int attempt_number, + void ReportUpdateAttemptMetrics(int attempt_number, PayloadType payload_type, base::TimeDelta duration, base::TimeDelta duration_uptime, diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc index 57430fea..d48293a4 100644 --- a/aosp/update_attempter_android.cc +++ b/aosp/update_attempter_android.cc @@ -733,7 +733,6 @@ void UpdateAttempterAndroid::BuildUpdateActions(HttpFetcher* fetcher) { std::make_unique(prefs_, boot_control_, hardware_, - nullptr, // system_state, not used. fetcher, // passes ownership true /* interactive */); download_action->set_delegate(this); @@ -804,7 +803,6 @@ void UpdateAttempterAndroid::CollectAndReportUpdateMetricsOnUpdateFinished( TimeDelta duration_uptime = clock_->GetMonotonicTime() - monotonic_time_start; metrics_reporter_->ReportUpdateAttemptMetrics( - nullptr, // system_state static_cast(attempt_number), payload_type, duration, diff --git a/aosp/update_attempter_android_unittest.cc b/aosp/update_attempter_android_unittest.cc index bb44450d..fc302684 100644 --- a/aosp/update_attempter_android_unittest.cc +++ b/aosp/update_attempter_android_unittest.cc @@ -138,8 +138,7 @@ TEST_F(UpdateAttempterAndroidTest, ReportMetricsOnUpdateTerminated) { TimeDelta duration_uptime = up_time - Time::FromInternalValue(12345); EXPECT_CALL( *metrics_reporter_, - ReportUpdateAttemptMetrics(_, - 2, + ReportUpdateAttemptMetrics(2, _, duration, duration_uptime, diff --git a/common/download_action.h b/common/download_action.h index c167c2d2..18e58531 100644 --- a/common/download_action.h +++ b/common/download_action.h @@ -28,7 +28,6 @@ #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/http_fetcher.h" #include "update_engine/common/multi_range_http_fetcher.h" -#include "update_engine/common/system_state.h" #include "update_engine/payload_consumer/delta_performer.h" #include "update_engine/payload_consumer/install_plan.h" @@ -71,12 +70,11 @@ class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate { // Takes ownership of the passed in HttpFetcher. Useful for testing. 
// A good calling pattern is: - // DownloadAction(prefs, boot_contol, hardware, system_state, + // DownloadAction(prefs, boot_contol, hardware, // new WhateverHttpFetcher, false); DownloadAction(PrefsInterface* prefs, BootControlInterface* boot_control, HardwareInterface* hardware, - SystemState* system_state, HttpFetcher* http_fetcher, bool interactive); ~DownloadAction() override; @@ -141,14 +139,11 @@ class DownloadAction : public InstallPlanAction, public HttpFetcherDelegate { // Pointer to the current payload in install_plan_.payloads. InstallPlan::Payload* payload_{nullptr}; - // SystemState required pointers. + // Required pointers. PrefsInterface* prefs_; BootControlInterface* boot_control_; HardwareInterface* hardware_; - // Global context for the system. - SystemState* system_state_; - // Pointer to the MultiRangeHttpFetcher that does the http work. std::unique_ptr http_fetcher_; diff --git a/common/metrics_reporter_interface.h b/common/metrics_reporter_interface.h index d7c53472..08636e38 100644 --- a/common/metrics_reporter_interface.h +++ b/common/metrics_reporter_interface.h @@ -25,19 +25,12 @@ #include "update_engine/common/constants.h" #include "update_engine/common/error_code.h" #include "update_engine/common/metrics_constants.h" -#include "update_engine/common/system_state.h" namespace chromeos_update_engine { enum class ServerToCheck; enum class CertificateCheckResult; -namespace metrics { - -std::unique_ptr CreateMetricsReporter(); - -} // namespace metrics - class MetricsReporterInterface { public: virtual ~MetricsReporterInterface() = default; @@ -92,7 +85,6 @@ class MetricsReporterInterface { // if it's set, |kMetricCheckRollbackTargetVersion| reports the same, but only // if rollback is also allowed using enterprise policy. virtual void ReportUpdateCheckMetrics( - SystemState* system_state, metrics::CheckResult result, metrics::CheckReaction reaction, metrics::DownloadErrorCode download_error_code) = 0; @@ -120,8 +112,7 @@ class MetricsReporterInterface { // |kMetricAttemptTimeSinceLastAttemptUptimeMinutes| metrics are // automatically calculated and reported by maintaining persistent and // process-local state variables. 
- virtual void ReportUpdateAttemptMetrics(SystemState* system_state, - int attempt_number, + virtual void ReportUpdateAttemptMetrics(int attempt_number, PayloadType payload_type, base::TimeDelta duration, base::TimeDelta duration_uptime, @@ -242,6 +233,12 @@ class MetricsReporterInterface { bool has_time_restriction_policy, int time_to_update_days) = 0; }; +namespace metrics { + +std::unique_ptr CreateMetricsReporter(); + +} // namespace metrics + } // namespace chromeos_update_engine #endif // UPDATE_ENGINE_COMMON_METRICS_REPORTER_INTERFACE_H_ diff --git a/common/metrics_reporter_stub.h b/common/metrics_reporter_stub.h index 1470aaab..80cf4693 100644 --- a/common/metrics_reporter_stub.h +++ b/common/metrics_reporter_stub.h @@ -39,13 +39,11 @@ class MetricsReporterStub : public MetricsReporterInterface { void ReportDailyMetrics(base::TimeDelta os_age) override {} void ReportUpdateCheckMetrics( - SystemState* system_state, metrics::CheckResult result, metrics::CheckReaction reaction, metrics::DownloadErrorCode download_error_code) override {} - void ReportUpdateAttemptMetrics(SystemState* system_state, - int attempt_number, + void ReportUpdateAttemptMetrics(int attempt_number, PayloadType payload_type, base::TimeDelta duration, base::TimeDelta duration_uptime, diff --git a/common/mock_metrics_reporter.h b/common/mock_metrics_reporter.h index 922d1ee2..1bb1e84b 100644 --- a/common/mock_metrics_reporter.h +++ b/common/mock_metrics_reporter.h @@ -36,15 +36,13 @@ class MockMetricsReporter : public MetricsReporterInterface { MOCK_METHOD1(ReportDailyMetrics, void(base::TimeDelta os_age)); - MOCK_METHOD4(ReportUpdateCheckMetrics, - void(SystemState* system_state, - metrics::CheckResult result, + MOCK_METHOD3(ReportUpdateCheckMetrics, + void(metrics::CheckResult result, metrics::CheckReaction reaction, metrics::DownloadErrorCode download_error_code)); - MOCK_METHOD8(ReportUpdateAttemptMetrics, - void(SystemState* system_state, - int attempt_number, + MOCK_METHOD7(ReportUpdateAttemptMetrics, + void(int attempt_number, PayloadType payload_type, base::TimeDelta duration, base::TimeDelta duration_uptime, diff --git a/common/system_state.cc b/common/system_state.cc new file mode 100644 index 00000000..40bf760a --- /dev/null +++ b/common/system_state.cc @@ -0,0 +1,23 @@ +// +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include "update_engine/common/system_state.h" + +namespace chromeos_update_engine { + +std::unique_ptr SystemState::g_instance_; + +} // namespace chromeos_update_engine diff --git a/common/system_state.h b/common/system_state.h index 7a670466..dc40d363 100644 --- a/common/system_state.h +++ b/common/system_state.h @@ -17,6 +17,10 @@ #ifndef UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_ #define UPDATE_ENGINE_COMMON_SYSTEM_STATE_H_ +#include + +#include + namespace chromeos_update_manager { class UpdateManager; @@ -51,13 +55,14 @@ class UpdateAttempter; // the current state of the system, high-level objects whose lifetime is same // as main, system interfaces, etc. // Carved out separately so it can be mocked for unit tests. -// Currently it has only one method, but we should start migrating other -// methods to use this as and when needed to unit test them. -// TODO(jaysri): Consider renaming this to something like GlobalContext. class SystemState { public: - // Destructs this object. - virtual ~SystemState() {} + virtual ~SystemState() = default; + + static SystemState* Get() { + CHECK(g_instance_); + return g_instance_.get(); + } // Sets or gets the latest device policy. virtual void set_device_policy(const policy::DevicePolicy* device_policy) = 0; @@ -113,6 +118,9 @@ class SystemState { // Returns a pointer to the DlcServiceInterface singleton. virtual DlcServiceInterface* dlcservice() = 0; + + protected: + static std::unique_ptr g_instance_; }; } // namespace chromeos_update_engine diff --git a/cros/common_service.cc b/cros/common_service.cc index aecad8bd..0318999c 100644 --- a/cros/common_service.cc +++ b/cros/common_service.cc @@ -29,6 +29,7 @@ #include "update_engine/common/clock_interface.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" #include "update_engine/cros/connection_manager_interface.h" #include "update_engine/cros/omaha_request_params.h" @@ -66,8 +67,7 @@ const char* const UpdateEngineService::kErrorDomain = "update_engine"; const char* const UpdateEngineService::kErrorFailed = "org.chromium.UpdateEngine.Error.Failed"; -UpdateEngineService::UpdateEngineService(SystemState* system_state) - : system_state_(system_state) {} +UpdateEngineService::UpdateEngineService() = default; // org::chromium::UpdateEngineInterfaceInterface methods implementation. @@ -79,7 +79,7 @@ bool UpdateEngineService::SetUpdateAttemptFlags(ErrorPtr* /* error */, << "RestrictDownload=" << ((flags & UpdateAttemptFlags::kFlagRestrictDownload) ? "yes" : "no"); - system_state_->update_attempter()->SetUpdateAttemptFlags(flags); + SystemState::Get()->update_attempter()->SetUpdateAttemptFlags(flags); return true; } @@ -98,7 +98,7 @@ bool UpdateEngineService::AttemptUpdate(ErrorPtr* /* error */, << "interactive=" << (interactive ? "yes " : "no ") << "RestrictDownload=" << (restrict_downloads ? 
"yes " : "no "); - *out_result = system_state_->update_attempter()->CheckForUpdate( + *out_result = SystemState::Get()->update_attempter()->CheckForUpdate( in_app_version, in_omaha_url, flags); return true; } @@ -106,7 +106,8 @@ bool UpdateEngineService::AttemptUpdate(ErrorPtr* /* error */, bool UpdateEngineService::AttemptInstall(brillo::ErrorPtr* error, const string& omaha_url, const vector& dlc_ids) { - if (!system_state_->update_attempter()->CheckForInstall(dlc_ids, omaha_url)) { + if (!SystemState::Get()->update_attempter()->CheckForInstall(dlc_ids, + omaha_url)) { // TODO(xiaochu): support more detailed error messages. LogAndSetError(error, FROM_HERE, "Could not schedule install operation."); return false; @@ -117,7 +118,7 @@ bool UpdateEngineService::AttemptInstall(brillo::ErrorPtr* error, bool UpdateEngineService::AttemptRollback(ErrorPtr* error, bool in_powerwash) { LOG(INFO) << "Attempting rollback to non-active partitions."; - if (!system_state_->update_attempter()->Rollback(in_powerwash)) { + if (!SystemState::Get()->update_attempter()->Rollback(in_powerwash)) { // TODO(dgarrett): Give a more specific error code/reason. LogAndSetError(error, FROM_HERE, "Rollback attempt failed."); return false; @@ -127,14 +128,14 @@ bool UpdateEngineService::AttemptRollback(ErrorPtr* error, bool in_powerwash) { bool UpdateEngineService::CanRollback(ErrorPtr* /* error */, bool* out_can_rollback) { - bool can_rollback = system_state_->update_attempter()->CanRollback(); + bool can_rollback = SystemState::Get()->update_attempter()->CanRollback(); LOG(INFO) << "Checking to see if we can rollback . Result: " << can_rollback; *out_can_rollback = can_rollback; return true; } bool UpdateEngineService::ResetStatus(ErrorPtr* error) { - if (!system_state_->update_attempter()->ResetStatus()) { + if (!SystemState::Get()->update_attempter()->ResetStatus()) { // TODO(dgarrett): Give a more specific error code/reason. LogAndSetError(error, FROM_HERE, "ResetStatus failed."); return false; @@ -145,8 +146,8 @@ bool UpdateEngineService::ResetStatus(ErrorPtr* error) { bool UpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error, bool is_active, const string& dlc_id) { - if (!system_state_->update_attempter()->SetDlcActiveValue(is_active, - dlc_id)) { + if (!SystemState::Get()->update_attempter()->SetDlcActiveValue(is_active, + dlc_id)) { LogAndSetError(error, FROM_HERE, "SetDlcActiveValue failed."); return false; } @@ -155,7 +156,7 @@ bool UpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error, bool UpdateEngineService::GetStatus(ErrorPtr* error, UpdateEngineStatus* out_status) { - if (!system_state_->update_attempter()->GetStatus(out_status)) { + if (!SystemState::Get()->update_attempter()->GetStatus(out_status)) { LogAndSetError(error, FROM_HERE, "GetStatus failed."); return false; } @@ -163,7 +164,7 @@ bool UpdateEngineService::GetStatus(ErrorPtr* error, } bool UpdateEngineService::RebootIfNeeded(ErrorPtr* error) { - if (!system_state_->update_attempter()->RebootIfNeeded()) { + if (!SystemState::Get()->update_attempter()->RebootIfNeeded()) { // TODO(dgarrett): Give a more specific error code/reason. 
LogAndSetError(error, FROM_HERE, "Reboot not needed, or attempt failed."); return false; @@ -174,15 +175,16 @@ bool UpdateEngineService::RebootIfNeeded(ErrorPtr* error) { bool UpdateEngineService::SetChannel(ErrorPtr* error, const string& in_target_channel, bool in_is_powerwash_allowed) { - const policy::DevicePolicy* device_policy = system_state_->device_policy(); + const policy::DevicePolicy* device_policy = + SystemState::Get()->device_policy(); // The device_policy is loaded in a lazy way before an update check. Load it // now from the libbrillo cache if it wasn't already loaded. if (!device_policy) { - UpdateAttempter* update_attempter = system_state_->update_attempter(); + UpdateAttempter* update_attempter = SystemState::Get()->update_attempter(); if (update_attempter) { update_attempter->RefreshDevicePolicy(); - device_policy = system_state_->device_policy(); + device_policy = SystemState::Get()->device_policy(); } } @@ -198,7 +200,7 @@ bool UpdateEngineService::SetChannel(ErrorPtr* error, LOG(INFO) << "Setting destination channel to: " << in_target_channel; string error_message; - if (!system_state_->request_params()->SetTargetChannel( + if (!SystemState::Get()->request_params()->SetTargetChannel( in_target_channel, in_is_powerwash_allowed, &error_message)) { LogAndSetError(error, FROM_HERE, error_message); return false; @@ -209,7 +211,7 @@ bool UpdateEngineService::SetChannel(ErrorPtr* error, bool UpdateEngineService::GetChannel(ErrorPtr* /* error */, bool in_get_current_channel, string* out_channel) { - OmahaRequestParams* rp = system_state_->request_params(); + OmahaRequestParams* rp = SystemState::Get()->request_params(); *out_channel = (in_get_current_channel ? rp->current_channel() : rp->target_channel()); return true; @@ -217,7 +219,7 @@ bool UpdateEngineService::GetChannel(ErrorPtr* /* error */, bool UpdateEngineService::SetCohortHint(ErrorPtr* error, const string& in_cohort_hint) { - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); // It is ok to override the cohort hint with an invalid value since it is // stored in stateful partition. The code reading it should sanitize it @@ -235,7 +237,7 @@ bool UpdateEngineService::SetCohortHint(ErrorPtr* error, bool UpdateEngineService::GetCohortHint(ErrorPtr* error, string* out_cohort_hint) { - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); *out_cohort_hint = ""; if (prefs->Exists(kPrefsOmahaCohortHint) && @@ -248,7 +250,7 @@ bool UpdateEngineService::GetCohortHint(ErrorPtr* error, bool UpdateEngineService::SetP2PUpdatePermission(ErrorPtr* error, bool in_enabled) { - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (!prefs->SetBoolean(kPrefsP2PEnabled, in_enabled)) { LogAndSetError( @@ -263,7 +265,7 @@ bool UpdateEngineService::SetP2PUpdatePermission(ErrorPtr* error, bool UpdateEngineService::GetP2PUpdatePermission(ErrorPtr* error, bool* out_enabled) { - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); bool p2p_pref = false; // Default if no setting is present. 
if (prefs->Exists(kPrefsP2PEnabled) && @@ -279,7 +281,7 @@ bool UpdateEngineService::GetP2PUpdatePermission(ErrorPtr* error, bool UpdateEngineService::SetUpdateOverCellularPermission(ErrorPtr* error, bool in_allowed) { ConnectionManagerInterface* connection_manager = - system_state_->connection_manager(); + SystemState::Get()->connection_manager(); // Check if this setting is allowed by the device policy. if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) { @@ -293,7 +295,7 @@ bool UpdateEngineService::SetUpdateOverCellularPermission(ErrorPtr* error, // If the policy wasn't loaded yet, then it is still OK to change the local // setting because the policy will be checked again during the update check. - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (!prefs || !prefs->SetBoolean(kPrefsUpdateOverCellularPermission, in_allowed)) { @@ -311,7 +313,7 @@ bool UpdateEngineService::SetUpdateOverCellularTarget( const std::string& target_version, int64_t target_size) { ConnectionManagerInterface* connection_manager = - system_state_->connection_manager(); + SystemState::Get()->connection_manager(); // Check if this setting is allowed by the device policy. if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) { @@ -325,7 +327,7 @@ bool UpdateEngineService::SetUpdateOverCellularTarget( // If the policy wasn't loaded yet, then it is still OK to change the local // setting because the policy will be checked again during the update check. - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (!prefs || !prefs->SetString(kPrefsUpdateOverCellularTargetVersion, @@ -341,14 +343,14 @@ bool UpdateEngineService::SetUpdateOverCellularTarget( bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* error, bool* out_allowed) { ConnectionManagerInterface* connection_manager = - system_state_->connection_manager(); + SystemState::Get()->connection_manager(); if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) { // We have device policy, so ignore the user preferences. *out_allowed = connection_manager->IsUpdateAllowedOver( ConnectionType::kCellular, ConnectionTethering::kUnknown); } else { - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (!prefs || !prefs->Exists(kPrefsUpdateOverCellularPermission)) { // Update is not allowed as user preference is not set or not available. 
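The change to this service is uniformly mechanical: UpdateEngineService no longer receives or stores a SystemState*, and every former system_state_-> access becomes SystemState::Get()->. The singleton itself lives in the SystemState base class as a protected std::unique_ptr<SystemState> g_instance_, populated exactly once, either by RealSystemState at daemon startup or by FakeSystemState in tests, and Get() CHECK-fails if it was never created. A small, self-contained model of that shape, using simplified names rather than the real update_engine classes:

  #include <cassert>
  #include <memory>

  // Stand-in for SystemState: a process-wide context holding a protected
  // static instance that is created once and then reached through Get().
  class Context {
   public:
    virtual ~Context() = default;

    // Mirrors SystemState::Get(): hard-fails if nobody created the instance.
    static Context* Get() {
      assert(g_instance_ != nullptr);
      return g_instance_.get();
    }

    // Mirrors RealSystemState::CreateInstance() / FakeSystemState::CreateInstance().
    static void CreateInstance() { g_instance_.reset(new Context()); }

    int value() const { return 42; }

   protected:
    static std::unique_ptr<Context> g_instance_;
  };

  std::unique_ptr<Context> Context::g_instance_;

  int main() {
    Context::CreateInstance();  // done once by the daemon or the test fixture
    // Call sites no longer need an injected pointer; they ask for the global
    // context right where they use it.
    return Context::Get()->value() == 42 ? 0 : 1;
  }

The practical consequence is visible in the hunks above and below: constructors lose their SystemState* parameter, the system_state_ members disappear, and only the call sites change.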
@@ -372,26 +374,26 @@ bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* error, bool UpdateEngineService::GetDurationSinceUpdate(ErrorPtr* error, int64_t* out_usec_wallclock) { base::Time time; - if (!system_state_->update_attempter()->GetBootTimeAtUpdate(&time)) { + if (!SystemState::Get()->update_attempter()->GetBootTimeAtUpdate(&time)) { LogAndSetError(error, FROM_HERE, "No pending update."); return false; } - ClockInterface* clock = system_state_->clock(); + ClockInterface* clock = SystemState::Get()->clock(); *out_usec_wallclock = (clock->GetBootTime() - time).InMicroseconds(); return true; } bool UpdateEngineService::GetPrevVersion(ErrorPtr* /* error */, string* out_prev_version) { - *out_prev_version = system_state_->update_attempter()->GetPrevVersion(); + *out_prev_version = SystemState::Get()->update_attempter()->GetPrevVersion(); return true; } bool UpdateEngineService::GetRollbackPartition( ErrorPtr* /* error */, string* out_rollback_partition_name) { BootControlInterface::Slot rollback_slot = - system_state_->update_attempter()->GetRollbackSlot(); + SystemState::Get()->update_attempter()->GetRollbackSlot(); if (rollback_slot == BootControlInterface::kInvalidSlot) { out_rollback_partition_name->clear(); @@ -399,7 +401,7 @@ bool UpdateEngineService::GetRollbackPartition( } string name; - if (!system_state_->boot_control()->GetPartitionDevice( + if (!SystemState::Get()->boot_control()->GetPartitionDevice( "KERNEL", rollback_slot, &name)) { LOG(ERROR) << "Invalid rollback device"; return false; @@ -413,7 +415,7 @@ bool UpdateEngineService::GetRollbackPartition( bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */, int32_t* out_last_attempt_error) { ErrorCode error_code = - system_state_->update_attempter()->GetAttemptErrorCode(); + SystemState::Get()->update_attempter()->GetAttemptErrorCode(); *out_last_attempt_error = static_cast(error_code); return true; } diff --git a/cros/common_service.h b/cros/common_service.h index 6169d9cb..2c176c55 100644 --- a/cros/common_service.h +++ b/cros/common_service.h @@ -26,7 +26,6 @@ #include #include "update_engine/client_library/include/update_engine/update_status.h" -#include "update_engine/common/system_state.h" namespace chromeos_update_engine { @@ -38,7 +37,7 @@ class UpdateEngineService { // Generic service error. static const char* const kErrorFailed; - explicit UpdateEngineService(SystemState* system_state); + UpdateEngineService(); virtual ~UpdateEngineService() = default; // Set flags that influence how updates and checks are performed. These @@ -160,9 +159,6 @@ class UpdateEngineService { // Returns the last UpdateAttempt error. 
bool GetLastAttemptError(brillo::ErrorPtr* error, int32_t* out_last_attempt_error); - - private: - SystemState* system_state_; }; } // namespace chromeos_update_engine diff --git a/cros/common_service_unittest.cc b/cros/common_service_unittest.cc index 733ec0af..a6b00140 100644 --- a/cros/common_service_unittest.cc +++ b/cros/common_service_unittest.cc @@ -39,17 +39,14 @@ namespace chromeos_update_engine { class UpdateEngineServiceTest : public ::testing::Test { protected: - UpdateEngineServiceTest() - : mock_update_attempter_(fake_system_state_.mock_update_attempter()), - common_service_(&fake_system_state_) {} + UpdateEngineServiceTest() = default; - void SetUp() override { fake_system_state_.set_device_policy(nullptr); } + void SetUp() override { + FakeSystemState::CreateInstance(); + FakeSystemState::Get()->set_device_policy(nullptr); + mock_update_attempter_ = FakeSystemState::Get()->mock_update_attempter(); + } - // Fake/mock infrastructure. - FakeSystemState fake_system_state_; - policy::MockDevicePolicy mock_device_policy_; - - // Shortcut for fake_system_state_.mock_update_attempter(). MockUpdateAttempter* mock_update_attempter_; brillo::ErrorPtr error_; @@ -119,7 +116,7 @@ TEST_F(UpdateEngineServiceTest, SetDlcActiveValueReturnsFalse) { TEST_F(UpdateEngineServiceTest, SetChannelWithNoPolicy) { EXPECT_CALL(*mock_update_attempter_, RefreshDevicePolicy()); // If SetTargetChannel is called it means the policy check passed. - EXPECT_CALL(*fake_system_state_.mock_request_params(), + EXPECT_CALL(*FakeSystemState::Get()->mock_request_params(), SetTargetChannel("stable-channel", true, _)) .WillOnce(Return(true)); EXPECT_TRUE(common_service_.SetChannel(&error_, "stable-channel", true)); @@ -129,10 +126,10 @@ TEST_F(UpdateEngineServiceTest, SetChannelWithNoPolicy) { // When the policy is present, the delegated value should be checked. TEST_F(UpdateEngineServiceTest, SetChannelWithDelegatedPolicy) { policy::MockDevicePolicy mock_device_policy; - fake_system_state_.set_device_policy(&mock_device_policy); + FakeSystemState::Get()->set_device_policy(&mock_device_policy); EXPECT_CALL(mock_device_policy, GetReleaseChannelDelegated(_)) .WillOnce(DoAll(SetArgPointee<0>(true), Return(true))); - EXPECT_CALL(*fake_system_state_.mock_request_params(), + EXPECT_CALL(*FakeSystemState::Get()->mock_request_params(), SetTargetChannel("beta-channel", true, _)) .WillOnce(Return(true)); @@ -144,7 +141,7 @@ TEST_F(UpdateEngineServiceTest, SetChannelWithDelegatedPolicy) { // raised. 
TEST_F(UpdateEngineServiceTest, SetChannelWithInvalidChannel) { EXPECT_CALL(*mock_update_attempter_, RefreshDevicePolicy()); - EXPECT_CALL(*fake_system_state_.mock_request_params(), + EXPECT_CALL(*FakeSystemState::Get()->mock_request_params(), SetTargetChannel("foo-channel", true, _)) .WillOnce(Return(false)); @@ -155,8 +152,8 @@ TEST_F(UpdateEngineServiceTest, SetChannelWithInvalidChannel) { } TEST_F(UpdateEngineServiceTest, GetChannel) { - fake_system_state_.mock_request_params()->set_current_channel("current"); - fake_system_state_.mock_request_params()->set_target_channel("target"); + FakeSystemState::Get()->mock_request_params()->set_current_channel("current"); + FakeSystemState::Get()->mock_request_params()->set_target_channel("target"); string channel; EXPECT_TRUE(common_service_.GetChannel( &error_, true /* get_current_channel */, &channel)); diff --git a/cros/connection_manager.cc b/cros/connection_manager.cc index 331f76bd..6a5c63b5 100644 --- a/cros/connection_manager.cc +++ b/cros/connection_manager.cc @@ -41,16 +41,14 @@ using std::string; namespace chromeos_update_engine { namespace connection_manager { -std::unique_ptr CreateConnectionManager( - SystemState* system_state) { +std::unique_ptr CreateConnectionManager() { return std::unique_ptr( - new ConnectionManager(new ShillProxy(), system_state)); + new ConnectionManager(new ShillProxy())); } } // namespace connection_manager -ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy, - SystemState* system_state) - : shill_proxy_(shill_proxy), system_state_(system_state) {} +ConnectionManager::ConnectionManager(ShillProxyInterface* shill_proxy) + : shill_proxy_(shill_proxy) {} bool ConnectionManager::IsUpdateAllowedOver( ConnectionType type, ConnectionTethering tethering) const { @@ -64,15 +62,16 @@ bool ConnectionManager::IsUpdateAllowedOver( << "Current connection is confirmed tethered, using Cellular setting."; } - const policy::DevicePolicy* device_policy = system_state_->device_policy(); + const policy::DevicePolicy* device_policy = + SystemState::Get()->device_policy(); // The device_policy is loaded in a lazy way before an update check. Load // it now from the libbrillo cache if it wasn't already loaded. if (!device_policy) { - UpdateAttempter* update_attempter = system_state_->update_attempter(); + UpdateAttempter* update_attempter = SystemState::Get()->update_attempter(); if (update_attempter) { update_attempter->RefreshDevicePolicy(); - device_policy = system_state_->device_policy(); + device_policy = SystemState::Get()->device_policy(); } } @@ -109,7 +108,8 @@ bool ConnectionManager::IsUpdateAllowedOver( } bool ConnectionManager::IsAllowedConnectionTypesForUpdateSet() const { - const policy::DevicePolicy* device_policy = system_state_->device_policy(); + const policy::DevicePolicy* device_policy = + SystemState::Get()->device_policy(); if (!device_policy) { LOG(INFO) << "There's no device policy loaded yet."; return false; diff --git a/cros/connection_manager.h b/cros/connection_manager.h index b1fb961f..bb54ff7a 100644 --- a/cros/connection_manager.h +++ b/cros/connection_manager.h @@ -35,8 +35,7 @@ class ConnectionManager : public ConnectionManagerInterface { public: // Constructs a new ConnectionManager object initialized with the // given system state. - ConnectionManager(ShillProxyInterface* shill_proxy, - SystemState* system_state); + explicit ConnectionManager(ShillProxyInterface* shill_proxy); ~ConnectionManager() override = default; // ConnectionManagerInterface overrides. 
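For unit tests the same move applies: fixtures stop owning a FakeSystemState member and instead reset the global instance in SetUp(), then configure it through FakeSystemState::Get(), as the converted common_service and connection_manager tests do. A minimal sketch of that fixture shape (ExampleTest and its test body are illustrative only, not part of this change; the header path is assumed from this series' cros/ layout):

  #include <gtest/gtest.h>

  #include "update_engine/cros/fake_system_state.h"

  namespace chromeos_update_engine {

  class ExampleTest : public ::testing::Test {
   protected:
    void SetUp() override {
      // Replaces the old FakeSystemState fake_system_state_ fixture member.
      FakeSystemState::CreateInstance();
      FakeSystemState::Get()->set_device_policy(nullptr);
    }
  };

  TEST_F(ExampleTest, FakeIsGloballyVisible) {
    // Code under test reaches the same object through SystemState::Get().
    EXPECT_NE(nullptr, FakeSystemState::Get());
  }

  }  // namespace chromeos_update_engine

Because every test in the process shares the one g_instance_, calling CreateInstance() in each SetUp() also gives each test a fresh fake rather than state leaked from the previous test.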
@@ -58,9 +57,6 @@ class ConnectionManager : public ConnectionManagerInterface { // The mockable interface to access the shill DBus proxies. std::unique_ptr shill_proxy_; - // The global context for update_engine. - SystemState* system_state_; - DISALLOW_COPY_AND_ASSIGN(ConnectionManager); }; diff --git a/cros/connection_manager_interface.h b/cros/connection_manager_interface.h index 6dd9fbdd..dc6c9838 100644 --- a/cros/connection_manager_interface.h +++ b/cros/connection_manager_interface.h @@ -25,8 +25,6 @@ namespace chromeos_update_engine { -class SystemState; - // This class exposes a generic interface to the connection manager // (e.g FlimFlam, Shill, etc.) to consolidate all connection-related // logic in update_engine. @@ -59,8 +57,7 @@ class ConnectionManagerInterface { namespace connection_manager { // Factory function which creates a ConnectionManager. -std::unique_ptr CreateConnectionManager( - SystemState* system_state); +std::unique_ptr CreateConnectionManager(); } // namespace connection_manager } // namespace chromeos_update_engine diff --git a/cros/connection_manager_unittest.cc b/cros/connection_manager_unittest.cc index 3f1ee5a3..46da8cc9 100644 --- a/cros/connection_manager_unittest.cc +++ b/cros/connection_manager_unittest.cc @@ -52,7 +52,8 @@ class ConnectionManagerTest : public ::testing::Test { void SetUp() override { loop_.SetAsCurrent(); - fake_system_state_.set_connection_manager(&cmut_); + FakeSystemState::CreateInstance(); + FakeSystemState::Get()->set_connection_manager(&cmut_); } void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); } @@ -81,11 +82,10 @@ class ConnectionManagerTest : public ::testing::Test { ConnectionTethering expected_tethering); brillo::FakeMessageLoop loop_{nullptr}; - FakeSystemState fake_system_state_; FakeShillProxy* fake_shill_proxy_; // ConnectionManager under test. - ConnectionManager cmut_{fake_shill_proxy_, &fake_system_state_}; + ConnectionManager cmut_{fake_shill_proxy_}; }; void ConnectionManagerTest::SetManagerReply(const char* default_service, @@ -227,7 +227,7 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOverWifiTest) { TEST_F(ConnectionManagerTest, AllowUpdatesOnlyOver3GPerPolicyTest) { policy::MockDevicePolicy allow_3g_policy; - fake_system_state_.set_device_policy(&allow_3g_policy); + FakeSystemState::Get()->set_device_policy(&allow_3g_policy); // This test tests cellular (3G) being the only connection type being allowed. set allowed_set; @@ -244,7 +244,7 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOnlyOver3GPerPolicyTest) { TEST_F(ConnectionManagerTest, AllowUpdatesOver3GAndOtherTypesPerPolicyTest) { policy::MockDevicePolicy allow_3g_policy; - fake_system_state_.set_device_policy(&allow_3g_policy); + FakeSystemState::Get()->set_device_policy(&allow_3g_policy); // This test tests multiple connection types being allowed, with // 3G one among them. Only Cellular is currently enforced by the policy @@ -276,7 +276,7 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOver3GAndOtherTypesPerPolicyTest) { TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularByDefaultTest) { policy::MockDevicePolicy device_policy; // Set an empty device policy. 
- fake_system_state_.set_device_policy(&device_policy); + FakeSystemState::Get()->set_device_policy(&device_policy); EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular, ConnectionTethering::kUnknown)); @@ -285,7 +285,7 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularByDefaultTest) { TEST_F(ConnectionManagerTest, AllowUpdatesOverTetheredNetworkByDefaultTest) { policy::MockDevicePolicy device_policy; // Set an empty device policy. - fake_system_state_.set_device_policy(&device_policy); + FakeSystemState::Get()->set_device_policy(&device_policy); EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi, ConnectionTethering::kConfirmed)); @@ -298,7 +298,7 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOverTetheredNetworkByDefaultTest) { TEST_F(ConnectionManagerTest, BlockUpdatesOver3GPerPolicyTest) { policy::MockDevicePolicy block_3g_policy; - fake_system_state_.set_device_policy(&block_3g_policy); + FakeSystemState::Get()->set_device_policy(&block_3g_policy); // Test that updates for 3G are blocked while updates are allowed // over several other types. @@ -317,7 +317,7 @@ TEST_F(ConnectionManagerTest, BlockUpdatesOver3GPerPolicyTest) { TEST_F(ConnectionManagerTest, AllowUpdatesOver3GIfPolicyIsNotSet) { policy::MockDevicePolicy device_policy; - fake_system_state_.set_device_policy(&device_policy); + FakeSystemState::Get()->set_device_policy(&device_policy); // Return false for GetAllowedConnectionTypesForUpdate and see // that updates are allowed as device policy is not set. Further @@ -331,7 +331,7 @@ TEST_F(ConnectionManagerTest, AllowUpdatesOver3GIfPolicyIsNotSet) { } TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularIfPolicyFailsToBeLoaded) { - fake_system_state_.set_device_policy(nullptr); + FakeSystemState::Get()->set_device_policy(nullptr); EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular, ConnectionTethering::kUnknown)); diff --git a/cros/daemon_chromeos.cc b/cros/daemon_chromeos.cc index 1e0e6d61..5fa24ea3 100644 --- a/cros/daemon_chromeos.cc +++ b/cros/daemon_chromeos.cc @@ -41,18 +41,15 @@ int DaemonChromeOS::OnInit() { if (exit_code != EX_OK) return exit_code; - // Initialize update engine global state but continue if something fails. - // TODO(deymo): Move the daemon_state_ initialization to a factory method - // avoiding the explicit re-usage of the |bus| instance, shared between - // D-Bus service and D-Bus client calls. - RealSystemState* real_system_state = new RealSystemState(); - LOG_IF(ERROR, !real_system_state->Initialize()) - << "Failed to initialize system state."; - system_state_.reset(real_system_state); + // Initialize update engine global state. + // TODO(deymo): Move the initialization to a factory method avoiding the + // explicit re-usage of the |bus| instance, shared between D-Bus service and + // D-Bus client calls. + RealSystemState::CreateInstance(); // Create the DBus service. 
- dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state)); - system_state_->update_attempter()->AddObserver(dbus_adaptor_.get()); + dbus_adaptor_.reset(new UpdateEngineAdaptor()); + SystemState::Get()->update_attempter()->AddObserver(dbus_adaptor_.get()); dbus_adaptor_->RegisterAsync( base::Bind(&DaemonChromeOS::OnDBusRegistered, base::Unretained(this))); @@ -76,7 +73,7 @@ void DaemonChromeOS::OnDBusRegistered(bool succeeded) { QuitWithExitCode(1); return; } - system_state_->update_attempter()->StartUpdater(); + SystemState::Get()->update_attempter()->StartUpdater(); } } // namespace chromeos_update_engine diff --git a/cros/daemon_chromeos.h b/cros/daemon_chromeos.h index 3b9c8dea..ab9d4b2e 100644 --- a/cros/daemon_chromeos.h +++ b/cros/daemon_chromeos.h @@ -47,9 +47,6 @@ class DaemonChromeOS : public DaemonBase { // the main() function. Subprocess subprocess_; - // The global context sysetm state. - std::unique_ptr system_state_; - DISALLOW_COPY_AND_ASSIGN(DaemonChromeOS); }; diff --git a/cros/dbus_service.cc b/cros/dbus_service.cc index d115195f..1eb7b3c0 100644 --- a/cros/dbus_service.cc +++ b/cros/dbus_service.cc @@ -52,8 +52,8 @@ void ConvertToStatusResult(const UpdateEngineStatus& ue_status, } } // namespace -DBusUpdateEngineService::DBusUpdateEngineService(SystemState* system_state) - : common_(new UpdateEngineService{system_state}) {} +DBusUpdateEngineService::DBusUpdateEngineService() + : common_(new UpdateEngineService()) {} // org::chromium::UpdateEngineInterfaceInterface methods implementation. @@ -192,10 +192,10 @@ bool DBusUpdateEngineService::GetLastAttemptError( return common_->GetLastAttemptError(error, out_last_attempt_error); } -UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state) +UpdateEngineAdaptor::UpdateEngineAdaptor() : org::chromium::UpdateEngineInterfaceAdaptor(&dbus_service_), bus_(DBusConnection::Get()->GetDBus()), - dbus_service_(system_state), + dbus_service_(), dbus_object_(nullptr, bus_, dbus::ObjectPath(update_engine::kUpdateEngineServicePath)) {} diff --git a/cros/dbus_service.h b/cros/dbus_service.h index 9e4457fb..3ad6589e 100644 --- a/cros/dbus_service.h +++ b/cros/dbus_service.h @@ -38,7 +38,7 @@ namespace chromeos_update_engine { class DBusUpdateEngineService : public org::chromium::UpdateEngineInterfaceInterface { public: - explicit DBusUpdateEngineService(SystemState* system_state); + DBusUpdateEngineService(); virtual ~DBusUpdateEngineService() = default; // Implementation of org::chromium::UpdateEngineInterfaceInterface. @@ -165,7 +165,7 @@ class DBusUpdateEngineService class UpdateEngineAdaptor : public org::chromium::UpdateEngineInterfaceAdaptor, public ServiceObserverInterface { public: - explicit UpdateEngineAdaptor(SystemState* system_state); + UpdateEngineAdaptor(); ~UpdateEngineAdaptor() = default; // Register the DBus object with the update engine service asynchronously. diff --git a/cros/fake_system_state.cc b/cros/fake_system_state.cc index 9dfdc5ba..81fa957b 100644 --- a/cros/fake_system_state.cc +++ b/cros/fake_system_state.cc @@ -21,8 +21,8 @@ namespace chromeos_update_engine { // Mock the SystemStateInterface so that we could lie that // OOBE is completed even when there's no such marker file, etc. 
FakeSystemState::FakeSystemState() - : mock_update_attempter_(this, nullptr), - mock_request_params_(this), + : mock_update_attempter_(nullptr), + mock_request_params_(), fake_update_manager_(&fake_clock_), clock_(&fake_clock_), connection_manager_(&mock_connection_manager_), @@ -37,7 +37,7 @@ FakeSystemState::FakeSystemState() update_manager_(&fake_update_manager_), device_policy_(nullptr), fake_system_rebooted_(false) { - mock_payload_state_.Initialize(this); + mock_payload_state_.Initialize(); } } // namespace chromeos_update_engine diff --git a/cros/fake_system_state.h b/cros/fake_system_state.h index 2f92b7c5..b1d5952d 100644 --- a/cros/fake_system_state.h +++ b/cros/fake_system_state.h @@ -42,7 +42,11 @@ namespace chromeos_update_engine { // OOBE is completed even when there's no such marker file, etc. class FakeSystemState : public SystemState { public: - FakeSystemState(); + static void CreateInstance() { g_instance_.reset(new FakeSystemState()); } + + static FakeSystemState* Get() { + return reinterpret_cast(g_instance_.get()); + } // Base class overrides. All getters return the current implementation of // various members, either the default (fake/mock) or the one set to override @@ -237,6 +241,9 @@ class FakeSystemState : public SystemState { } private: + // Don't allow for direct initialization of this class. + FakeSystemState(); + // Default mock/fake implementations (owned). FakeBootControl fake_boot_control_; FakeClock fake_clock_; diff --git a/cros/image_properties.h b/cros/image_properties.h index 4957d12d..12975473 100644 --- a/cros/image_properties.h +++ b/cros/image_properties.h @@ -25,8 +25,6 @@ namespace chromeos_update_engine { -class SystemState; - // The read-only system properties of the running image. struct ImageProperties { // The product id of the image used for all channels, except canary. @@ -77,16 +75,15 @@ struct MutableImageProperties { // Loads all the image properties from the running system. In case of error // loading any of these properties from the read-only system image a default // value may be returned instead. -ImageProperties LoadImageProperties(SystemState* system_state); +ImageProperties LoadImageProperties(); // Loads the mutable image properties from the stateful partition if found or // the system image otherwise. -MutableImageProperties LoadMutableImageProperties(SystemState* system_state); +MutableImageProperties LoadMutableImageProperties(); // Stores the mutable image properties in the stateful partition. Returns // whether the operation succeeded. -bool StoreMutableImageProperties(SystemState* system_state, - const MutableImageProperties& properties); +bool StoreMutableImageProperties(const MutableImageProperties& properties); // Logs the image properties. void LogImageProperties(); diff --git a/cros/image_properties_chromeos.cc b/cros/image_properties_chromeos.cc index c22da7cd..79155b5d 100644 --- a/cros/image_properties_chromeos.cc +++ b/cros/image_properties_chromeos.cc @@ -86,7 +86,7 @@ void SetImagePropertiesRootPrefix(const char* test_root_prefix) { } } // namespace test -ImageProperties LoadImageProperties(SystemState* system_state) { +ImageProperties LoadImageProperties() { ImageProperties result; brillo::KeyValueStore lsb_release; @@ -97,7 +97,7 @@ ImageProperties LoadImageProperties(SystemState* system_state) { // In dev-mode and unofficial build we can override the image properties set // in the system image with the ones from the stateful partition, except the // channel of the current image. 
- HardwareInterface* const hardware = system_state->hardware(); + HardwareInterface* const hardware = SystemState::Get()->hardware(); if (!hardware->IsOfficialBuild() || !hardware->IsNormalBootMode()) LoadLsbRelease(LsbReleaseSource::kStateful, &lsb_release); @@ -124,7 +124,7 @@ ImageProperties LoadImageProperties(SystemState* system_state) { return result; } -MutableImageProperties LoadMutableImageProperties(SystemState* system_state) { +MutableImageProperties LoadMutableImageProperties() { MutableImageProperties result; brillo::KeyValueStore lsb_release; LoadLsbRelease(LsbReleaseSource::kSystem, &lsb_release); @@ -137,8 +137,7 @@ MutableImageProperties LoadMutableImageProperties(SystemState* system_state) { return result; } -bool StoreMutableImageProperties(SystemState* system_state, - const MutableImageProperties& properties) { +bool StoreMutableImageProperties(const MutableImageProperties& properties) { brillo::KeyValueStore lsb_release; LoadLsbRelease(LsbReleaseSource::kStateful, &lsb_release); lsb_release.SetString(kLsbReleaseUpdateChannelKey, properties.target_channel); diff --git a/cros/image_properties_chromeos_unittest.cc b/cros/image_properties_chromeos_unittest.cc index 4822995e..497554e2 100644 --- a/cros/image_properties_chromeos_unittest.cc +++ b/cros/image_properties_chromeos_unittest.cc @@ -40,16 +40,15 @@ class ImagePropertiesTest : public ::testing::Test { EXPECT_TRUE(base::CreateDirectory(base::FilePath( tempdir_.GetPath().value() + kStatefulPartition + "/etc"))); test::SetImagePropertiesRootPrefix(tempdir_.GetPath().value().c_str()); + FakeSystemState::CreateInstance(); SetLockDown(false); } void SetLockDown(bool locked_down) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(locked_down); - fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(locked_down); + FakeSystemState::Get()->fake_hardware()->SetIsNormalBootMode(locked_down); } - FakeSystemState fake_system_state_; - base::ScopedTempDir tempdir_; }; @@ -61,7 +60,7 @@ TEST_F(ImagePropertiesTest, SimpleTest) { "CHROMEOS_RELEASE_VERSION=0.2.2.3\n" "CHROMEOS_RELEASE_TRACK=dev-channel\n" "CHROMEOS_AUSERVER=http://www.google.com")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("arm-generic", props.board); EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", props.product_id); EXPECT_EQ("0.2.2.3", props.version); @@ -73,7 +72,7 @@ TEST_F(ImagePropertiesTest, AppIDTest) { ASSERT_TRUE(WriteFileString( tempdir_.GetPath().Append("etc/lsb-release").value(), "CHROMEOS_RELEASE_APPID={58c35cef-9d30-476e-9098-ce20377d535d}")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("{58c35cef-9d30-476e-9098-ce20377d535d}", props.product_id); } @@ -82,12 +81,12 @@ TEST_F(ImagePropertiesTest, ConfusingReleaseTest) { WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(), "CHROMEOS_RELEASE_FOO=CHROMEOS_RELEASE_VERSION=1.2.3.4\n" "CHROMEOS_RELEASE_VERSION=0.2.2.3")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("0.2.2.3", props.version); } TEST_F(ImagePropertiesTest, MissingVersionTest) { - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("", props.version); } @@ -103,12 +102,11 @@ TEST_F(ImagePropertiesTest, OverrideTest) { 
"CHROMEOS_RELEASE_BOARD=x86-generic\n" "CHROMEOS_RELEASE_TRACK=beta-channel\n" "CHROMEOS_AUSERVER=https://www.google.com")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("x86-generic", props.board); EXPECT_EQ("dev-channel", props.current_channel); EXPECT_EQ("https://www.google.com", props.omaha_url); - MutableImageProperties mutable_props = - LoadMutableImageProperties(&fake_system_state_); + MutableImageProperties mutable_props = LoadMutableImageProperties(); EXPECT_EQ("beta-channel", mutable_props.target_channel); } @@ -125,12 +123,11 @@ TEST_F(ImagePropertiesTest, OverrideLockDownTest) { "CHROMEOS_RELEASE_TRACK=stable-channel\n" "CHROMEOS_AUSERVER=http://www.google.com")); SetLockDown(true); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("arm-generic", props.board); EXPECT_EQ("dev-channel", props.current_channel); EXPECT_EQ("https://www.google.com", props.omaha_url); - MutableImageProperties mutable_props = - LoadMutableImageProperties(&fake_system_state_); + MutableImageProperties mutable_props = LoadMutableImageProperties(); EXPECT_EQ("stable-channel", mutable_props.target_channel); } @@ -141,7 +138,7 @@ TEST_F(ImagePropertiesTest, BoardAppIdUsedForNonCanaryChannelTest) { "CHROMEOS_BOARD_APPID=b\n" "CHROMEOS_CANARY_APPID=c\n" "CHROMEOS_RELEASE_TRACK=stable-channel\n")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("stable-channel", props.current_channel); EXPECT_EQ("b", props.product_id); } @@ -153,7 +150,7 @@ TEST_F(ImagePropertiesTest, CanaryAppIdUsedForCanaryChannelTest) { "CHROMEOS_BOARD_APPID=b\n" "CHROMEOS_CANARY_APPID=c\n" "CHROMEOS_RELEASE_TRACK=canary-channel\n")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("canary-channel", props.current_channel); EXPECT_EQ("c", props.canary_product_id); } @@ -164,7 +161,7 @@ TEST_F(ImagePropertiesTest, ReleaseAppIdUsedAsDefaultTest) { "CHROMEOS_RELEASE_APPID=r\n" "CHROMEOS_CANARY_APPID=c\n" "CHROMEOS_RELEASE_TRACK=stable-channel\n")); - ImageProperties props = LoadImageProperties(&fake_system_state_); + ImageProperties props = LoadImageProperties(); EXPECT_EQ("stable-channel", props.current_channel); EXPECT_EQ("r", props.product_id); } diff --git a/cros/metrics_reporter_omaha.cc b/cros/metrics_reporter_omaha.cc index 2cc0de5f..65093a1e 100644 --- a/cros/metrics_reporter_omaha.cc +++ b/cros/metrics_reporter_omaha.cc @@ -155,7 +155,6 @@ void MetricsReporterOmaha::ReportDailyMetrics(base::TimeDelta os_age) { } void MetricsReporterOmaha::ReportUpdateCheckMetrics( - SystemState* system_state, metrics::CheckResult result, metrics::CheckReaction reaction, metrics::DownloadErrorCode download_error_code) { @@ -182,8 +181,7 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( } base::TimeDelta time_since_last; - if (WallclockDurationHelper(system_state, - kPrefsMetricsCheckLastReportingTime, + if (WallclockDurationHelper(kPrefsMetricsCheckLastReportingTime, &time_since_last)) { metric = metrics::kMetricCheckTimeSinceLastCheckMinutes; metrics_lib_->SendToUMA(metric, @@ -195,8 +193,7 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( base::TimeDelta uptime_since_last; static int64_t uptime_since_last_storage = 0; - if (MonotonicDurationHelper( - system_state, &uptime_since_last_storage, &uptime_since_last)) { + if 
(MonotonicDurationHelper(&uptime_since_last_storage, &uptime_since_last)) { metric = metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes; metrics_lib_->SendToUMA(metric, uptime_since_last.InMinutes(), @@ -206,14 +203,14 @@ void MetricsReporterOmaha::ReportUpdateCheckMetrics( } // First section of target version specified for the update. - if (system_state && system_state->request_params()) { + if (SystemState::Get()->request_params()) { string target_version = - system_state->request_params()->target_version_prefix(); + SystemState::Get()->request_params()->target_version_prefix(); value = utils::VersionPrefix(target_version); if (value != 0) { metric = metrics::kMetricCheckTargetVersion; metrics_lib_->SendSparseToUMA(metric, value); - if (system_state->request_params()->rollback_allowed()) { + if (SystemState::Get()->request_params()->rollback_allowed()) { metric = metrics::kMetricCheckRollbackTargetVersion; metrics_lib_->SendSparseToUMA(metric, value); } @@ -233,7 +230,6 @@ void MetricsReporterOmaha::ReportAbnormallyTerminatedUpdateAttemptMetrics() { } void MetricsReporterOmaha::ReportUpdateAttemptMetrics( - SystemState* system_state, int attempt_number, PayloadType payload_type, base::TimeDelta duration, @@ -284,8 +280,7 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( } base::TimeDelta time_since_last; - if (WallclockDurationHelper(system_state, - kPrefsMetricsAttemptLastReportingTime, + if (WallclockDurationHelper(kPrefsMetricsAttemptLastReportingTime, &time_since_last)) { metric = metrics::kMetricAttemptTimeSinceLastAttemptMinutes; metrics_lib_->SendToUMA(metric, @@ -297,8 +292,7 @@ void MetricsReporterOmaha::ReportUpdateAttemptMetrics( static int64_t uptime_since_last_storage = 0; base::TimeDelta uptime_since_last; - if (MonotonicDurationHelper( - system_state, &uptime_since_last_storage, &uptime_since_last)) { + if (MonotonicDurationHelper(&uptime_since_last_storage, &uptime_since_last)) { metric = metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes; metrics_lib_->SendToUMA(metric, uptime_since_last.InMinutes(), @@ -557,13 +551,13 @@ void MetricsReporterOmaha::ReportEnterpriseUpdateSeenToDownloadDays( } bool MetricsReporterOmaha::WallclockDurationHelper( - SystemState* system_state, const std::string& state_variable_key, TimeDelta* out_duration) { bool ret = false; - Time now = system_state->clock()->GetWallclockTime(); + Time now = SystemState::Get()->clock()->GetWallclockTime(); int64_t stored_value; - if (system_state->prefs()->GetInt64(state_variable_key, &stored_value)) { + if (SystemState::Get()->prefs()->GetInt64(state_variable_key, + &stored_value)) { Time stored_time = Time::FromInternalValue(stored_value); if (stored_time > now) { LOG(ERROR) << "Stored time-stamp used for " << state_variable_key @@ -574,19 +568,18 @@ bool MetricsReporterOmaha::WallclockDurationHelper( } } - if (!system_state->prefs()->SetInt64(state_variable_key, - now.ToInternalValue())) { + if (!SystemState::Get()->prefs()->SetInt64(state_variable_key, + now.ToInternalValue())) { LOG(ERROR) << "Error storing time-stamp in " << state_variable_key; } return ret; } -bool MetricsReporterOmaha::MonotonicDurationHelper(SystemState* system_state, - int64_t* storage, +bool MetricsReporterOmaha::MonotonicDurationHelper(int64_t* storage, TimeDelta* out_duration) { bool ret = false; - Time now = system_state->clock()->GetMonotonicTime(); + Time now = SystemState::Get()->clock()->GetMonotonicTime(); if (*storage != 0) { Time stored_time = Time::FromInternalValue(*storage); *out_duration = now - 
stored_time; diff --git a/cros/metrics_reporter_omaha.h b/cros/metrics_reporter_omaha.h index 5b3fdb16..b6ffccee 100644 --- a/cros/metrics_reporter_omaha.h +++ b/cros/metrics_reporter_omaha.h @@ -29,7 +29,6 @@ #include "update_engine/common/error_code.h" #include "update_engine/common/metrics_constants.h" #include "update_engine/common/metrics_reporter_interface.h" -#include "update_engine/common/system_state.h" namespace chromeos_update_engine { @@ -117,13 +116,11 @@ class MetricsReporterOmaha : public MetricsReporterInterface { void ReportDailyMetrics(base::TimeDelta os_age) override; void ReportUpdateCheckMetrics( - SystemState* system_state, metrics::CheckResult result, metrics::CheckReaction reaction, metrics::DownloadErrorCode download_error_code) override; - void ReportUpdateAttemptMetrics(SystemState* system_state, - int attempt_number, + void ReportUpdateAttemptMetrics(int attempt_number, PayloadType payload_type, base::TimeDelta duration, base::TimeDelta duration_uptime, @@ -181,8 +178,7 @@ class MetricsReporterOmaha : public MetricsReporterInterface { // If the function returns |true|, the duration (always non-negative) // is returned in |out_duration|. If the function returns |false| // something went wrong or there was no previous measurement. - bool WallclockDurationHelper(SystemState* system_state, - const std::string& state_variable_key, + bool WallclockDurationHelper(const std::string& state_variable_key, base::TimeDelta* out_duration); // This function returns the duration on the monotonic clock since the @@ -194,9 +190,7 @@ class MetricsReporterOmaha : public MetricsReporterInterface { // If the function returns |true|, the duration (always non-negative) // is returned in |out_duration|. If the function returns |false| // something went wrong or there was no previous measurement. - bool MonotonicDurationHelper(SystemState* system_state, - int64_t* storage, - base::TimeDelta* out_duration); + bool MonotonicDurationHelper(int64_t* storage, base::TimeDelta* out_duration); std::unique_ptr metrics_lib_; diff --git a/cros/metrics_reporter_omaha_unittest.cc b/cros/metrics_reporter_omaha_unittest.cc index a25472a3..b1611375 100644 --- a/cros/metrics_reporter_omaha_unittest.cc +++ b/cros/metrics_reporter_omaha_unittest.cc @@ -40,6 +40,7 @@ class MetricsReporterOmahaTest : public ::testing::Test { // Reset the metrics_lib_ to a mock library. void SetUp() override { + FakeSystemState::CreateInstance(); mock_metrics_lib_ = new testing::NiceMock(); reporter_.metrics_lib_.reset(mock_metrics_lib_); } @@ -58,13 +59,12 @@ TEST_F(MetricsReporterOmahaTest, ReportDailyMetrics) { } TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetrics) { - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; // We need to execute the report twice to test the time since last report. 
- fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000)); fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000)); @@ -104,24 +104,20 @@ TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetrics) { metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes, 1, _, _, _)) .Times(1); - reporter_.ReportUpdateCheckMetrics( - &fake_system_state, result, reaction, error_code); + reporter_.ReportUpdateCheckMetrics(result, reaction, error_code); // Advance the clock by 1 minute and report the same metrics again. fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000)); fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000)); // Allow rollback - reporter_.ReportUpdateCheckMetrics( - &fake_system_state, result, reaction, error_code); + reporter_.ReportUpdateCheckMetrics(result, reaction, error_code); } TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsPinned) { - FakeSystemState fake_system_state; - - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.set_target_version_prefix("10575."); params.set_rollback_allowed(false); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable; metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored; @@ -138,17 +134,14 @@ TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsPinned) { SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, _)) .Times(0); - reporter_.ReportUpdateCheckMetrics( - &fake_system_state, result, reaction, error_code); + reporter_.ReportUpdateCheckMetrics(result, reaction, error_code); } TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsRollback) { - FakeSystemState fake_system_state; - - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.set_target_version_prefix("10575."); params.set_rollback_allowed(true); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable; metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored; @@ -166,8 +159,7 @@ TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsRollback) { SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, 10575)) .Times(1); - reporter_.ReportUpdateCheckMetrics( - &fake_system_state, result, reaction, error_code); + reporter_.ReportUpdateCheckMetrics(result, reaction, error_code); } TEST_F(MetricsReporterOmahaTest, @@ -183,12 +175,10 @@ TEST_F(MetricsReporterOmahaTest, } TEST_F(MetricsReporterOmahaTest, ReportUpdateAttemptMetrics) { - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; - - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000)); fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000)); @@ -252,8 +242,7 @@ TEST_F(MetricsReporterOmahaTest, ReportUpdateAttemptMetrics) { metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes, 1, _, _, _)) .Times(1); - reporter_.ReportUpdateAttemptMetrics(&fake_system_state, - attempt_number, + reporter_.ReportUpdateAttemptMetrics(attempt_number, 
payload_type, duration, duration_uptime, @@ -264,8 +253,7 @@ TEST_F(MetricsReporterOmahaTest, ReportUpdateAttemptMetrics) { // Advance the clock by 1 minute and report the same metrics again. fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000)); fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000)); - reporter_.ReportUpdateAttemptMetrics(&fake_system_state, - attempt_number, + reporter_.ReportUpdateAttemptMetrics(attempt_number, payload_type, duration, duration_uptime, @@ -539,113 +527,95 @@ TEST_F(MetricsReporterOmahaTest, } TEST_F(MetricsReporterOmahaTest, WallclockDurationHelper) { - FakeSystemState fake_system_state; FakeClock fake_clock; base::TimeDelta duration; const std::string state_variable_key = "test-prefs"; FakePrefs fake_prefs; - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); // Initialize wallclock to 1 sec. fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000)); // First time called so no previous measurement available. - EXPECT_FALSE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_FALSE( + reporter_.WallclockDurationHelper(state_variable_key, &duration)); // Next time, we should get zero since the clock didn't advance. - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // We can also call it as many times as we want with it being // considered a failure. - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 0); - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // Advance the clock one second, then we should get 1 sec on the // next call and 0 sec on the subsequent call. fake_clock.SetWallclockTime(base::Time::FromInternalValue(2000000)); - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 1); - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // Advance clock two seconds and we should get 2 sec and then 0 sec. fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000)); - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 2); - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // There's a possibility that the wallclock can go backwards (NTP // adjustments, for example) so check that we properly handle this // case. 
fake_clock.SetWallclockTime(base::Time::FromInternalValue(3000000)); - EXPECT_FALSE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_FALSE( + reporter_.WallclockDurationHelper(state_variable_key, &duration)); fake_clock.SetWallclockTime(base::Time::FromInternalValue(4000000)); - EXPECT_TRUE(reporter_.WallclockDurationHelper( - &fake_system_state, state_variable_key, &duration)); + EXPECT_TRUE(reporter_.WallclockDurationHelper(state_variable_key, &duration)); EXPECT_EQ(duration.InSeconds(), 1); } TEST_F(MetricsReporterOmahaTest, MonotonicDurationHelper) { int64_t storage = 0; - FakeSystemState fake_system_state; FakeClock fake_clock; base::TimeDelta duration; - fake_system_state.set_clock(&fake_clock); + FakeSystemState::Get()->set_clock(&fake_clock); // Initialize monotonic clock to 1 sec. fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000)); // First time called so no previous measurement available. - EXPECT_FALSE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_FALSE(reporter_.MonotonicDurationHelper(&storage, &duration)); // Next time, we should get zero since the clock didn't advance. - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // We can also call it as many times as we want with it being // considered a failure. - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 0); - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // Advance the clock one second, then we should get 1 sec on the // next call and 0 sec on the subsequent call. fake_clock.SetMonotonicTime(base::Time::FromInternalValue(2000000)); - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 1); - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 0); // Advance clock two seconds and we should get 2 sec and then 0 sec. fake_clock.SetMonotonicTime(base::Time::FromInternalValue(4000000)); - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 2); - EXPECT_TRUE(reporter_.MonotonicDurationHelper( - &fake_system_state, &storage, &duration)); + EXPECT_TRUE(reporter_.MonotonicDurationHelper(&storage, &duration)); EXPECT_EQ(duration.InSeconds(), 0); } diff --git a/cros/mock_omaha_request_params.h b/cros/mock_omaha_request_params.h index 6072d224..1e218124 100644 --- a/cros/mock_omaha_request_params.h +++ b/cros/mock_omaha_request_params.h @@ -27,8 +27,7 @@ namespace chromeos_update_engine { class MockOmahaRequestParams : public OmahaRequestParams { public: - explicit MockOmahaRequestParams(SystemState* system_state) - : OmahaRequestParams(system_state) { + MockOmahaRequestParams() : OmahaRequestParams() { // Delegate all calls to the parent instance by default. 
This helps the // migration from tests using the real RequestParams when they should have // use a fake or mock. diff --git a/cros/mock_payload_state.h b/cros/mock_payload_state.h index 56094e64..211b96d6 100644 --- a/cros/mock_payload_state.h +++ b/cros/mock_payload_state.h @@ -21,14 +21,13 @@ #include -#include "update_engine/common/system_state.h" #include "update_engine/cros/payload_state_interface.h" namespace chromeos_update_engine { class MockPayloadState : public PayloadStateInterface { public: - bool Initialize(SystemState* system_state) { return true; } + bool Initialize() { return true; } // Significant methods. MOCK_METHOD1(SetResponse, void(const OmahaResponse& response)); diff --git a/cros/omaha_request_action.cc b/cros/omaha_request_action.cc index cad0c674..c3a1a114 100644 --- a/cros/omaha_request_action.cc +++ b/cros/omaha_request_action.cc @@ -48,6 +48,7 @@ #include "update_engine/common/platform_constants.h" #include "update_engine/common/prefs.h" #include "update_engine/common/prefs_interface.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" #include "update_engine/cros/connection_manager_interface.h" #include "update_engine/cros/omaha_request_builder_xml.h" @@ -275,13 +276,11 @@ void ParserHandlerEntityDecl(void* user_data, } // namespace OmahaRequestAction::OmahaRequestAction( - SystemState* system_state, OmahaEvent* event, std::unique_ptr http_fetcher, bool ping_only, const string& session_id) - : system_state_(system_state), - params_(system_state->request_params()), + : params_(SystemState::Get()->request_params()), event_(event), http_fetcher_(std::move(http_fetcher)), policy_provider_(std::make_unique()), @@ -298,7 +297,8 @@ OmahaRequestAction::~OmahaRequestAction() {} int OmahaRequestAction::CalculatePingDays(const string& key) { int days = kPingNeverPinged; int64_t last_ping = 0; - if (system_state_->prefs()->GetInt64(key, &last_ping) && last_ping >= 0) { + if (SystemState::Get()->prefs()->GetInt64(key, &last_ping) && + last_ping >= 0) { days = (Time::Now() - Time::FromInternalValue(last_ping)).InDays(); if (days < 0) { // If |days| is negative, then the system clock must have jumped @@ -329,13 +329,13 @@ void OmahaRequestAction::InitPingDays() { bool OmahaRequestAction::ShouldPing() const { if (ping_active_days_ == kPingNeverPinged && ping_roll_call_days_ == kPingNeverPinged) { - int powerwash_count = system_state_->hardware()->GetPowerwashCount(); + int powerwash_count = SystemState::Get()->hardware()->GetPowerwashCount(); if (powerwash_count > 0) { LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because " << "powerwash_count is " << powerwash_count; return false; } - if (system_state_->hardware()->GetFirstActiveOmahaPingSent()) { + if (SystemState::Get()->hardware()->GetFirstActiveOmahaPingSent()) { LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because " << "the first_active_omaha_ping_sent is true."; return false; @@ -346,8 +346,8 @@ bool OmahaRequestAction::ShouldPing() const { } // static -int OmahaRequestAction::GetInstallDate(SystemState* system_state) { - PrefsInterface* prefs = system_state->prefs(); +int OmahaRequestAction::GetInstallDate() { + PrefsInterface* prefs = SystemState::Get()->prefs(); if (prefs == nullptr) return -1; @@ -383,8 +383,8 @@ int OmahaRequestAction::GetInstallDate(SystemState* system_state) { // inspecting the timestamp of when OOBE happened. 
Time time_of_oobe; - if (!system_state->hardware()->IsOOBEEnabled() || - !system_state->hardware()->IsOOBEComplete(&time_of_oobe)) { + if (!SystemState::Get()->hardware()->IsOOBEEnabled() || + !SystemState::Get()->hardware()->IsOOBEComplete(&time_of_oobe)) { LOG(INFO) << "Not generating Omaha InstallData as we have " << "no prefs file and OOBE is not complete or not enabled."; return -1; @@ -399,8 +399,8 @@ int OmahaRequestAction::GetInstallDate(SystemState* system_state) { } // Persist this to disk, for future use. - if (!OmahaRequestAction::PersistInstallDate( - system_state, num_days, kProvisionedFromOOBEMarker)) + if (!OmahaRequestAction::PersistInstallDate(num_days, + kProvisionedFromOOBEMarker)) return -1; LOG(INFO) << "Set the Omaha InstallDate from OOBE time-stamp to " << num_days @@ -422,7 +422,7 @@ void OmahaRequestAction::StorePingReply( if (!dlc_params.send_ping) continue; - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); // Reset the active metadata value to |kPingInactiveValue|. auto active_key = prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); @@ -462,8 +462,8 @@ void OmahaRequestAction::PerformAction() { ShouldPing(), // include_ping ping_active_days_, ping_roll_call_days_, - GetInstallDate(system_state_), - system_state_->prefs(), + GetInstallDate(), + SystemState::Get()->prefs(), session_id_); string request_post = omaha_request.GetRequest(); @@ -742,16 +742,14 @@ bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data, // element. This is the number of days since Jan 1 2007, 0:00 // PST. If we don't have a persisted value of the Omaha InstallDate, // we'll use it to calculate it and then persist it. - if (ParseInstallDate(parser_data, output_object) && - !HasInstallDate(system_state_)) { + if (ParseInstallDate(parser_data, output_object) && !HasInstallDate()) { // Since output_object->install_date_days is never negative, the // elapsed_days -> install-date calculation is reduced to simply // rounding down to the nearest number divisible by 7. int remainder = output_object->install_date_days % 7; int install_date_days_rounded = output_object->install_date_days - remainder; - if (PersistInstallDate(system_state_, - install_date_days_rounded, + if (PersistInstallDate(install_date_days_rounded, kProvisionedFromOmahaResponse)) { LOG(INFO) << "Set the Omaha InstallDate from Omaha Response to " << install_date_days_rounded << " days."; @@ -908,7 +906,8 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, string current_response(response_buffer_.begin(), response_buffer_.end()); LOG(INFO) << "Omaha request response: " << current_response; - PayloadStateInterface* const payload_state = system_state_->payload_state(); + PayloadStateInterface* const payload_state = + SystemState::Get()->payload_state(); // Set the max kernel key version based on whether rollback is allowed. 
SetMaxKernelKeyVersionForRollback(); @@ -924,8 +923,7 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, if (aux_error_code != ErrorCode::kSuccess) { metrics::DownloadErrorCode download_error_code = metrics_utils::GetDownloadErrorCode(aux_error_code); - system_state_->metrics_reporter()->ReportUpdateCheckMetrics( - system_state_, + SystemState::Get()->metrics_reporter()->ReportUpdateCheckMetrics( metrics::CheckResult::kUnset, metrics::CheckReaction::kUnset, download_error_code); @@ -976,7 +974,7 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, // Update the last ping day preferences based on the server daystart response // even if we didn't send a ping. Omaha always includes the daystart in the // response, but log the error if it didn't. - LOG_IF(ERROR, !UpdateLastPingDays(&parser_data, system_state_->prefs())) + LOG_IF(ERROR, !UpdateLastPingDays(&parser_data, SystemState::Get()->prefs())) << "Failed to update the last ping day preferences!"; // Sets first_active_omaha_ping_sent to true (vpd in CrOS). We only do this if @@ -985,9 +983,9 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, // need to check if a=-1 has been sent because older devices have already sent // their a=-1 in the past and we have to set first_active_omaha_ping_sent for // future checks. - if (!system_state_->hardware()->GetFirstActiveOmahaPingSent()) { - if (!system_state_->hardware()->SetFirstActiveOmahaPingSent()) { - system_state_->metrics_reporter()->ReportInternalErrorCode( + if (!SystemState::Get()->hardware()->GetFirstActiveOmahaPingSent()) { + if (!SystemState::Get()->hardware()->SetFirstActiveOmahaPingSent()) { + SystemState::Get()->metrics_reporter()->ReportInternalErrorCode( ErrorCode::kFirstActiveOmahaPingSentPersistenceError); } } @@ -1006,8 +1004,8 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, if (!ParseResponse(&parser_data, &output_object, &completer)) return; ProcessExclusions(&output_object, - system_state_->request_params(), - system_state_->update_attempter()->GetExcluder()); + SystemState::Get()->request_params(), + SystemState::Get()->update_attempter()->GetExcluder()); output_object.update_exists = true; SetOutputObject(output_object); @@ -1071,7 +1069,7 @@ void OmahaRequestAction::TransferComplete(HttpFetcher* fetcher, void OmahaRequestAction::CompleteProcessing() { ScopedActionCompleter completer(processor_, this); OmahaResponse& output_object = const_cast(GetOutputObject()); - PayloadStateInterface* payload_state = system_state_->payload_state(); + PayloadStateInterface* payload_state = SystemState::Get()->payload_state(); if (ShouldDeferDownload(&output_object)) { output_object.update_exists = false; @@ -1093,11 +1091,11 @@ void OmahaRequestAction::CompleteProcessing() { void OmahaRequestAction::OnLookupPayloadViaP2PCompleted(const string& url) { LOG(INFO) << "Lookup complete, p2p-client returned URL '" << url << "'"; if (!url.empty()) { - system_state_->payload_state()->SetP2PUrl(url); + SystemState::Get()->payload_state()->SetP2PUrl(url); } else { LOG(INFO) << "Forcibly disabling use of p2p for downloading " << "because no suitable peer could be found."; - system_state_->payload_state()->SetUsingP2PForDownloading(false); + SystemState::Get()->payload_state()->SetUsingP2PForDownloading(false); } CompleteProcessing(); } @@ -1121,18 +1119,17 @@ void OmahaRequestAction::LookupPayloadViaP2P(const OmahaResponse& response) { int64_t manifest_signature_size = 0; int64_t next_data_offset = 0; int64_t next_data_length = 0; - if 
(system_state_ && - system_state_->prefs()->GetInt64(kPrefsManifestMetadataSize, - &manifest_metadata_size) && + if (SystemState::Get()->prefs()->GetInt64(kPrefsManifestMetadataSize, + &manifest_metadata_size) && manifest_metadata_size != -1 && - system_state_->prefs()->GetInt64(kPrefsManifestSignatureSize, - &manifest_signature_size) && + SystemState::Get()->prefs()->GetInt64(kPrefsManifestSignatureSize, + &manifest_signature_size) && manifest_signature_size != -1 && - system_state_->prefs()->GetInt64(kPrefsUpdateStateNextDataOffset, - &next_data_offset) && + SystemState::Get()->prefs()->GetInt64(kPrefsUpdateStateNextDataOffset, + &next_data_offset) && next_data_offset != -1 && - system_state_->prefs()->GetInt64(kPrefsUpdateStateNextDataLength, - &next_data_length)) { + SystemState::Get()->prefs()->GetInt64(kPrefsUpdateStateNextDataLength, + &next_data_length)) { minimum_size = manifest_metadata_size + manifest_signature_size + next_data_offset + next_data_length; } @@ -1143,10 +1140,10 @@ void OmahaRequestAction::LookupPayloadViaP2P(const OmahaResponse& response) { return; string file_id = utils::CalculateP2PFileId(raw_hash, response.packages[0].size); - if (system_state_->p2p_manager()) { + if (SystemState::Get()->p2p_manager()) { LOG(INFO) << "Checking if payload is available via p2p, file_id=" << file_id << " minimum_size=" << minimum_size; - system_state_->p2p_manager()->LookupUrlForFile( + SystemState::Get()->p2p_manager()->LookupUrlForFile( file_id, minimum_size, TimeDelta::FromSeconds(kMaxP2PNetworkWaitTimeSeconds), @@ -1165,7 +1162,8 @@ bool OmahaRequestAction::ShouldDeferDownload(OmahaResponse* output_object) { // defer the download. This is because the download will always // happen from a peer on the LAN and we've been waiting in line for // our turn. - const PayloadStateInterface* payload_state = system_state_->payload_state(); + const PayloadStateInterface* payload_state = + SystemState::Get()->payload_state(); if (payload_state->GetUsingP2PForDownloading() && !payload_state->GetP2PUrl().empty()) { LOG(INFO) << "Download not deferred because download " @@ -1222,13 +1220,13 @@ OmahaRequestAction::IsWallClockBasedWaitingSatisfied( } TimeDelta elapsed_time = - system_state_->clock()->GetWallclockTime() - update_first_seen_at; + SystemState::Get()->clock()->GetWallclockTime() - update_first_seen_at; TimeDelta max_scatter_period = TimeDelta::FromDays(output_object->max_days_to_scatter); int64_t staging_wait_time_in_days = 0; // Use staging and its default max value if staging is on. - if (system_state_->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod, - &staging_wait_time_in_days) && + if (SystemState::Get()->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod, + &staging_wait_time_in_days) && staging_wait_time_in_days > 0) max_scatter_period = TimeDelta::FromDays(kMaxWaitTimeStagingInDays); @@ -1287,9 +1285,9 @@ OmahaRequestAction::IsWallClockBasedWaitingSatisfied( bool OmahaRequestAction::IsUpdateCheckCountBasedWaitingSatisfied() { int64_t update_check_count_value; - if (system_state_->prefs()->Exists(kPrefsUpdateCheckCount)) { - if (!system_state_->prefs()->GetInt64(kPrefsUpdateCheckCount, - &update_check_count_value)) { + if (SystemState::Get()->prefs()->Exists(kPrefsUpdateCheckCount)) { + if (!SystemState::Get()->prefs()->GetInt64(kPrefsUpdateCheckCount, + &update_check_count_value)) { // We are unable to read the update check count from file for some reason. // So let's proceed anyway so as to not stall the update. LOG(ERROR) << "Unable to read update check count. 
" @@ -1307,8 +1305,8 @@ bool OmahaRequestAction::IsUpdateCheckCountBasedWaitingSatisfied() { << update_check_count_value; // Write out the initial value of update_check_count_value. - if (!system_state_->prefs()->SetInt64(kPrefsUpdateCheckCount, - update_check_count_value)) { + if (!SystemState::Get()->prefs()->SetInt64(kPrefsUpdateCheckCount, + update_check_count_value)) { // We weren't able to write the update check count file for some reason. // So let's proceed anyway so as to not stall the update. LOG(ERROR) << "Unable to write update check count. " @@ -1353,8 +1351,8 @@ bool OmahaRequestAction::ParseInstallDate(OmahaParserData* parser_data, } // static -bool OmahaRequestAction::HasInstallDate(SystemState* system_state) { - PrefsInterface* prefs = system_state->prefs(); +bool OmahaRequestAction::HasInstallDate() { + PrefsInterface* prefs = SystemState::Get()->prefs(); if (prefs == nullptr) return false; @@ -1363,19 +1361,18 @@ bool OmahaRequestAction::HasInstallDate(SystemState* system_state) { // static bool OmahaRequestAction::PersistInstallDate( - SystemState* system_state, int install_date_days, InstallDateProvisioningSource source) { TEST_AND_RETURN_FALSE(install_date_days >= 0); - PrefsInterface* prefs = system_state->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (prefs == nullptr) return false; if (!prefs->SetInt64(kPrefsInstallDateDays, install_date_days)) return false; - system_state->metrics_reporter()->ReportInstallDateProvisioningSource( + SystemState::Get()->metrics_reporter()->ReportInstallDateProvisioningSource( static_cast(source), // Sample. kProvisionedMax); // Maximum. return true; @@ -1386,13 +1383,13 @@ void OmahaRequestAction::PersistCohortData(const string& prefs_key, if (!new_value) return; const string& value = new_value.value(); - if (value.empty() && system_state_->prefs()->Exists(prefs_key)) { - if (!system_state_->prefs()->Delete(prefs_key)) + if (value.empty() && SystemState::Get()->prefs()->Exists(prefs_key)) { + if (!SystemState::Get()->prefs()->Delete(prefs_key)) LOG(ERROR) << "Failed to remove stored " << prefs_key << "value."; else LOG(INFO) << "Removed stored " << prefs_key << " value."; } else if (!value.empty()) { - if (!system_state_->prefs()->SetString(prefs_key, value)) + if (!SystemState::Get()->prefs()->SetString(prefs_key, value)) LOG(INFO) << "Failed to store new setting " << prefs_key << " as " << value; else @@ -1414,7 +1411,7 @@ void OmahaRequestAction::PersistCohorts(const OmahaParserData& parser_data) { << " as it is not in the request params."; continue; } - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); PersistCohortData( prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsOmahaCohort}), app.cohort); @@ -1436,7 +1433,7 @@ bool OmahaRequestAction::PersistEolInfo(const map& attrs) { auto eol_date_attr = attrs.find(kAttrEolDate); if (eol_date_attr != attrs.end()) { const auto& eol_date = eol_date_attr->second; - if (!system_state_->prefs()->SetString(kPrefsOmahaEolDate, eol_date)) { + if (!SystemState::Get()->prefs()->SetString(kPrefsOmahaEolDate, eol_date)) { LOG(ERROR) << "Setting EOL date failed."; return false; } @@ -1504,15 +1501,15 @@ void OmahaRequestAction::ActionCompleted(ErrorCode code) { break; } - system_state_->metrics_reporter()->ReportUpdateCheckMetrics( - system_state_, result, reaction, download_error_code); + SystemState::Get()->metrics_reporter()->ReportUpdateCheckMetrics( + result, reaction, download_error_code); } bool 
OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response, ErrorCode* error) const { // Note: policy decision to not update to a version we rolled back from. string rollback_version = - system_state_->payload_state()->GetRollbackVersion(); + SystemState::Get()->payload_state()->GetRollbackVersion(); if (!rollback_version.empty()) { LOG(INFO) << "Detected previous rollback from version " << rollback_version; if (rollback_version == response.version) { @@ -1522,10 +1519,10 @@ bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response, } } - if (system_state_->hardware()->IsOOBEEnabled() && - !system_state_->hardware()->IsOOBEComplete(nullptr) && + if (SystemState::Get()->hardware()->IsOOBEEnabled() && + !SystemState::Get()->hardware()->IsOOBEComplete(nullptr) && (response.deadline.empty() || - system_state_->payload_state()->GetRollbackHappened()) && + SystemState::Get()->payload_state()->GetRollbackHappened()) && params_->app_version() != "ForcedUpdate") { LOG(INFO) << "Ignoring a non-critical Omaha update before OOBE completion."; *error = ErrorCode::kNonCriticalUpdateInOOBE; @@ -1557,7 +1554,7 @@ bool OmahaRequestAction::ShouldIgnoreUpdate(const OmahaResponse& response, bool OmahaRequestAction::IsUpdateAllowedOverCellularByPrefs( const OmahaResponse& response) const { - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (!prefs) { LOG(INFO) << "Disabling updates over cellular as the preferences are " @@ -1614,7 +1611,7 @@ bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection( ConnectionType type; ConnectionTethering tethering; ConnectionManagerInterface* connection_manager = - system_state_->connection_manager(); + SystemState::Get()->connection_manager(); if (!connection_manager->GetConnectionProperties(&type, &tethering)) { LOG(INFO) << "We could not determine our connection type. " << "Defaulting to allow updates."; @@ -1677,7 +1674,8 @@ bool OmahaRequestAction::IsRollbackEnabled() const { void OmahaRequestAction::SetMaxKernelKeyVersionForRollback() const { int max_kernel_rollforward; - int min_kernel_version = system_state_->hardware()->GetMinKernelKeyVersion(); + int min_kernel_version = + SystemState::Get()->hardware()->GetMinKernelKeyVersion(); if (IsRollbackEnabled()) { // If rollback is enabled, set the max kernel key version to the current // kernel key version. 
This has the effect of freezing kernel key roll @@ -1703,22 +1701,22 @@ void OmahaRequestAction::SetMaxKernelKeyVersionForRollback() const { } bool max_rollforward_set = - system_state_->hardware()->SetMaxKernelKeyRollforward( + SystemState::Get()->hardware()->SetMaxKernelKeyRollforward( max_kernel_rollforward); if (!max_rollforward_set) { LOG(ERROR) << "Failed to set kernel_max_rollforward"; } // Report metrics - system_state_->metrics_reporter()->ReportKeyVersionMetrics( + SystemState::Get()->metrics_reporter()->ReportKeyVersionMetrics( min_kernel_version, max_kernel_rollforward, max_rollforward_set); } base::Time OmahaRequestAction::LoadOrPersistUpdateFirstSeenAtPref() const { Time update_first_seen_at; int64_t update_first_seen_at_int; - if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) { - if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt, - &update_first_seen_at_int)) { + if (SystemState::Get()->prefs()->Exists(kPrefsUpdateFirstSeenAt)) { + if (SystemState::Get()->prefs()->GetInt64(kPrefsUpdateFirstSeenAt, + &update_first_seen_at_int)) { // Note: This timestamp could be that of ANY update we saw in the past // (not necessarily this particular update we're considering to apply) // but never got to apply because of some reason (e.g. stop AU policy, @@ -1738,10 +1736,10 @@ base::Time OmahaRequestAction::LoadOrPersistUpdateFirstSeenAtPref() const { return base::Time(); } } else { - update_first_seen_at = system_state_->clock()->GetWallclockTime(); + update_first_seen_at = SystemState::Get()->clock()->GetWallclockTime(); update_first_seen_at_int = update_first_seen_at.ToInternalValue(); - if (system_state_->prefs()->SetInt64(kPrefsUpdateFirstSeenAt, - update_first_seen_at_int)) { + if (SystemState::Get()->prefs()->SetInt64(kPrefsUpdateFirstSeenAt, + update_first_seen_at_int)) { LOG(INFO) << "Persisted the new value for UpdateFirstSeenAt: " << utils::ToString(update_first_seen_at); } else { diff --git a/cros/omaha_request_action.h b/cros/omaha_request_action.h index 9576a056..cdfcede4 100644 --- a/cros/omaha_request_action.h +++ b/cros/omaha_request_action.h @@ -34,7 +34,6 @@ #include "update_engine/common/action.h" #include "update_engine/common/http_fetcher.h" -#include "update_engine/common/system_state.h" #include "update_engine/cros/omaha_request_builder_xml.h" #include "update_engine/cros/omaha_response.h" @@ -102,8 +101,7 @@ class OmahaRequestAction : public Action, // OmahaRequestAction(..., new OmahaEvent(...), new WhateverHttpFetcher); // or // OmahaRequestAction(..., nullptr, new WhateverHttpFetcher); - OmahaRequestAction(SystemState* system_state, - OmahaEvent* event, + OmahaRequestAction(OmahaEvent* event, std::unique_ptr http_fetcher, bool ping_only, const std::string& session_id); @@ -157,7 +155,7 @@ class OmahaRequestAction : public Action, // Gets the install date, expressed as the number of PST8PDT // calendar weeks since January 1st 2007, times seven. Returns -1 if // unknown. See http://crbug.com/336838 for details about this value. - static int GetInstallDate(SystemState* system_state); + static int GetInstallDate(); // Parses the Omaha Response in |doc| and sets the // |install_date_days| field of |output_object| to the value of the @@ -168,13 +166,12 @@ class OmahaRequestAction : public Action, // Returns True if the kPrefsInstallDateDays state variable is set, // False otherwise. 
- static bool HasInstallDate(SystemState* system_state); + static bool HasInstallDate(); // Writes |install_date_days| into the kPrefsInstallDateDays state // variable and emits an UMA stat for the |source| used. Returns // True if the value was written, False if an error occurred. - static bool PersistInstallDate(SystemState* system_state, - int install_date_days, + static bool PersistInstallDate(int install_date_days, InstallDateProvisioningSource source); // Persist the new cohort value received in the XML file in the |prefs_key| @@ -289,9 +286,6 @@ class OmahaRequestAction : public Action, // kPrefsUpdateFirstSeenAt pref and returns it as a base::Time object. base::Time LoadOrPersistUpdateFirstSeenAtPref() const; - // Global system context. - SystemState* system_state_; - // Contains state that is relevant in the processing of the Omaha request. OmahaRequestParams* params_; diff --git a/cros/omaha_request_action_fuzzer.cc b/cros/omaha_request_action_fuzzer.cc index dd024670..995de8c5 100644 --- a/cros/omaha_request_action_fuzzer.cc +++ b/cros/omaha_request_action_fuzzer.cc @@ -31,10 +31,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); - chromeos_update_engine::FakeSystemState fake_system_state; + chromeos_update_engine::FakeSystemState::CreateInstance(); auto omaha_request_action = std::make_unique( - &fake_system_state, nullptr, std::make_unique( data, size, nullptr), diff --git a/cros/omaha_request_action_unittest.cc b/cros/omaha_request_action_unittest.cc index 9f9c75f4..a3799b47 100644 --- a/cros/omaha_request_action_unittest.cc +++ b/cros/omaha_request_action_unittest.cc @@ -135,7 +135,7 @@ struct FakeUpdateResponse { } string GetUpdateResponse() const { - chromeos_update_engine::OmahaRequestParams request_params{nullptr}; + chromeos_update_engine::OmahaRequestParams request_params; request_params.set_app_id(app_id); return "" @@ -379,6 +379,8 @@ struct TestUpdateCheckParams { class OmahaRequestActionTest : public ::testing::Test { protected: void SetUp() override { + FakeSystemState::CreateInstance(); + request_params_.set_os_sp("service_pack"); request_params_.set_os_board("x86-generic"); request_params_.set_app_id(kTestAppId); @@ -396,8 +398,8 @@ class OmahaRequestActionTest : public ::testing::Test { request_params_.set_is_install(false); request_params_.set_dlc_apps_params({}); - fake_system_state_.set_request_params(&request_params_); - fake_system_state_.set_prefs(&fake_prefs_); + FakeSystemState::Get()->set_request_params(&request_params_); + FakeSystemState::Get()->set_prefs(&fake_prefs_); // Setting the default update check params. Lookup |TestUpdateCheck()|. tuc_params_ = { @@ -413,7 +415,7 @@ class OmahaRequestActionTest : public ::testing::Test { .expected_download_error_code = metrics::DownloadErrorCode::kUnset, }; - ON_CALL(*fake_system_state_.mock_update_attempter(), GetExcluder()) + ON_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder()) .WillByDefault(Return(&mock_excluder_)); } @@ -456,10 +458,9 @@ class OmahaRequestActionTest : public ::testing::Test { const string& expected_p2p_url); StrictMock mock_excluder_; - FakeSystemState fake_system_state_; FakeUpdateResponse fake_update_response_; // Used by all tests. 
- OmahaRequestParams request_params_{&fake_system_state_}; + OmahaRequestParams request_params_; FakePrefs fake_prefs_; @@ -514,13 +515,12 @@ bool OmahaRequestActionTest::TestUpdateCheck() { if (tuc_params_.fail_http_response_code >= 0) { fetcher->FailTransfer(tuc_params_.fail_http_response_code); } - // This ensures the tests didn't forget to update fake_system_state_ if they + // This ensures the tests didn't forget to update |FakeSystemState| if they // are not using the default request_params_. - EXPECT_EQ(&request_params_, fake_system_state_.request_params()); + EXPECT_EQ(&request_params_, FakeSystemState::Get()->request_params()); auto omaha_request_action = - std::make_unique(&fake_system_state_, - nullptr, + std::make_unique(nullptr, std::move(fetcher), tuc_params_.ping_only, tuc_params_.session_id); @@ -557,14 +557,13 @@ bool OmahaRequestActionTest::TestUpdateCheck() { processor.EnqueueAction(std::move(omaha_request_action)); processor.EnqueueAction(std::move(collector_action)); - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), - ReportUpdateCheckMetrics(_, _, _, _)) + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), + ReportUpdateCheckMetrics(_, _, _)) .Times(AnyNumber()); EXPECT_CALL( - *fake_system_state_.mock_metrics_reporter(), - ReportUpdateCheckMetrics(_, - tuc_params_.expected_check_result, + *FakeSystemState::Get()->mock_metrics_reporter(), + ReportUpdateCheckMetrics(tuc_params_.expected_check_result, tuc_params_.expected_check_reaction, tuc_params_.expected_download_error_code)) .Times(tuc_params_.ping_only ? 0 : 1); @@ -589,7 +588,6 @@ void OmahaRequestActionTest::TestEvent(OmahaEvent* event, loop.SetAsCurrent(); auto action = std::make_unique( - &fake_system_state_, event, std::make_unique( http_response.data(), http_response.size(), nullptr), @@ -833,7 +831,7 @@ TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) { // Set up a connection manager that doesn't allow a valid update over // the current ethernet connection. MockConnectionManager mock_cm; - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kEthernet), @@ -855,7 +853,7 @@ TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) { // This test tests that update over cellular is allowed as device policy // says yes. MockConnectionManager mock_cm; - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular), @@ -877,7 +875,7 @@ TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) { // This test tests that update over cellular is blocked as device policy // says no. MockConnectionManager mock_cm; - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular), @@ -903,7 +901,7 @@ TEST_F(OmahaRequestActionTest, // is allowed as permission for update over cellular is set to true. 
MockConnectionManager mock_cm; fake_prefs_.SetBoolean(kPrefsUpdateOverCellularPermission, true); - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular), @@ -935,7 +933,7 @@ TEST_F(OmahaRequestActionTest, fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, diff_version); fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, diff_size); // This test tests cellular (3G) being the only connection type being allowed. - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular), @@ -968,7 +966,7 @@ TEST_F(OmahaRequestActionTest, fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, new_version); fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, new_size); - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); EXPECT_CALL(mock_cm, GetConnectionProperties(_, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular), @@ -989,7 +987,7 @@ TEST_F(OmahaRequestActionTest, TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) { string rollback_version = "1234.0.0"; MockPayloadState mock_payload_state; - fake_system_state_.set_payload_state(&mock_payload_state); + FakeSystemState::Get()->set_payload_state(&mock_payload_state); fake_update_response_.version = rollback_version; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy; @@ -1006,7 +1004,7 @@ TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) { // Verify that update checks called during OOBE will not try to download an // update if the response doesn't include the deadline field. TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE; tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; @@ -1022,8 +1020,8 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) { // Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not // enabled. TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); - fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEEnabled(false); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); ASSERT_TRUE(TestUpdateCheck()); @@ -1034,7 +1032,7 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) { // Verify that update checks called during OOBE will still try to download an // update if the response includes the deadline field. 
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); fake_update_response_.deadline = "20101020"; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); @@ -1047,14 +1045,15 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) { // update if a rollback happened, even when the response includes the deadline // field. TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); fake_update_response_.deadline = "20101020"; tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE; tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; - EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetRollbackHappened()) + EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), + GetRollbackHappened()) .WillOnce(Return(true)); ASSERT_FALSE(TestUpdateCheck()); @@ -1068,10 +1067,10 @@ TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) { // kOmahaUpdateIgnoredOverCellular error in this case might cause undesired UX // in OOBE (warning the user about an update that will be skipped). TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); MockConnectionManager mock_cm; - fake_system_state_.set_connection_manager(&mock_cm); + FakeSystemState::Get()->set_connection_manager(&mock_cm); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE; tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; @@ -1093,7 +1092,7 @@ TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) { request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_update_check_count_wait_enabled(false); request_params_.set_waiting_period(TimeDelta::FromDays(2)); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; @@ -1109,7 +1108,7 @@ TEST_F(OmahaRequestActionTest, request_params_.set_update_check_count_wait_enabled(false); request_params_.set_waiting_period(TimeDelta::FromDays(2)); request_params_.set_interactive(true); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); // Verify if we are interactive check we don't defer. 
@@ -1151,7 +1150,7 @@ TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) { request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(0); request_params_.set_max_update_checks_allowed(0); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); ASSERT_TRUE(TestUpdateCheck()); @@ -1168,7 +1167,7 @@ TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) { request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; @@ -1189,7 +1188,7 @@ TEST_F(OmahaRequestActionTest, request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); request_params_.set_interactive(true); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); // Verify if we are interactive check we don't defer. @@ -1204,7 +1203,7 @@ TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) { request_params_.set_update_check_count_wait_enabled(true); request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring; @@ -1228,7 +1227,7 @@ TEST_F(OmahaRequestActionTest, request_params_.set_min_update_checks_needed(1); request_params_.set_max_update_checks_allowed(8); request_params_.set_interactive(true); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5)); @@ -1244,7 +1243,7 @@ TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) { request_params_.set_wall_clock_based_wait_enabled(true); request_params_.set_waiting_period(TimeDelta::FromDays(6)); request_params_.set_update_check_count_wait_enabled(false); - fake_system_state_.fake_clock()->SetWallclockTime(Time::Now()); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(Time::Now()); ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, 6)); // This should not prevent scattering due to staging. 
@@ -1478,7 +1477,6 @@ TEST_F(OmahaRequestActionTest, NoOutputPipeTest) { loop.SetAsCurrent(); auto action = std::make_unique( - &fake_system_state_, nullptr, std::make_unique( http_response.data(), http_response.size(), nullptr), @@ -1614,7 +1612,6 @@ TEST_F(OmahaRequestActionTest, TerminateTransferTest) { string http_response("doesn't matter"); auto action = std::make_unique( - &fake_system_state_, nullptr, std::make_unique( http_response.data(), http_response.size(), nullptr), @@ -1693,7 +1690,7 @@ TEST_F(OmahaRequestActionTest, ParseIntTest) { TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); tuc_params_.http_response = "invalid xml>"; tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError; tuc_params_.expected_check_result = metrics::CheckResult::kParsingError; @@ -1748,7 +1745,6 @@ TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) { TEST_F(OmahaRequestActionTest, IsEventTest) { string http_response("doesn't matter"); OmahaRequestAction update_check_action( - &fake_system_state_, nullptr, std::make_unique( http_response.data(), http_response.size(), nullptr), @@ -1757,7 +1753,6 @@ TEST_F(OmahaRequestActionTest, IsEventTest) { EXPECT_FALSE(update_check_action.IsEvent()); OmahaRequestAction event_action( - &fake_system_state_, new OmahaEvent(OmahaEvent::kTypeUpdateComplete), std::make_unique( http_response.data(), http_response.size(), nullptr), @@ -1923,7 +1918,7 @@ TEST_F(OmahaRequestActionTest, TargetChannelHintTest) { void OmahaRequestActionTest::PingTest(bool ping_only) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _)) .Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); @@ -1967,7 +1962,7 @@ TEST_F(OmahaRequestActionTest, PingTestSendAlsoAnUpdateCheck) { TEST_F(OmahaRequestActionTest, ActivePingTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _)) .Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); @@ -1992,7 +1987,7 @@ TEST_F(OmahaRequestActionTest, ActivePingTest) { TEST_F(OmahaRequestActionTest, RollCallPingTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _)) .Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); @@ -2018,7 +2013,7 @@ TEST_F(OmahaRequestActionTest, RollCallPingTest) { TEST_F(OmahaRequestActionTest, NoPingTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _)) .Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); @@ -2049,7 +2044,7 @@ TEST_F(OmahaRequestActionTest, NoPingTest) { TEST_F(OmahaRequestActionTest, IgnoreEmptyPingTest) { // This test ensures that we ignore empty ping only requests. 
NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); int64_t now = Time::Now().ToInternalValue(); EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _)) .WillOnce(DoAll(SetArgPointee<1>(now), Return(true))); @@ -2069,7 +2064,7 @@ TEST_F(OmahaRequestActionTest, IgnoreEmptyPingTest) { TEST_F(OmahaRequestActionTest, BackInTimePingTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(kPrefsMetricsCheckLastReportingTime, _)) .Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); @@ -2108,7 +2103,7 @@ TEST_F(OmahaRequestActionTest, LastPingDayUpdateTest) { int64_t midnight_slack = (Time::Now() - TimeDelta::FromSeconds(195)).ToInternalValue(); NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, @@ -2133,7 +2128,7 @@ TEST_F(OmahaRequestActionTest, LastPingDayUpdateTest) { TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0); @@ -2152,7 +2147,7 @@ TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) { TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) { NiceMock prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(prefs, GetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0); @@ -2218,7 +2213,7 @@ TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) { Time arbitrary_date; ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date)); - fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(arbitrary_date); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy; @@ -2250,7 +2245,7 @@ TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) { ASSERT_TRUE(Time::FromString("1/3/2012", &t2)); ASSERT_TRUE( fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue())); - fake_system_state_.fake_clock()->SetWallclockTime(t2); + FakeSystemState::Get()->fake_clock()->SetWallclockTime(t2); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); @@ -2330,7 +2325,7 @@ TEST_F(OmahaRequestActionTest, PingWhenPowerwashed) { fake_prefs_.SetString(kPrefsPreviousVersion, ""); // Flag that the device was powerwashed in the past. - fake_system_state_.fake_hardware()->SetPowerwashCount(1); + FakeSystemState::Get()->fake_hardware()->SetPowerwashCount(1); tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; @@ -2347,10 +2342,10 @@ TEST_F(OmahaRequestActionTest, PingWhenFirstActiveOmahaPingIsSent) { fake_prefs_.SetString(kPrefsPreviousVersion, ""); // Flag that the device was not powerwashed in the past. 
- fake_system_state_.fake_hardware()->SetPowerwashCount(0); + FakeSystemState::Get()->fake_hardware()->SetPowerwashCount(0); // Flag that the device has sent first active ping in the past. - fake_system_state_.fake_hardware()->SetFirstActiveOmahaPingSent(); + FakeSystemState::Get()->fake_hardware()->SetFirstActiveOmahaPingSent(); tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; @@ -2404,7 +2399,7 @@ void OmahaRequestActionTest::P2PTest(bool initial_allow_p2p_for_downloading, string actual_p2p_url; MockPayloadState mock_payload_state; - fake_system_state_.set_payload_state(&mock_payload_state); + FakeSystemState::Get()->set_payload_state(&mock_payload_state); EXPECT_CALL(mock_payload_state, P2PAttemptAllowed()) .WillRepeatedly(Return(payload_state_allow_p2p_attempt)); EXPECT_CALL(mock_payload_state, GetUsingP2PForDownloading()) @@ -2419,7 +2414,7 @@ void OmahaRequestActionTest::P2PTest(bool initial_allow_p2p_for_downloading, .WillRepeatedly(SaveArg<0>(&actual_p2p_url)); MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetLookupUrlForFileResult(p2p_client_result_url); TimeDelta timeout = TimeDelta::FromSeconds(kMaxP2PNetworkWaitTimeSeconds); @@ -2537,7 +2532,7 @@ TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) { // deadline in the response is needed to force the update attempt to // occur; responses without a deadline seen during OOBE will normally // return ErrorCode::kNonCriticalUpdateInOOBE. - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); fake_update_response_.deadline = "20101020"; // Check that we parse elapsed_days in the Omaha Response correctly. @@ -2577,8 +2572,8 @@ TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) { // If there is no prefs and OOBE is not complete, we should not // report anything to Omaha. TEST_F(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); - EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), -1); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); + EXPECT_EQ(OmahaRequestAction::GetInstallDate(), -1); EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays)); } @@ -2588,8 +2583,8 @@ TEST_F(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE) { // nothing. TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithInvalidDate) { Time oobe_date = Time::FromTimeT(42); // Dec 31, 1969 16:00:42 PST. - fake_system_state_.fake_hardware()->SetIsOOBEComplete(oobe_date); - EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), -1); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(oobe_date); + EXPECT_EQ(OmahaRequestAction::GetInstallDate(), -1); EXPECT_FALSE(fake_prefs_.Exists(kPrefsInstallDateDays)); } @@ -2597,8 +2592,8 @@ TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithInvalidDate) { // should yield an InstallDate of 14. TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedWithValidDate) { Time oobe_date = Time::FromTimeT(1169280000); // Jan 20, 2007 0:00 PST. 
- fake_system_state_.fake_hardware()->SetIsOOBEComplete(oobe_date); - EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), 14); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(oobe_date); + EXPECT_EQ(OmahaRequestAction::GetInstallDate(), 14); EXPECT_TRUE(fake_prefs_.Exists(kPrefsInstallDateDays)); int64_t prefs_days; @@ -2615,8 +2610,8 @@ TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges) { EXPECT_TRUE(fake_prefs_.SetInt64(kPrefsInstallDateDays, 14)); Time oobe_date = Time::FromTimeT(1170144000); // Jan 30, 2007 0:00 PST. - fake_system_state_.fake_hardware()->SetIsOOBEComplete(oobe_date); - EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), 14); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(oobe_date); + EXPECT_EQ(OmahaRequestAction::GetInstallDate(), 14); int64_t prefs_days; EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days)); @@ -2624,7 +2619,7 @@ TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges) { // If we delete the prefs file, we should get 28 days. EXPECT_TRUE(fake_prefs_.Delete(kPrefsInstallDateDays)); - EXPECT_EQ(OmahaRequestAction::GetInstallDate(&fake_system_state_), 28); + EXPECT_EQ(OmahaRequestAction::GetInstallDate(), 28); EXPECT_TRUE(fake_prefs_.GetInt64(kPrefsInstallDateDays, &prefs_days)); EXPECT_EQ(prefs_days, 28); } @@ -2633,7 +2628,7 @@ TEST_F(OmahaRequestActionTest, GetInstallDateWhenOOBECompletedDateChanges) { // device sets the max kernel key version to the current version. // ie. the same behavior as if rollback is enabled. TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) { - FakeHardware* fake_hw = fake_system_state_.fake_hardware(); + FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware(); // Setup and verify some initial default values for the kernel TPM // values that control verified boot and rollback. @@ -2644,7 +2639,7 @@ TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) { EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward()); EXPECT_CALL( - *fake_system_state_.mock_metrics_reporter(), + *FakeSystemState::Get()->mock_metrics_reporter(), ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true)) .Times(1); @@ -2669,7 +2664,7 @@ TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) { // max kernel key version to the current version. ie. the same // behavior as if rollback is enabled. TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) { - FakeHardware* fake_hw = fake_system_state_.fake_hardware(); + FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware(); // Setup and verify some initial default values for the kernel TPM // values that control verified boot and rollback. @@ -2680,7 +2675,7 @@ TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) { EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward()); EXPECT_CALL( - *fake_system_state_.mock_metrics_reporter(), + *FakeSystemState::Get()->mock_metrics_reporter(), ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true)) .Times(1); @@ -2703,7 +2698,7 @@ TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) { // Verifies that a device with rollback enabled sets kernel_max_rollforward // in the TPM to prevent roll forward. 
TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) { - FakeHardware* fake_hw = fake_system_state_.fake_hardware(); + FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware(); // Setup and verify some initial default values for the kernel TPM // values that control verified boot and rollback. @@ -2715,7 +2710,7 @@ TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) { EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward()); EXPECT_CALL( - *fake_system_state_.mock_metrics_reporter(), + *FakeSystemState::Get()->mock_metrics_reporter(), ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true)) .Times(1); @@ -2740,7 +2735,7 @@ TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) { // Verifies that a device with rollback disabled sets kernel_max_rollforward // in the TPM to logical infinity, to allow roll forward. TEST_F(OmahaRequestActionTest, RollbackDisabledDevicesSetMaxRollback) { - FakeHardware* fake_hw = fake_system_state_.fake_hardware(); + FakeHardware* fake_hw = FakeSystemState::Get()->fake_hardware(); // Setup and verify some initial default values for the kernel TPM // values that control verified boot and rollback. @@ -2752,7 +2747,7 @@ TEST_F(OmahaRequestActionTest, RollbackDisabledDevicesSetMaxRollback) { EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward()); EXPECT_CALL( - *fake_system_state_.mock_metrics_reporter(), + *FakeSystemState::Get()->mock_metrics_reporter(), ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true)) .Times(1); @@ -2808,7 +2803,7 @@ TEST_F(OmahaRequestActionTest, FakeClock fake_clock; Time now = Time::Now(); fake_clock.SetWallclockTime(now); - fake_system_state_.set_clock(&fake_clock); + FakeSystemState::Get()->set_clock(&fake_clock); tuc_params_.http_response = fake_update_response_.GetUpdateResponse(); ASSERT_TRUE(TestUpdateCheck()); @@ -2827,7 +2822,7 @@ TEST_F(OmahaRequestActionTest, FakeClock fake_clock; Time now = Time::Now(); fake_clock.SetWallclockTime(now); - fake_system_state_.set_clock(&fake_clock); + FakeSystemState::Get()->set_clock(&fake_clock); tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse(); tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable; @@ -3053,8 +3048,8 @@ TEST_F(OmahaRequestActionTest, PersistEolDateTest) { ASSERT_TRUE(TestUpdateCheck()); string eol_date; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate, + &eol_date)); EXPECT_EQ("200", eol_date); } @@ -3068,13 +3063,13 @@ TEST_F(OmahaRequestActionTest, PersistEolMissingDateTest) { tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset; const string kDate = "123"; - fake_system_state_.prefs()->SetString(kPrefsOmahaEolDate, kDate); + FakeSystemState::Get()->prefs()->SetString(kPrefsOmahaEolDate, kDate); ASSERT_TRUE(TestUpdateCheck()); string eol_date; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate, + &eol_date)); EXPECT_EQ(kDate, eol_date); } @@ -3090,8 +3085,8 @@ TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) { ASSERT_TRUE(TestUpdateCheck()); string eol_date; - EXPECT_TRUE( - fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate, + &eol_date)); EXPECT_EQ(kEolDateInvalid, 
StringToEolDate(eol_date)); } diff --git a/cros/omaha_request_builder_xml.h b/cros/omaha_request_builder_xml.h index 0aca7f33..6bbc84e7 100644 --- a/cros/omaha_request_builder_xml.h +++ b/cros/omaha_request_builder_xml.h @@ -33,7 +33,6 @@ #include "update_engine/common/action.h" #include "update_engine/common/http_fetcher.h" -#include "update_engine/common/system_state.h" #include "update_engine/cros/omaha_request_params.h" #include "update_engine/cros/omaha_response.h" diff --git a/cros/omaha_request_builder_xml_unittest.cc b/cros/omaha_request_builder_xml_unittest.cc index c04c9944..74d616d3 100644 --- a/cros/omaha_request_builder_xml_unittest.cc +++ b/cros/omaha_request_builder_xml_unittest.cc @@ -61,10 +61,9 @@ static size_t CountSubstringInString(const string& str, const string& substr) { class OmahaRequestBuilderXmlTest : public ::testing::Test { protected: - void SetUp() override {} + void SetUp() override { FakeSystemState::CreateInstance(); } void TearDown() override {} - FakeSystemState fake_system_state_; static constexpr size_t kGuidSize = 36; }; @@ -94,7 +93,7 @@ TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) { } TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; omaha_request_params.set_device_requisition("device requisition"); OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, @@ -103,7 +102,7 @@ TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; OmahaAppData dlc_app_data = {.id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", .version = "", @@ -118,7 +117,7 @@ TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) { } TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; omaha_request_params.set_device_requisition("device requisition"); OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, @@ -127,7 +126,7 @@ TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; OmahaAppData dlc_app_data = { .id = "_dlc_id", .version = "", .skip_update = false, .is_dlc = true}; @@ -140,7 +139,7 @@ TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) { } TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, false, @@ -148,7 +147,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; const string request_xml = omaha_request.GetRequest(); const string key = "requestid"; @@ -161,7 +160,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) { TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { const string gen_session_id = base::GenerateGUID(); - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, false, @@ -169,7 +168,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), gen_session_id}; const string request_xml = omaha_request.GetRequest(); const string key = "sessionid"; @@ 
-183,7 +182,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) { } TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; OmahaRequestBuilderXml omaha_request{nullptr, &omaha_request_params, false, @@ -191,7 +190,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ(1, CountSubstringInString(request_xml, "prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ(3, CountSubstringInString(request_xml, " dlcs = { {omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}, {omaha_request_params.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}}; @@ -231,7 +230,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcInstallationTest) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ(2, CountSubstringInString(request_xml, "prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ(0, CountSubstringInString(request_xml, "prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ(1, CountSubstringInString(request_xml, "prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ(1, @@ -323,7 +322,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcPingRollCallAndActive) { } TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlUpdateCompleteEvent) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; OmahaEvent event(OmahaEvent::kTypeUpdateComplete); OmahaRequestBuilderXml omaha_request{&event, &omaha_request_params, @@ -332,7 +331,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlUpdateCompleteEvent) { 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; const string request_xml = omaha_request.GetRequest(); LOG(INFO) << request_xml; @@ -345,7 +344,7 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlUpdateCompleteEvent) { TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlUpdateCompleteEventSomeDlcsExcluded) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; omaha_request_params.set_dlc_apps_params({ {omaha_request_params.GetDlcAppId("dlc_1"), {.updated = true}}, {omaha_request_params.GetDlcAppId("dlc_2"), {.updated = false}}, @@ -358,7 +357,7 @@ TEST_F(OmahaRequestBuilderXmlTest, 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ( @@ -376,7 +375,7 @@ TEST_F(OmahaRequestBuilderXmlTest, TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlUpdateCompleteEventAllDlcsExcluded) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; omaha_request_params.set_dlc_apps_params({ {omaha_request_params.GetDlcAppId("dlc_1"), {.updated = false}}, {omaha_request_params.GetDlcAppId("dlc_2"), {.updated = false}}, @@ -389,7 +388,7 @@ TEST_F(OmahaRequestBuilderXmlTest, 0, 0, 0, - fake_system_state_.prefs(), + FakeSystemState::Get()->prefs(), ""}; const string request_xml = omaha_request.GetRequest(); EXPECT_EQ( @@ -406,11 +405,11 @@ TEST_F(OmahaRequestBuilderXmlTest, } TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortMissingCheck) { - OmahaRequestParams 
omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; constexpr char kDlcId[] = "test-dlc-id"; omaha_request_params.set_dlc_apps_params( {{omaha_request_params.GetDlcAppId(kDlcId), {.name = kDlcId}}}); - auto* mock_prefs = fake_system_state_.mock_prefs(); + auto* mock_prefs = FakeSystemState::Get()->mock_prefs(); OmahaEvent event(OmahaEvent::kTypeUpdateDownloadStarted); OmahaRequestBuilderXml omaha_request{ &event, &omaha_request_params, false, false, 0, 0, 0, mock_prefs, ""}; @@ -439,12 +438,12 @@ TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortMissingCheck) { } TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcCohortCheck) { - OmahaRequestParams omaha_request_params{&fake_system_state_}; + OmahaRequestParams omaha_request_params; const string kDlcId = "test-dlc-id"; omaha_request_params.set_dlc_apps_params( {{omaha_request_params.GetDlcAppId(kDlcId), {.name = kDlcId}}}); FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); OmahaEvent event(OmahaEvent::kTypeUpdateDownloadStarted); OmahaRequestBuilderXml omaha_request{ &event, &omaha_request_params, false, false, 0, 0, 0, &fake_prefs, ""}; diff --git a/cros/omaha_request_params.cc b/cros/omaha_request_params.cc index e7e719bd..0d69b24a 100644 --- a/cros/omaha_request_params.cc +++ b/cros/omaha_request_params.cc @@ -65,8 +65,8 @@ bool OmahaRequestParams::Init(const string& app_version, const string& update_url, const UpdateCheckParams& params) { LOG(INFO) << "Initializing parameters for this update attempt"; - image_props_ = LoadImageProperties(system_state_); - mutable_image_props_ = LoadMutableImageProperties(system_state_); + image_props_ = LoadImageProperties(); + mutable_image_props_ = LoadMutableImageProperties(); // Validation check the channel names. if (!IsValidChannel(image_props_.current_channel)) @@ -84,8 +84,8 @@ bool OmahaRequestParams::Init(const string& app_version, os_sp_ = image_props_.version + "_" + GetMachineType(); app_lang_ = "en-US"; - hwid_ = system_state_->hardware()->GetHardwareClass(); - device_requisition_ = system_state_->hardware()->GetDeviceRequisition(); + hwid_ = SystemState::Get()->hardware()->GetHardwareClass(); + device_requisition_ = SystemState::Get()->hardware()->GetDeviceRequisition(); if (image_props_.current_channel == mutable_image_props_.target_channel) { // deltas are only okay if the /.nodelta file does not exist. if we don't @@ -177,7 +177,7 @@ bool OmahaRequestParams::SetTargetChannel(const string& new_target_channel, new_props.target_channel = new_target_channel; new_props.is_powerwash_allowed = is_powerwash_allowed; - if (!StoreMutableImageProperties(system_state_, new_props)) { + if (!StoreMutableImageProperties(new_props)) { if (error_message) *error_message = "Error storing the new channel value."; return false; diff --git a/cros/omaha_request_params.h b/cros/omaha_request_params.h index fa452ce8..fd4c2e23 100644 --- a/cros/omaha_request_params.h +++ b/cros/omaha_request_params.h @@ -37,8 +37,6 @@ namespace chromeos_update_engine { -class SystemState; - // This class encapsulates the data Omaha gets for the request, along with // essential state needed for the processing of the request/response. The // strings in this struct should not be XML escaped. @@ -47,9 +45,8 @@ class SystemState; // reflect its lifetime more appropriately. 
class OmahaRequestParams { public: - explicit OmahaRequestParams(SystemState* system_state) - : system_state_(system_state), - os_platform_(constants::kOmahaPlatformName), + OmahaRequestParams() + : os_platform_(constants::kOmahaPlatformName), os_version_(kOsVersion), delta_okay_(true), interactive_(false), @@ -327,9 +324,6 @@ class OmahaRequestParams { // Gets the machine type (e.g. "i686"). std::string GetMachineType() const; - // Global system context. - SystemState* system_state_; - // The system image properties. ImageProperties image_props_; MutableImageProperties mutable_image_props_; diff --git a/cros/omaha_request_params_unittest.cc b/cros/omaha_request_params_unittest.cc index ff52fc2d..fbcd1a3e 100644 --- a/cros/omaha_request_params_unittest.cc +++ b/cros/omaha_request_params_unittest.cc @@ -38,24 +38,24 @@ namespace chromeos_update_engine { class OmahaRequestParamsTest : public ::testing::Test { public: - OmahaRequestParamsTest() : params_(&fake_system_state_) {} + OmahaRequestParamsTest() : params_() {} protected: void SetUp() override { // Create a uniquely named test directory. ASSERT_TRUE(tempdir_.CreateUniqueTempDir()); params_.set_root(tempdir_.GetPath().value()); + FakeSystemState::CreateInstance(); + FakeSystemState::Get()->set_prefs(&fake_prefs_); SetLockDown(false); - fake_system_state_.set_prefs(&fake_prefs_); } void SetLockDown(bool locked_down) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(locked_down); - fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(locked_down); + FakeSystemState::Get()->fake_hardware()->SetIsNormalBootMode(locked_down); } - FakeSystemState fake_system_state_; - OmahaRequestParams params_{&fake_system_state_}; + OmahaRequestParams params_; FakePrefs fake_prefs_; base::ScopedTempDir tempdir_; @@ -110,7 +110,7 @@ TEST_F(OmahaRequestParamsTest, NoDeltasTest) { TEST_F(OmahaRequestParamsTest, SetTargetChannelTest) { { - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_root(tempdir_.GetPath().value()); EXPECT_TRUE(params.Init("", "", {})); EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr)); @@ -124,7 +124,7 @@ TEST_F(OmahaRequestParamsTest, SetTargetChannelTest) { TEST_F(OmahaRequestParamsTest, SetIsPowerwashAllowedTest) { { - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_root(tempdir_.GetPath().value()); EXPECT_TRUE(params.Init("", "", {})); EXPECT_TRUE(params.SetTargetChannel("canary-channel", true, nullptr)); @@ -138,7 +138,7 @@ TEST_F(OmahaRequestParamsTest, SetIsPowerwashAllowedTest) { TEST_F(OmahaRequestParamsTest, SetTargetChannelInvalidTest) { { - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_root(tempdir_.GetPath().value()); SetLockDown(true); EXPECT_TRUE(params.Init("", "", {})); diff --git a/cros/omaha_response_handler_action.cc b/cros/omaha_response_handler_action.cc index 52142a34..6a51c776 100644 --- a/cros/omaha_response_handler_action.cc +++ b/cros/omaha_response_handler_action.cc @@ -27,6 +27,7 @@ #include "update_engine/common/constants.h" #include "update_engine/common/hardware_interface.h" #include "update_engine/common/prefs_interface.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" #include "update_engine/cros/connection_manager_interface.h" #include "update_engine/cros/omaha_request_params.h" @@ -43,10 +44,8 @@ using std::string; namespace 
chromeos_update_engine { -OmahaResponseHandlerAction::OmahaResponseHandlerAction( - SystemState* system_state) - : system_state_(system_state), - deadline_file_(constants::kOmahaResponseDeadlineFile) {} +OmahaResponseHandlerAction::OmahaResponseHandlerAction() + : deadline_file_(constants::kOmahaResponseDeadlineFile) {} void OmahaResponseHandlerAction::PerformAction() { CHECK(HasInputObject()); @@ -60,7 +59,7 @@ void OmahaResponseHandlerAction::PerformAction() { // All decisions as to which URL should be used have already been done. So, // make the current URL as the download URL. - string current_url = system_state_->payload_state()->GetCurrentUrl(); + string current_url = SystemState::Get()->payload_state()->GetCurrentUrl(); if (current_url.empty()) { // This shouldn't happen as we should always supply the HTTPS backup URL. // Handling this anyway, just in case. @@ -76,8 +75,9 @@ void OmahaResponseHandlerAction::PerformAction() { install_plan_.download_url = current_url; install_plan_.version = response.version; - OmahaRequestParams* const params = system_state_->request_params(); - PayloadStateInterface* const payload_state = system_state_->payload_state(); + OmahaRequestParams* const params = SystemState::Get()->request_params(); + PayloadStateInterface* const payload_state = + SystemState::Get()->payload_state(); // If we're using p2p to download and there is a local peer, use it. if (payload_state->GetUsingP2PForDownloading() && @@ -114,25 +114,28 @@ void OmahaResponseHandlerAction::PerformAction() { install_plan_.public_key_rsa = response.public_key_rsa; install_plan_.hash_checks_mandatory = AreHashChecksMandatory(response); install_plan_.is_resume = DeltaPerformer::CanResumeUpdate( - system_state_->prefs(), update_check_response_hash); + SystemState::Get()->prefs(), update_check_response_hash); if (install_plan_.is_resume) { payload_state->UpdateResumed(); } else { payload_state->UpdateRestarted(); LOG_IF(WARNING, - !DeltaPerformer::ResetUpdateProgress(system_state_->prefs(), false)) + !DeltaPerformer::ResetUpdateProgress(SystemState::Get()->prefs(), + false)) << "Unable to reset the update progress."; LOG_IF(WARNING, - !system_state_->prefs()->SetString(kPrefsUpdateCheckResponseHash, - update_check_response_hash)) + !SystemState::Get()->prefs()->SetString( + kPrefsUpdateCheckResponseHash, update_check_response_hash)) << "Unable to save the update check response hash."; } if (params->is_install()) { - install_plan_.target_slot = system_state_->boot_control()->GetCurrentSlot(); + install_plan_.target_slot = + SystemState::Get()->boot_control()->GetCurrentSlot(); install_plan_.source_slot = BootControlInterface::kInvalidSlot; } else { - install_plan_.source_slot = system_state_->boot_control()->GetCurrentSlot(); + install_plan_.source_slot = + SystemState::Get()->boot_control()->GetCurrentSlot(); install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0; } @@ -142,8 +145,8 @@ void OmahaResponseHandlerAction::PerformAction() { // downloaded from. string current_channel_key = kPrefsChannelOnSlotPrefix + std::to_string(install_plan_.target_slot); - system_state_->prefs()->SetString(current_channel_key, - params->download_channel()); + SystemState::Get()->prefs()->SetString(current_channel_key, + params->download_channel()); // Checking whether device is able to boot up the returned rollback image. if (response.is_rollback) { @@ -155,9 +158,9 @@ void OmahaResponseHandlerAction::PerformAction() { // Calculate the values on the version values on current device. 
     auto min_kernel_key_version = static_cast(
-        system_state_->hardware()->GetMinKernelKeyVersion());
+        SystemState::Get()->hardware()->GetMinKernelKeyVersion());
     auto min_firmware_key_version = static_cast(
-        system_state_->hardware()->GetMinFirmwareKeyVersion());
+        SystemState::Get()->hardware()->GetMinFirmwareKeyVersion());
     uint32_t kernel_key_version =
         static_cast(response.rollback_key_version.kernel_key) << 16 |
@@ -210,8 +213,8 @@ void OmahaResponseHandlerAction::PerformAction() {
     install_plan_.powerwash_required = true;
     // Always try to preserve enrollment and wifi data for enrolled devices.
     install_plan_.rollback_data_save_requested =
-        system_state_ && system_state_->device_policy() &&
-        system_state_->device_policy()->IsEnterpriseEnrolled();
+        SystemState::Get()->device_policy() &&
+        SystemState::Get()->device_policy()->IsEnterpriseEnrolled();
   }
 }
 
@@ -244,7 +247,7 @@ void OmahaResponseHandlerAction::PerformAction() {
   // Check the generated install-plan with the Policy to confirm that
   // it can be applied at this time (or at all).
-  UpdateManager* const update_manager = system_state_->update_manager();
+  UpdateManager* const update_manager = SystemState::Get()->update_manager();
   CHECK(update_manager);
   auto ec = ErrorCode::kSuccess;
   update_manager->PolicyRequest(
@@ -285,7 +288,7 @@ void OmahaResponseHandlerAction::PerformAction() {
               << " max_firmware_rollforward=" << max_firmware_rollforward
               << " rollback_allowed_milestones="
               << params->rollback_allowed_milestones();
-    system_state_->hardware()->SetMaxKernelKeyRollforward(
+    SystemState::Get()->hardware()->SetMaxKernelKeyRollforward(
         max_kernel_rollforward);
     // TODO(crbug/783998): Set max firmware rollforward when implemented.
   }
@@ -294,7 +297,8 @@ void OmahaResponseHandlerAction::PerformAction() {
               << " to infinity";
     // When rollback is not allowed, explicitly set the max roll forward to
     // infinity.
-    system_state_->hardware()->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+    SystemState::Get()->hardware()->SetMaxKernelKeyRollforward(
+        kRollforwardInfinity);
     // TODO(crbug/783998): Set max firmware rollforward when implemented.
   }
 }
 
@@ -314,8 +318,8 @@ bool OmahaResponseHandlerAction::AreHashChecksMandatory(
   //     devmode/debugd checks pass, in which case the hash is waived.
   //   * Dev/test image:
   //     - Any URL is allowed through with no hash checking.
-  if (!system_state_->request_params()->IsUpdateUrlOfficial() ||
-      !system_state_->hardware()->IsOfficialBuild()) {
+  if (!SystemState::Get()->request_params()->IsUpdateUrlOfficial() ||
+      !SystemState::Get()->hardware()->IsOfficialBuild()) {
     // Still do a hash check if a public key is included.
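The rollback checks above pack a 16-bit key version and a 16-bit version into one 32-bit value before comparing against, and writing to, the TPM rollforward field. A minimal standalone sketch of that packing (the helper name is hypothetical; update_engine does the shift inline, as the hunks above show):

  #include <cstdint>

  // Hypothetical helper mirroring the inline packing above:
  // high 16 bits = key version, low 16 bits = version.
  uint32_t PackKeyVersion(uint16_t key_version, uint16_t version) {
    return static_cast<uint32_t>(key_version) << 16 |
           static_cast<uint32_t>(version);
  }

  // Example: PackKeyVersion(1, 2) == 0x00010002, the same encoding the
  // unit tests below use with SetMinKernelKeyVersion(0x00010002).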
if (!response.public_key_rsa.empty()) { // The autoupdate_CatchBadSignatures test checks for this string diff --git a/cros/omaha_response_handler_action.h b/cros/omaha_response_handler_action.h index f3b821ef..9842c942 100644 --- a/cros/omaha_response_handler_action.h +++ b/cros/omaha_response_handler_action.h @@ -22,7 +22,6 @@ #include // for FRIEND_TEST #include "update_engine/common/action.h" -#include "update_engine/common/system_state.h" #include "update_engine/cros/omaha_request_action.h" #include "update_engine/payload_consumer/install_plan.h" @@ -42,7 +41,7 @@ class ActionTraits { class OmahaResponseHandlerAction : public Action { public: - explicit OmahaResponseHandlerAction(SystemState* system_state); + OmahaResponseHandlerAction(); typedef ActionTraits::InputObjectType InputObjectType; @@ -65,9 +64,6 @@ class OmahaResponseHandlerAction : public Action { // of the system and the contents of the Omaha response. False otherwise. bool AreHashChecksMandatory(const OmahaResponse& response); - // Global system context. - SystemState* system_state_; - // The install plan, if we have an update. InstallPlan install_plan_; diff --git a/cros/omaha_response_handler_action_unittest.cc b/cros/omaha_response_handler_action_unittest.cc index 74f4d048..8750724e 100644 --- a/cros/omaha_response_handler_action_unittest.cc +++ b/cros/omaha_response_handler_action_unittest.cc @@ -80,7 +80,9 @@ class OmahaResponseHandlerActionProcessorDelegate class OmahaResponseHandlerActionTest : public ::testing::Test { protected: void SetUp() override { - FakeBootControl* fake_boot_control = fake_system_state_.fake_boot_control(); + FakeSystemState::CreateInstance(); + FakeBootControl* fake_boot_control = + FakeSystemState::Get()->fake_boot_control(); fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 0, "/dev/sdz2"); fake_boot_control->SetPartitionDevice(kPartitionNameRoot, 0, "/dev/sdz3"); fake_boot_control->SetPartitionDevice(kPartitionNameKernel, 1, "/dev/sdz4"); @@ -100,7 +102,6 @@ class OmahaResponseHandlerActionTest : public ::testing::Test { // it in non-success cases. ErrorCode action_result_code_; - FakeSystemState fake_system_state_; // "Hash+" const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b}; }; @@ -136,25 +137,25 @@ bool OmahaResponseHandlerActionTest::DoTest(const OmahaResponse& in, string expected_hash; for (const auto& package : in.packages) expected_hash += package.hash + ":"; - EXPECT_CALL(*(fake_system_state_.mock_prefs()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_prefs()), SetString(kPrefsUpdateCheckResponseHash, expected_hash)) .WillOnce(Return(true)); int slot = - fake_system_state_.request_params()->is_install() - ? fake_system_state_.fake_boot_control()->GetCurrentSlot() - : 1 - fake_system_state_.fake_boot_control()->GetCurrentSlot(); + FakeSystemState::Get()->request_params()->is_install() + ? FakeSystemState::Get()->fake_boot_control()->GetCurrentSlot() + : 1 - FakeSystemState::Get()->fake_boot_control()->GetCurrentSlot(); string key = kPrefsChannelOnSlotPrefix + std::to_string(slot); - EXPECT_CALL(*(fake_system_state_.mock_prefs()), SetString(key, testing::_)) + EXPECT_CALL(*(FakeSystemState::Get()->mock_prefs()), + SetString(key, testing::_)) .WillOnce(Return(true)); } string current_url = in.packages.size() ? 
in.packages[0].payload_urls[0] : ""; - EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl()) + EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), GetCurrentUrl()) .WillRepeatedly(Return(current_url)); - auto response_handler_action = - std::make_unique(&fake_system_state_); + auto response_handler_action = std::make_unique(); if (!test_deadline_file.empty()) response_handler_action->deadline_file_ = test_deadline_file; @@ -225,7 +226,7 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { in.prompt = true; InstallPlan install_plan; // Set the other slot as current. - fake_system_state_.fake_boot_control()->SetCurrentSlot(1); + FakeSystemState::Get()->fake_boot_control()->SetCurrentSlot(1); EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash); @@ -250,10 +251,10 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { in.prompt = true; in.deadline = "some-deadline"; InstallPlan install_plan; - fake_system_state_.fake_boot_control()->SetCurrentSlot(0); + FakeSystemState::Get()->fake_boot_control()->SetCurrentSlot(0); // Because rollback happened, the deadline shouldn't be written into the // file. - EXPECT_CALL(*(fake_system_state_.mock_payload_state()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), GetRollbackHappened()) .WillOnce(Return(true)); EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); @@ -280,8 +281,8 @@ TEST_F(OmahaResponseHandlerActionTest, SimpleTest) { in.prompt = true; in.deadline = "some-deadline"; InstallPlan install_plan; - fake_system_state_.fake_boot_control()->SetCurrentSlot(0); - EXPECT_CALL(*(fake_system_state_.mock_payload_state()), + FakeSystemState::Get()->fake_boot_control()->SetCurrentSlot(0); + EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), GetRollbackHappened()) .WillOnce(Return(false)); EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan)); @@ -315,10 +316,10 @@ TEST_F(OmahaResponseHandlerActionTest, InstallTest) { {.payload_urls = {kLongName}, .size = 2, .hash = kPayloadHashHex}); in.more_info_url = "http://more/info"; - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_is_install(true); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(install_plan.source_slot, UINT_MAX); @@ -366,7 +367,7 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) { .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; // Hash checks are always skipped for non-official update URLs. 
- EXPECT_CALL(*(fake_system_state_.mock_request_params()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()), IsUpdateUrlOfficial()) .WillRepeatedly(Return(true)); InstallPlan install_plan; @@ -390,7 +391,7 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForUnofficialUpdateUrl) { .app_id = kPayloadAppId, .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; - EXPECT_CALL(*(fake_system_state_.mock_request_params()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()), IsUpdateUrlOfficial()) .WillRepeatedly(Return(false)); InstallPlan install_plan; @@ -416,10 +417,10 @@ TEST_F(OmahaResponseHandlerActionTest, .app_id = kPayloadAppId, .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; - EXPECT_CALL(*(fake_system_state_.mock_request_params()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()), IsUpdateUrlOfficial()) .WillRepeatedly(Return(true)); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url); @@ -441,7 +442,7 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpsTest) { .app_id = kPayloadAppId, .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; - EXPECT_CALL(*(fake_system_state_.mock_request_params()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()), IsUpdateUrlOfficial()) .WillRepeatedly(Return(true)); InstallPlan install_plan; @@ -466,7 +467,7 @@ TEST_F(OmahaResponseHandlerActionTest, HashChecksForBothHttpAndHttpsTest) { .app_id = kPayloadAppId, .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; - EXPECT_CALL(*(fake_system_state_.mock_request_params()), + EXPECT_CALL(*(FakeSystemState::Get()->mock_request_params()), IsUpdateUrlOfficial()) .WillRepeatedly(Return(true)); InstallPlan install_plan; @@ -493,15 +494,15 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("canary-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("2.0.0.0"); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_TRUE(install_plan.powerwash_required); @@ -521,15 +522,15 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("canary-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("2.0.0.0"); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.powerwash_required); @@ -549,15 +550,15 @@ 
TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("beta-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("12345.48.0.0"); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.powerwash_required); @@ -577,15 +578,15 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("beta-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("12345.0.0.0"); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.powerwash_required); @@ -608,8 +609,8 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true); params.set_root(tempdir.GetPath().value()); params.set_current_channel("beta-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); @@ -619,9 +620,9 @@ TEST_F(OmahaResponseHandlerActionTest, testing::NiceMock mock_device_policy; EXPECT_CALL(mock_device_policy, IsEnterpriseEnrolled()) .WillOnce(Return(true)); - fake_system_state_.set_device_policy(&mock_device_policy); + FakeSystemState::Get()->set_device_policy(&mock_device_policy); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_TRUE(install_plan.rollback_data_save_requested); @@ -642,8 +643,8 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true); params.set_root(tempdir.GetPath().value()); params.set_current_channel("beta-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr)); @@ -653,9 +654,9 @@ TEST_F(OmahaResponseHandlerActionTest, testing::NiceMock mock_device_policy; EXPECT_CALL(mock_device_policy, IsEnterpriseEnrolled()) .WillOnce(Return(false)); - fake_system_state_.set_device_policy(&mock_device_policy); + FakeSystemState::Get()->set_device_policy(&mock_device_policy); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", 
&install_plan)); EXPECT_FALSE(install_plan.rollback_data_save_requested); @@ -675,15 +676,15 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true); params.set_root(tempdir.GetPath().value()); params.set_current_channel("beta-channel"); EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("12347.48.0.0"); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.rollback_data_save_requested); @@ -703,15 +704,15 @@ TEST_F(OmahaResponseHandlerActionTest, base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - OmahaRequestParams params(&fake_system_state_); - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + OmahaRequestParams params; + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); params.set_root(tempdir.GetPath().value()); params.set_current_channel("stable-channel"); EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr)); params.UpdateDownloadChannel(); params.set_app_version("1.0.0.0"); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.powerwash_required); @@ -729,20 +730,20 @@ TEST_F(OmahaResponseHandlerActionTest, P2PUrlIsUsedAndHashChecksMandatory) { .fp = kPayloadFp1}); in.more_info_url = "http://more/info"; - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; // We're using a real OmahaRequestParams object here so we can't mock // IsUpdateUrlOfficial(), but setting the update URL to the AutoUpdate test // server will cause IsUpdateUrlOfficial() to return true. params.set_update_url(constants::kOmahaDefaultAUTestURL); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), SetUsingP2PForDownloading(true)); string p2p_url = "http://9.8.7.6/p2p"; - EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetP2PUrl()) + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), GetP2PUrl()) .WillRepeatedly(Return(p2p_url)); - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), GetUsingP2PForDownloading()) .WillRepeatedly(Return(true)); @@ -777,17 +778,18 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackTest) { in.past_rollback_key_version = m4; - fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002); - fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004); + FakeSystemState::Get()->fake_hardware()->SetMinKernelKeyVersion(0x00010002); + FakeSystemState::Get()->fake_hardware()->SetMinFirmwareKeyVersion(0x00030004); - fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(0xaaaaaaaa); + FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward( + 0xaaaaaaaa); // TODO(crbug/783998): Add support for firmware when implemented. 
-  OmahaRequestParams params(&fake_system_state_);
+  OmahaRequestParams params;
   params.set_rollback_allowed(true);
   params.set_rollback_allowed_milestones(4);
-  fake_system_state_.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
   InstallPlan install_plan;
   EXPECT_TRUE(DoTest(in, "", &install_plan));
   EXPECT_TRUE(install_plan.is_rollback);
@@ -797,8 +799,9 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackTest) {
   const uint32_t expected_max_kernel_rollforward =
       static_cast(m4.kernel_key) << 16 |
       static_cast(m4.kernel);
-  EXPECT_EQ(expected_max_kernel_rollforward,
-            fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+  EXPECT_EQ(
+      expected_max_kernel_rollforward,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
   // TODO(crbug/783998): Add support for firmware when implemented.
 }
 
@@ -821,23 +824,24 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) {
   m4.kernel = 13;
   in.past_rollback_key_version = m4;
 
-  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
-  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+  FakeSystemState::Get()->fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  FakeSystemState::Get()->fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
   const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
-  fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(
+  FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward(
       current_kernel_max_rollforward);
 
-  OmahaRequestParams params(&fake_system_state_);
+  OmahaRequestParams params;
   params.set_rollback_allowed(true);
   params.set_rollback_allowed_milestones(4);
-  fake_system_state_.set_request_params(&params);
+  FakeSystemState::Get()->set_request_params(&params);
   InstallPlan install_plan;
   EXPECT_FALSE(DoTest(in, "", &install_plan));
 
   // Max rollforward is not changed in error cases.
-  EXPECT_EQ(current_kernel_max_rollforward,
-            fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+  EXPECT_EQ(
+      current_kernel_max_rollforward,
+      FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward());
   // TODO(crbug/783998): Add support for firmware when implemented.
 }
 
@@ -855,14 +859,14 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) {
   in.rollback_key_version.firmware_key = 3;
   in.rollback_key_version.firmware = 3;  // This is lower than the minimum.
- fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002); - fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004); + FakeSystemState::Get()->fake_hardware()->SetMinKernelKeyVersion(0x00010002); + FakeSystemState::Get()->fake_hardware()->SetMinFirmwareKeyVersion(0x00030004); - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_rollback_allowed(true); params.set_rollback_allowed_milestones(4); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_FALSE(DoTest(in, "", &install_plan)); } @@ -876,21 +880,22 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackNotRollbackTest) { in.is_rollback = false; const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; - fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward( current_kernel_max_rollforward); - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_rollback_allowed(true); params.set_rollback_allowed_milestones(4); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); EXPECT_FALSE(install_plan.is_rollback); // Max rollforward is not changed for non-rollback cases. - EXPECT_EQ(current_kernel_max_rollforward, - fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + EXPECT_EQ( + current_kernel_max_rollforward, + FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward()); // TODO(crbug/783998): Add support for firmware when implemented. } @@ -902,21 +907,22 @@ TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) { .hash = kPayloadHashHex}); in.is_rollback = true; - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_rollback_allowed(false); params.set_rollback_allowed_milestones(4); const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; - fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward( current_kernel_max_rollforward); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_FALSE(DoTest(in, "", &install_plan)); // This case generates an error so, do not update max rollforward. - EXPECT_EQ(current_kernel_max_rollforward, - fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + EXPECT_EQ( + current_kernel_max_rollforward, + FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward()); // TODO(crbug/783998): Add support for firmware when implemented. } @@ -928,21 +934,22 @@ TEST_F(OmahaResponseHandlerActionTest, NormalUpdateWithZeroMilestonesAllowed) { .hash = kPayloadHashHex}); in.is_rollback = false; - OmahaRequestParams params(&fake_system_state_); + OmahaRequestParams params; params.set_rollback_allowed(true); params.set_rollback_allowed_milestones(0); const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa; - fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward( + FakeSystemState::Get()->fake_hardware()->SetMaxKernelKeyRollforward( current_kernel_max_rollforward); - fake_system_state_.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); InstallPlan install_plan; EXPECT_TRUE(DoTest(in, "", &install_plan)); // When allowed_milestones is 0, this is set to infinity. 
- EXPECT_EQ(kRollforwardInfinity, - fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward()); + EXPECT_EQ( + kRollforwardInfinity, + FakeSystemState::Get()->fake_hardware()->GetMaxKernelKeyRollforward()); // TODO(crbug/783998): Add support for firmware when implemented. } @@ -989,7 +996,7 @@ TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) { FakeClock fake_clock; MockPolicy* mock_policy = new MockPolicy(&fake_clock); FakeUpdateManager* fake_update_manager = - fake_system_state_.fake_update_manager(); + FakeSystemState::Get()->fake_update_manager(); fake_update_manager->set_policy(mock_policy); EXPECT_CALL(*mock_policy, UpdateCanBeApplied(_, _, _, _, _)) .WillOnce( diff --git a/cros/payload_state.cc b/cros/payload_state.cc index d2e6851e..d7de6e62 100644 --- a/cros/payload_state.cc +++ b/cros/payload_state.cc @@ -78,11 +78,10 @@ PayloadState::PayloadState() total_bytes_downloaded_[i] = current_bytes_downloaded_[i] = 0; } -bool PayloadState::Initialize(SystemState* system_state) { - system_state_ = system_state; - prefs_ = system_state_->prefs(); - powerwash_safe_prefs_ = system_state_->powerwash_safe_prefs(); - excluder_ = system_state_->update_attempter()->GetExcluder(); +bool PayloadState::Initialize() { + prefs_ = SystemState::Get()->prefs(); + powerwash_safe_prefs_ = SystemState::Get()->powerwash_safe_prefs(); + excluder_ = SystemState::Get()->update_attempter()->GetExcluder(); LoadResponseSignature(); LoadPayloadAttemptNumber(); LoadFullPayloadAttemptNumber(); @@ -197,7 +196,7 @@ void PayloadState::AttemptStarted(AttemptType attempt_type) { attempt_type_ = attempt_type; - ClockInterface* clock = system_state_->clock(); + ClockInterface* clock = SystemState::Get()->clock(); attempt_start_time_boot_ = clock->GetBootTime(); attempt_start_time_monotonic_ = clock->GetMonotonicTime(); attempt_num_bytes_downloaded_ = 0; @@ -206,7 +205,7 @@ void PayloadState::AttemptStarted(AttemptType attempt_type) { ConnectionType network_connection_type; ConnectionTethering tethering; ConnectionManagerInterface* connection_manager = - system_state_->connection_manager(); + SystemState::Get()->connection_manager(); if (!connection_manager->GetConnectionProperties(&network_connection_type, &tethering)) { LOG(ERROR) << "Failed to determine connection type."; @@ -236,7 +235,7 @@ void PayloadState::UpdateRestarted() { void PayloadState::UpdateSucceeded() { // Send the relevant metrics that are tracked in this class to UMA. 
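These payload_state.cc hunks show the shape of the whole refactor: PayloadState::Initialize() no longer takes a SystemState*, and every former system_state_-> call goes through the static SystemState::Get() accessor instead. A minimal sketch of that accessor pattern, with illustrative names only (not the actual update_engine declarations):

  #include <cassert>

  // Illustrative only: a process-wide context object created once and
  // reached through a static Get(), replacing a pointer threaded through
  // every constructor and Initialize() call.
  class GlobalContext {
   public:
    static void CreateInstance() {
      static GlobalContext instance;
      instance_ = &instance;
    }
    static GlobalContext* Get() {
      assert(instance_ != nullptr);  // CreateInstance() must run first.
      return instance_;
    }

   private:
    GlobalContext() = default;
    static GlobalContext* instance_;
  };

  GlobalContext* GlobalContext::instance_ = nullptr;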
CalculateUpdateDurationUptime(); - SetUpdateTimestampEnd(system_state_->clock()->GetWallclockTime()); + SetUpdateTimestampEnd(SystemState::Get()->clock()->GetWallclockTime()); switch (attempt_type_) { case AttemptType::kUpdate: @@ -246,7 +245,7 @@ void PayloadState::UpdateSucceeded() { break; case AttemptType::kRollback: - system_state_->metrics_reporter()->ReportRollbackMetrics( + SystemState::Get()->metrics_reporter()->ReportRollbackMetrics( metrics::RollbackResult::kSuccess); break; } @@ -256,7 +255,7 @@ void PayloadState::UpdateSucceeded() { SetNumResponsesSeen(0); SetPayloadIndex(0); - metrics_utils::SetSystemUpdatedMarker(system_state_->clock(), prefs_); + metrics_utils::SetSystemUpdatedMarker(SystemState::Get()->clock(), prefs_); } void PayloadState::UpdateFailed(ErrorCode error) { @@ -279,7 +278,7 @@ void PayloadState::UpdateFailed(ErrorCode error) { break; case AttemptType::kRollback: - system_state_->metrics_reporter()->ReportRollbackMetrics( + SystemState::Get()->metrics_reporter()->ReportRollbackMetrics( metrics::RollbackResult::kFailed); break; } @@ -409,7 +408,7 @@ bool PayloadState::ShouldBackoffDownload() { << "will happen from local peer (via p2p)."; return false; } - if (system_state_->request_params()->interactive()) { + if (SystemState::Get()->request_params()->interactive()) { LOG(INFO) << "Payload backoff disabled for interactive update checks."; return false; } @@ -425,7 +424,7 @@ bool PayloadState::ShouldBackoffDownload() { } } - if (!system_state_->hardware()->IsOfficialBuild() && + if (!SystemState::Get()->hardware()->IsOfficialBuild() && !prefs_->Exists(kPrefsNoIgnoreBackoff)) { // Backoffs are needed only for official builds. We do not want any delays // or update failures due to backoffs during testing or development. Unless @@ -454,7 +453,7 @@ bool PayloadState::ShouldBackoffDownload() { } void PayloadState::Rollback() { - SetRollbackVersion(system_state_->request_params()->app_version()); + SetRollbackVersion(SystemState::Get()->request_params()->app_version()); AttemptStarted(AttemptType::kRollback); } @@ -612,7 +611,7 @@ PayloadType PayloadState::CalculatePayloadType() { return kPayloadTypeDelta; } } - OmahaRequestParams* params = system_state_->request_params(); + OmahaRequestParams* params = SystemState::Get()->request_params(); if (params->delta_okay()) { return kPayloadTypeFull; } @@ -629,7 +628,7 @@ void PayloadState::CollectAndReportAttemptMetrics(ErrorCode code) { int64_t payload_bytes_downloaded = attempt_num_bytes_downloaded_; - ClockInterface* clock = system_state_->clock(); + ClockInterface* clock = SystemState::Get()->clock(); TimeDelta duration = clock->GetBootTime() - attempt_start_time_boot_; TimeDelta duration_uptime = clock->GetMonotonicTime() - attempt_start_time_monotonic_; @@ -680,8 +679,7 @@ void PayloadState::CollectAndReportAttemptMetrics(ErrorCode code) { break; } - system_state_->metrics_reporter()->ReportUpdateAttemptMetrics( - system_state_, + SystemState::Get()->metrics_reporter()->ReportUpdateAttemptMetrics( attempt_number, payload_type, duration, @@ -690,7 +688,7 @@ void PayloadState::CollectAndReportAttemptMetrics(ErrorCode code) { attempt_result, internal_error_code); - system_state_->metrics_reporter()->ReportUpdateAttemptDownloadMetrics( + SystemState::Get()->metrics_reporter()->ReportUpdateAttemptDownloadMetrics( payload_bytes_downloaded, payload_download_speed_bps, download_source, @@ -720,7 +718,8 @@ void PayloadState::ReportAndClearPersistedAttemptMetrics() { if (!attempt_in_progress) return; - 
system_state_->metrics_reporter() + SystemState::Get() + ->metrics_reporter() ->ReportAbnormallyTerminatedUpdateAttemptMetrics(); ClearPersistedAttemptMetrics(); @@ -784,7 +783,7 @@ void PayloadState::CollectAndReportSuccessfulUpdateMetrics() { int updates_abandoned_count = num_responses_seen_ - 1; - system_state_->metrics_reporter()->ReportSuccessfulUpdateMetrics( + SystemState::Get()->metrics_reporter()->ReportSuccessfulUpdateMetrics( attempt_count, updates_abandoned_count, payload_type, @@ -800,7 +799,7 @@ void PayloadState::CollectAndReportSuccessfulUpdateMetrics() { void PayloadState::UpdateNumReboots() { // We only update the reboot count when the system has been detected to have // been rebooted. - if (!system_state_->system_rebooted()) { + if (!SystemState::Get()->system_rebooted()) { return; } @@ -820,7 +819,7 @@ void PayloadState::ResetPersistedState() { SetUrlFailureCount(0); SetUrlSwitchCount(0); UpdateBackoffExpiryTime(); // This will reset the backoff expiry time. - SetUpdateTimestampStart(system_state_->clock()->GetWallclockTime()); + SetUpdateTimestampStart(SystemState::Get()->clock()->GetWallclockTime()); SetUpdateTimestampEnd(Time()); // Set to null time SetUpdateDurationUptime(TimeDelta::FromSeconds(0)); ResetDownloadSourcesOnNewUpdate(); @@ -1040,7 +1039,7 @@ void PayloadState::SetBackoffExpiryTime(const Time& new_time) { TimeDelta PayloadState::GetUpdateDuration() { Time end_time = update_timestamp_end_.is_null() - ? system_state_->clock()->GetWallclockTime() + ? SystemState::Get()->clock()->GetWallclockTime() : update_timestamp_end_; return end_time - update_timestamp_start_; } @@ -1051,7 +1050,7 @@ void PayloadState::LoadUpdateTimestampStart() { CHECK(prefs_); - Time now = system_state_->clock()->GetWallclockTime(); + Time now = SystemState::Get()->clock()->GetWallclockTime(); if (!prefs_->Exists(kPrefsUpdateTimestampStart)) { // The preference missing is not unexpected - in that case, just @@ -1180,12 +1179,12 @@ void PayloadState::SetUpdateDurationUptimeExtended(const TimeDelta& value, } void PayloadState::SetUpdateDurationUptime(const TimeDelta& value) { - Time now = system_state_->clock()->GetMonotonicTime(); + Time now = SystemState::Get()->clock()->GetMonotonicTime(); SetUpdateDurationUptimeExtended(value, now, true); } void PayloadState::CalculateUpdateDurationUptime() { - Time now = system_state_->clock()->GetMonotonicTime(); + Time now = SystemState::Get()->clock()->GetMonotonicTime(); TimeDelta uptime_since_last_update = now - update_duration_uptime_timestamp_; if (uptime_since_last_update > TimeDelta::FromSeconds(kUptimeResolution)) { @@ -1259,8 +1258,8 @@ void PayloadState::SetNumResponsesSeen(int num_responses_seen) { void PayloadState::ComputeCandidateUrls() { bool http_url_ok = true; - if (system_state_->hardware()->IsOfficialBuild()) { - const policy::DevicePolicy* policy = system_state_->device_policy(); + if (SystemState::Get()->hardware()->IsOfficialBuild()) { + const policy::DevicePolicy* policy = SystemState::Get()->device_policy(); if (policy && policy->GetHttpDownloadsEnabled(&http_url_ok) && !http_url_ok) LOG(INFO) << "Downloads via HTTP Url are not enabled by device policy"; } else { @@ -1293,12 +1292,14 @@ void PayloadState::UpdateEngineStarted() { // Avoid the UpdateEngineStarted actions if this is not the first time we // run the update engine since reboot. - if (!system_state_->system_rebooted()) + if (!SystemState::Get()->system_rebooted()) return; // Report time_to_reboot if we booted into a new update. 
metrics_utils::LoadAndReportTimeToReboot( - system_state_->metrics_reporter(), prefs_, system_state_->clock()); + SystemState::Get()->metrics_reporter(), + prefs_, + SystemState::Get()->clock()); prefs_->Delete(kPrefsSystemUpdatedMarker); // Check if it is needed to send metrics about a failed reboot into a new @@ -1323,7 +1324,8 @@ void PayloadState::ReportFailedBootIfNeeded() { // since we successfully booted the new update in that case. If the boot // failed, we will read this value from the same version, so it will always // be compatible. - if (installed_from == system_state_->boot_control()->GetCurrentSlot()) { + if (installed_from == + SystemState::Get()->boot_control()->GetCurrentSlot()) { // A reboot was pending, but the chromebook is again in the same // BootDevice where the update was installed from. int64_t target_attempt; @@ -1334,7 +1336,7 @@ void PayloadState::ReportFailedBootIfNeeded() { } // Report the UMA metric of the current boot failure. - system_state_->metrics_reporter()->ReportFailedUpdateCount( + SystemState::Get()->metrics_reporter()->ReportFailedUpdateCount( target_attempt); } else { prefs_->Delete(kPrefsTargetVersionAttempt); @@ -1365,7 +1367,7 @@ void PayloadState::ExpectRebootInNewVersion(const string& target_version_uid) { prefs_->SetInt64(kPrefsTargetVersionAttempt, target_attempt + 1); prefs_->SetInt64(kPrefsTargetVersionInstalledFrom, - system_state_->boot_control()->GetCurrentSlot()); + SystemState::Get()->boot_control()->GetCurrentSlot()); } void PayloadState::ResetUpdateStatus() { @@ -1419,7 +1421,8 @@ void PayloadState::P2PNewAttempt() { CHECK(prefs_); // Set timestamp, if it hasn't been set already if (p2p_first_attempt_timestamp_.is_null()) { - SetP2PFirstAttemptTimestamp(system_state_->clock()->GetWallclockTime()); + SetP2PFirstAttemptTimestamp( + SystemState::Get()->clock()->GetWallclockTime()); } // Increase number of attempts SetP2PNumAttempts(GetP2PNumAttempts() + 1); @@ -1434,7 +1437,7 @@ bool PayloadState::P2PAttemptAllowed() { } if (!p2p_first_attempt_timestamp_.is_null()) { - Time now = system_state_->clock()->GetWallclockTime(); + Time now = SystemState::Get()->clock()->GetWallclockTime(); TimeDelta time_spent_attempting_p2p = now - p2p_first_attempt_timestamp_; if (time_spent_attempting_p2p.InSeconds() < 0) { LOG(ERROR) << "Time spent attempting p2p is negative" diff --git a/cros/payload_state.h b/cros/payload_state.h index 08272730..ad190742 100644 --- a/cros/payload_state.h +++ b/cros/payload_state.h @@ -48,7 +48,7 @@ class PayloadState : public PayloadStateInterface { // It performs the initial loading of all persisted state into memory and // dumps the initial state for debugging purposes. Note: the other methods // should be called only after calling Initialize on this object. - bool Initialize(SystemState* system_state); + bool Initialize(); // Implementation of PayloadStateInterface methods. void SetResponse(const OmahaResponse& response) override; @@ -429,9 +429,6 @@ class PayloadState : public PayloadStateInterface { // Get the total size of all payloads. int64_t GetPayloadSize(); - // The global state of the system. - SystemState* system_state_; - // Interface object with which we read/write persisted state. This must // be set by calling the Initialize method before calling any other method. 
PrefsInterface* prefs_; diff --git a/cros/payload_state_unittest.cc b/cros/payload_state_unittest.cc index 107c6e2b..edcb9d6d 100644 --- a/cros/payload_state_unittest.cc +++ b/cros/payload_state_unittest.cc @@ -106,12 +106,14 @@ static void SetupPayloadStateWith2Urls(string hash, EXPECT_EQ(expected_response_sign, stored_response_sign); } -class PayloadStateTest : public ::testing::Test {}; +class PayloadStateTest : public ::testing::Test { + public: + void SetUp() { FakeSystemState::CreateInstance(); } +}; TEST_F(PayloadStateTest, SetResponseWorksWithEmptyResponse) { OmahaResponse response; - FakeSystemState fake_system_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0)) .Times(AtLeast(1)); @@ -133,7 +135,7 @@ TEST_F(PayloadStateTest, SetResponseWorksWithEmptyResponse) { .Times(AtLeast(1)); EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1)); PayloadState payload_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); payload_state.SetResponse(response); string stored_response_sign = payload_state.GetResponseSignature(); string expected_response_sign = @@ -153,8 +155,7 @@ TEST_F(PayloadStateTest, SetResponseWorksWithSingleUrl) { .metadata_size = 58123, .metadata_signature = "msign", .hash = "hash"}); - FakeSystemState fake_system_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0)) .Times(AtLeast(1)); @@ -176,7 +177,7 @@ TEST_F(PayloadStateTest, SetResponseWorksWithSingleUrl) { .Times(AtLeast(1)); EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1)); PayloadState payload_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); payload_state.SetResponse(response); string stored_response_sign = payload_state.GetResponseSignature(); string expected_response_sign = @@ -205,8 +206,7 @@ TEST_F(PayloadStateTest, SetResponseWorksWithMultipleUrls) { .metadata_size = 558123, .metadata_signature = "metasign", .hash = "rhash"}); - FakeSystemState fake_system_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0)) .Times(AtLeast(1)); @@ -225,7 +225,7 @@ TEST_F(PayloadStateTest, SetResponseWorksWithMultipleUrls) { EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1)); PayloadState payload_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); payload_state.SetResponse(response); string stored_response_sign = payload_state.GetResponseSignature(); string expected_response_sign = @@ -249,8 +249,7 @@ TEST_F(PayloadStateTest, SetResponseWorksWithMultipleUrls) { TEST_F(PayloadStateTest, CanAdvanceUrlIndexCorrectly) { OmahaResponse response; - FakeSystemState fake_system_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); PayloadState payload_state; EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); @@ -277,7 +276,7 @@ TEST_F(PayloadStateTest, CanAdvanceUrlIndexCorrectly) { EXPECT_CALL(*prefs, 
SetInt64(kPrefsCurrentUrlFailureCount, 0)) .Times(AtLeast(4)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); // This does a SetResponse which causes all the states to be set to 0 for // the first time. @@ -304,10 +303,9 @@ TEST_F(PayloadStateTest, CanAdvanceUrlIndexCorrectly) { TEST_F(PayloadStateTest, NewResponseResetsPayloadState) { OmahaResponse response; - FakeSystemState fake_system_state; PayloadState payload_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); // Set the first response. SetupPayloadStateWith2Urls( @@ -352,9 +350,8 @@ TEST_F(PayloadStateTest, NewResponseResetsPayloadState) { TEST_F(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; int progress_bytes = 100; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0)) @@ -400,7 +397,7 @@ TEST_F(PayloadStateTest, AllCountersGetUpdatedProperlyOnErrorCodesAndEvents) { .Times(AtLeast(1)); EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 0)).Times(AtLeast(1)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash5873", true, false, &payload_state, &response); @@ -499,8 +496,7 @@ TEST_F(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0)) @@ -519,7 +515,7 @@ TEST_F(PayloadStateTest, EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0)) .Times(AtLeast(1)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8593", true, false, &payload_state, &response); @@ -539,8 +535,7 @@ TEST_F(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber()); EXPECT_CALL(*prefs, SetInt64(kPrefsPayloadAttemptNumber, 0)) @@ -558,7 +553,7 @@ TEST_F(PayloadStateTest, EXPECT_CALL(*prefs, SetInt64(kPrefsCurrentUrlFailureCount, 0)) .Times(AtLeast(1)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls("Hash8593", true, true, &payload_state, &response); @@ -576,9 +571,8 @@ TEST_F(PayloadStateTest, TEST_F(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash4427", true, false, &payload_state, &response); @@ -596,8 +590,7 @@ TEST_F(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { // Now, simulate a corrupted url index on persisted store which gets // loaded when update_engine restarts. 
Using a different prefs object // so as to not bother accounting for the uninteresting calls above. - FakeSystemState fake_system_state2; - NiceMock* prefs2 = fake_system_state2.mock_prefs(); + NiceMock* prefs2 = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs2, Exists(_)).WillRepeatedly(Return(true)); EXPECT_CALL(*prefs2, GetInt64(_, _)).Times(AtLeast(1)); EXPECT_CALL(*prefs2, GetInt64(kPrefsPayloadAttemptNumber, _)) @@ -614,7 +607,7 @@ TEST_F(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { // have the same hash as before so as to not trivially reset because the // response was different. We want to specifically test that even if the // response is same, we should reset the state if we find it corrupted. - EXPECT_TRUE(payload_state.Initialize(&fake_system_state2)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash4427", true, false, &payload_state, &response); @@ -630,12 +623,11 @@ TEST_F(PayloadStateTest, SetResponseResetsInvalidUrlIndex) { TEST_F(PayloadStateTest, NoBackoffInteractiveChecks) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.Init("", "", {.interactive = true}); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash6437", true, false, &payload_state, &response); @@ -653,12 +645,11 @@ TEST_F(PayloadStateTest, NoBackoffInteractiveChecks) { TEST_F(PayloadStateTest, NoBackoffForP2PUpdates) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.Init("", "", {}); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash6437", true, false, &payload_state, &response); @@ -684,9 +675,8 @@ TEST_F(PayloadStateTest, NoBackoffForP2PUpdates) { TEST_F(PayloadStateTest, NoBackoffForDeltaPayloads) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response); // Simulate a successful download and see that we're ready to download @@ -728,9 +718,8 @@ static void CheckPayloadBackoffState(PayloadState* payload_state, TEST_F(PayloadStateTest, BackoffPeriodsAreInCorrectRange) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8939", true, false, &payload_state, &response); @@ -750,9 +739,8 @@ TEST_F(PayloadStateTest, BackoffLogicCanBeDisabled) { OmahaResponse response; response.disable_payload_backoff = true; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8939", true, false, &payload_state, &response); @@ -777,11 +765,10 @@ TEST_F(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) { OmahaResponse response; 
response.disable_payload_backoff = true; PayloadState payload_state; - FakeSystemState fake_system_state; uint64_t https_total = 0; uint64_t http_total = 0; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash3286", true, false, &payload_state, &response); EXPECT_EQ(1, payload_state.GetNumResponsesSeen()); @@ -864,7 +851,7 @@ TEST_F(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) { EXPECT_EQ(p2p_total, payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpPeer)); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportSuccessfulUpdateMetrics( 1, _, kPayloadTypeFull, _, _, 314, _, _, _, 3)); @@ -885,9 +872,8 @@ TEST_F(PayloadStateTest, BytesDownloadedMetricsGetAddedToCorrectSources) { TEST_F(PayloadStateTest, DownloadSourcesUsedIsCorrect) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash3286", true, false, &payload_state, &response); @@ -905,7 +891,7 @@ TEST_F(PayloadStateTest, DownloadSourcesUsedIsCorrect) { int64_t total_bytes[kNumDownloadSources] = {}; total_bytes[kDownloadSourceHttpServer] = num_bytes; - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportSuccessfulUpdateMetrics( _, _, @@ -924,10 +910,9 @@ TEST_F(PayloadStateTest, DownloadSourcesUsedIsCorrect) { TEST_F(PayloadStateTest, RestartingUpdateResetsMetrics) { OmahaResponse response; - FakeSystemState fake_system_state; PayloadState payload_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); // Set the first response. SetupPayloadStateWith2Urls( @@ -953,24 +938,23 @@ TEST_F(PayloadStateTest, RestartingUpdateResetsMetrics) { } TEST_F(PayloadStateTest, NumRebootsIncrementsCorrectly) { - FakeSystemState fake_system_state; PayloadState payload_state; - NiceMock* prefs = fake_system_state.mock_prefs(); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AtLeast(0)); EXPECT_CALL(*prefs, SetInt64(kPrefsNumReboots, 1)).Times(AtLeast(1)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); payload_state.UpdateRestarted(); EXPECT_EQ(0U, payload_state.GetNumReboots()); - fake_system_state.set_system_rebooted(true); + FakeSystemState::Get()->set_system_rebooted(true); payload_state.UpdateResumed(); // Num reboots should be incremented because system rebooted detected. EXPECT_EQ(1U, payload_state.GetNumReboots()); - fake_system_state.set_system_rebooted(false); + FakeSystemState::Get()->set_system_rebooted(false); payload_state.UpdateResumed(); // Num reboots should now be 1 as reboot was not detected. EXPECT_EQ(1U, payload_state.GetNumReboots()); @@ -981,12 +965,11 @@ TEST_F(PayloadStateTest, NumRebootsIncrementsCorrectly) { } TEST_F(PayloadStateTest, RollbackHappened) { - FakeSystemState fake_system_state; PayloadState payload_state; NiceMock* mock_powerwash_safe_prefs = - fake_system_state.mock_powerwash_safe_prefs(); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->mock_powerwash_safe_prefs(); + EXPECT_TRUE(payload_state.Initialize()); // Verify pre-conditions are good. 
EXPECT_FALSE(payload_state.GetRollbackHappened()); @@ -1012,19 +995,18 @@ TEST_F(PayloadStateTest, RollbackHappened) { } TEST_F(PayloadStateTest, RollbackVersion) { - FakeSystemState fake_system_state; PayloadState payload_state; NiceMock* mock_powerwash_safe_prefs = - fake_system_state.mock_powerwash_safe_prefs(); + FakeSystemState::Get()->mock_powerwash_safe_prefs(); // Mock out the os version and make sure it's excluded correctly. string rollback_version = "2345.0.0"; - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.Init(rollback_version, "", {}); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); // Verify pre-conditions are good. EXPECT_TRUE(payload_state.GetRollbackVersion().empty()); @@ -1047,7 +1029,7 @@ TEST_F(PayloadStateTest, RollbackVersion) { // Check that we report only UpdateEngine.Rollback.* metrics in // UpdateSucceeded(). - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportRollbackMetrics(metrics::RollbackResult::kSuccess)) .Times(1); @@ -1058,7 +1040,6 @@ TEST_F(PayloadStateTest, DurationsAreCorrect) { OmahaResponse response; response.packages.resize(1); PayloadState payload_state; - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; @@ -1067,9 +1048,9 @@ TEST_F(PayloadStateTest, DurationsAreCorrect) { fake_clock.SetWallclockTime(Time::FromInternalValue(1000000)); fake_clock.SetMonotonicTime(Time::FromInternalValue(2000000)); - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); // Check that durations are correct for a successful update where // time has advanced 7 seconds on the wall clock and 4 seconds on @@ -1101,7 +1082,7 @@ TEST_F(PayloadStateTest, DurationsAreCorrect) { // durations correctly (e.g. they are the same as before). fake_clock.SetMonotonicTime(Time::FromInternalValue(5000)); PayloadState payload_state2; - EXPECT_TRUE(payload_state2.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state2.Initialize()); payload_state2.SetResponse(response); EXPECT_EQ(payload_state2.GetUpdateDuration().InMicroseconds(), 10000000); EXPECT_EQ(payload_state2.GetUpdateDurationUptime().InMicroseconds(), @@ -1120,7 +1101,6 @@ TEST_F(PayloadStateTest, DurationsAreCorrect) { TEST_F(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; @@ -1128,9 +1108,9 @@ TEST_F(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { fake_clock.SetMonotonicTime( Time::FromInternalValue(30 * Time::kMicrosecondsPerSecond)); - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); // Make the update succeed. 
SetupPayloadStateWith2Urls( @@ -1147,12 +1127,12 @@ TEST_F(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { fake_clock.SetMonotonicTime( Time::FromInternalValue(500 * Time::kMicrosecondsPerSecond)); PayloadState payload_state2; - EXPECT_TRUE(payload_state2.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state2.Initialize()); // Expect 500 - 30 seconds = 470 seconds ~= 7 min 50 sec - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportTimeToReboot(7)); - fake_system_state.set_system_rebooted(true); + FakeSystemState::Get()->set_system_rebooted(true); payload_state2.UpdateEngineStarted(); @@ -1162,12 +1142,11 @@ TEST_F(PayloadStateTest, RebootAfterSuccessfulUpdateTest) { TEST_F(PayloadStateTest, RestartAfterCrash) { PayloadState payload_state; - FakeSystemState fake_system_state; testing::StrictMock mock_metrics_reporter; - fake_system_state.set_metrics_reporter(&mock_metrics_reporter); - NiceMock* prefs = fake_system_state.mock_prefs(); + FakeSystemState::Get()->set_metrics_reporter(&mock_metrics_reporter); + NiceMock* prefs = FakeSystemState::Get()->mock_prefs(); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); // Only the |kPrefsAttemptInProgress| state variable should be read. EXPECT_CALL(*prefs, Exists(_)).Times(0); @@ -1180,18 +1159,17 @@ TEST_F(PayloadStateTest, RestartAfterCrash) { EXPECT_CALL(*prefs, GetBoolean(kPrefsAttemptInProgress, _)); // Simulate an update_engine restart without a reboot. - fake_system_state.set_system_rebooted(false); + FakeSystemState::Get()->set_system_rebooted(false); payload_state.UpdateEngineStarted(); } TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) { PayloadState payload_state; - FakeSystemState fake_system_state; // If there's no marker at startup, ensure we don't report a metric. - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_TRUE(payload_state.Initialize()); + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportAbnormallyTerminatedUpdateAttemptMetrics()) .Times(0); payload_state.UpdateEngineStarted(); @@ -1199,17 +1177,16 @@ TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsNoReporting) { TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) { PayloadState payload_state; - FakeSystemState fake_system_state; FakePrefs fake_prefs; // If we have a marker at startup, ensure it's reported and the // marker is then cleared. - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); fake_prefs.SetBoolean(kPrefsAttemptInProgress, true); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportAbnormallyTerminatedUpdateAttemptMetrics()) .Times(1); payload_state.UpdateEngineStarted(); @@ -1219,19 +1196,18 @@ TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsReported) { TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) { PayloadState payload_state; - FakeSystemState fake_system_state; FakePrefs fake_prefs; // Make sure the marker is written and cleared during an attempt and // also that we DO NOT emit the metric (since the attempt didn't end // abnormally). 
- fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); OmahaResponse response; response.packages.resize(1); payload_state.SetResponse(response); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportAbnormallyTerminatedUpdateAttemptMetrics()) .Times(0); @@ -1251,12 +1227,11 @@ TEST_F(PayloadStateTest, AbnormalTerminationAttemptMetricsClearedOnSucceess) { TEST_F(PayloadStateTest, CandidateUrlsComputedCorrectly) { OmahaResponse response; - FakeSystemState fake_system_state; PayloadState payload_state; policy::MockDevicePolicy disable_http_policy; - fake_system_state.set_device_policy(&disable_http_policy); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_device_policy(&disable_http_policy); + EXPECT_TRUE(payload_state.Initialize()); // Test with no device policy. Should default to allowing http. EXPECT_CALL(disable_http_policy, GetHttpDownloadsEnabled(_)) @@ -1299,7 +1274,7 @@ TEST_F(PayloadStateTest, CandidateUrlsComputedCorrectly) { // Now, pretend that the HTTP policy is turned on. We want to make sure // the new policy is honored. policy::MockDevicePolicy enable_http_policy; - fake_system_state.set_device_policy(&enable_http_policy); + FakeSystemState::Get()->set_device_policy(&enable_http_policy); EXPECT_CALL(enable_http_policy, GetHttpDownloadsEnabled(_)) .WillRepeatedly(DoAll(SetArgPointee<0>(true), Return(true))); @@ -1325,30 +1300,29 @@ TEST_F(PayloadStateTest, CandidateUrlsComputedCorrectly) { TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response); // Simulate a successful download and update. payload_state.DownloadComplete(); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportSuccessfulUpdateMetrics( _, _, kPayloadTypeDelta, _, _, _, _, _, _, _)); payload_state.UpdateSucceeded(); // Mock the request to a request where the delta was disabled but Omaha sends // a delta anyway and test again. - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.set_delta_okay(false); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response); payload_state.DownloadComplete(); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportSuccessfulUpdateMetrics( _, _, kPayloadTypeDelta, _, _, _, _, _, _, _)); payload_state.UpdateSucceeded(); @@ -1357,21 +1331,20 @@ TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) { TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; // Mock the request to a request where the delta was disabled. 
- OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.set_delta_okay(false); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash6437", true, false, &payload_state, &response); // Simulate a successful download and update. payload_state.DownloadComplete(); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportSuccessfulUpdateMetrics( _, _, kPayloadTypeForcedFull, _, _, _, _, _, _, _)); payload_state.UpdateSucceeded(); @@ -1380,35 +1353,33 @@ TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) { TEST_F(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash6437", true, false, &payload_state, &response); // Mock the request to a request where the delta is enabled, although the // result is full. - OmahaRequestParams params(&fake_system_state); + OmahaRequestParams params; params.set_delta_okay(true); - fake_system_state.set_request_params(¶ms); + FakeSystemState::Get()->set_request_params(¶ms); // Simulate a successful download and update. payload_state.DownloadComplete(); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportSuccessfulUpdateMetrics( _, _, kPayloadTypeFull, _, _, _, _, _, _, _)); payload_state.UpdateSucceeded(); } TEST_F(PayloadStateTest, RebootAfterUpdateFailedMetric) { - FakeSystemState fake_system_state; OmahaResponse response; PayloadState payload_state; FakePrefs fake_prefs; - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash3141", true, false, &payload_state, &response); @@ -1418,40 +1389,43 @@ TEST_F(PayloadStateTest, RebootAfterUpdateFailedMetric) { payload_state.ExpectRebootInNewVersion("Version:12345678"); // Reboot into the same environment to get an UMA metric with a value of 1. - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportFailedUpdateCount(1)); payload_state.ReportFailedBootIfNeeded(); - Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter()); + Mock::VerifyAndClearExpectations( + FakeSystemState::Get()->mock_metrics_reporter()); // Simulate a second update and reboot into the same environment, this should // send a value of 2. payload_state.ExpectRebootInNewVersion("Version:12345678"); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportFailedUpdateCount(2)); payload_state.ReportFailedBootIfNeeded(); - Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter()); + Mock::VerifyAndClearExpectations( + FakeSystemState::Get()->mock_metrics_reporter()); // Simulate a third failed reboot to new version, but this time for a // different payload. This should send a value of 1 this time. 
payload_state.ExpectRebootInNewVersion("Version:3141592"); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportFailedUpdateCount(1)); payload_state.ReportFailedBootIfNeeded(); - Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter()); + Mock::VerifyAndClearExpectations( + FakeSystemState::Get()->mock_metrics_reporter()); } TEST_F(PayloadStateTest, RebootAfterUpdateSucceed) { - FakeSystemState fake_system_state; OmahaResponse response; PayloadState payload_state; FakePrefs fake_prefs; - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); - FakeBootControl* fake_boot_control = fake_system_state.fake_boot_control(); + FakeBootControl* fake_boot_control = + FakeSystemState::Get()->fake_boot_control(); fake_boot_control->SetCurrentSlot(0); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash3141", true, false, &payload_state, &response); @@ -1463,7 +1437,7 @@ TEST_F(PayloadStateTest, RebootAfterUpdateSucceed) { // Change the BootDevice to a different one, no metric should be sent. fake_boot_control->SetCurrentSlot(1); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportFailedUpdateCount(_)) .Times(0); payload_state.ReportFailedBootIfNeeded(); @@ -1475,13 +1449,12 @@ TEST_F(PayloadStateTest, RebootAfterUpdateSucceed) { } TEST_F(PayloadStateTest, RebootAfterCanceledUpdate) { - FakeSystemState fake_system_state; OmahaResponse response; PayloadState payload_state; FakePrefs fake_prefs; - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash3141", true, false, &payload_state, &response); @@ -1490,7 +1463,7 @@ TEST_F(PayloadStateTest, RebootAfterCanceledUpdate) { payload_state.UpdateSucceeded(); payload_state.ExpectRebootInNewVersion("Version:12345678"); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportFailedUpdateCount(_)) .Times(0); @@ -1502,14 +1475,13 @@ TEST_F(PayloadStateTest, RebootAfterCanceledUpdate) { } TEST_F(PayloadStateTest, UpdateSuccessWithWipedPrefs) { - FakeSystemState fake_system_state; PayloadState payload_state; FakePrefs fake_prefs; - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); - EXPECT_CALL(*fake_system_state.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportFailedUpdateCount(_)) .Times(0); @@ -1520,11 +1492,10 @@ TEST_F(PayloadStateTest, UpdateSuccessWithWipedPrefs) { TEST_F(PayloadStateTest, DisallowP2PAfterTooManyAttempts) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; FakePrefs fake_prefs; - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8593", true, false, &payload_state, &response); @@ -1541,13 +1512,12 @@ TEST_F(PayloadStateTest, DisallowP2PAfterTooManyAttempts) { TEST_F(PayloadStateTest, DisallowP2PAfterDeadline) { 
OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8593", true, false, &payload_state, &response); @@ -1587,11 +1557,10 @@ TEST_F(PayloadStateTest, DisallowP2PAfterDeadline) { TEST_F(PayloadStateTest, P2PStateVarsInitialValue) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; FakePrefs fake_prefs; - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8593", true, false, &payload_state, &response); @@ -1603,12 +1572,11 @@ TEST_F(PayloadStateTest, P2PStateVarsInitialValue) { TEST_F(PayloadStateTest, P2PStateVarsArePersisted) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8593", true, false, &payload_state, &response); @@ -1624,7 +1592,7 @@ TEST_F(PayloadStateTest, P2PStateVarsArePersisted) { // Now create a new PayloadState and check that it loads the state // vars correctly. 
PayloadState payload_state2; - EXPECT_TRUE(payload_state2.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state2.Initialize()); EXPECT_EQ(1, payload_state2.GetP2PNumAttempts()); EXPECT_EQ(time, payload_state2.GetP2PFirstAttemptTimestamp()); } @@ -1632,13 +1600,12 @@ TEST_F(PayloadStateTest, P2PStateVarsArePersisted) { TEST_F(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { OmahaResponse response; PayloadState payload_state; - FakeSystemState fake_system_state; FakeClock fake_clock; FakePrefs fake_prefs; - fake_system_state.set_clock(&fake_clock); - fake_system_state.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); SetupPayloadStateWith2Urls( "Hash8593", true, false, &payload_state, &response); @@ -1663,11 +1630,10 @@ TEST_F(PayloadStateTest, P2PStateVarsAreClearedOnNewResponse) { TEST_F(PayloadStateTest, NextPayloadResetsUrlIndex) { PayloadState payload_state; - FakeSystemState fake_system_state; StrictMock mock_excluder; - EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder()) .WillOnce(Return(&mock_excluder)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); OmahaResponse response; response.packages.push_back( @@ -1693,11 +1659,10 @@ TEST_F(PayloadStateTest, NextPayloadResetsUrlIndex) { TEST_F(PayloadStateTest, ExcludeNoopForNonExcludables) { PayloadState payload_state; - FakeSystemState fake_system_state; StrictMock mock_excluder; - EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder()) .WillOnce(Return(&mock_excluder)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); OmahaResponse response; response.packages.push_back( @@ -1715,11 +1680,10 @@ TEST_F(PayloadStateTest, ExcludeNoopForNonExcludables) { TEST_F(PayloadStateTest, ExcludeOnlyCanExcludables) { PayloadState payload_state; - FakeSystemState fake_system_state; StrictMock mock_excluder; - EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder()) .WillOnce(Return(&mock_excluder)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); OmahaResponse response; response.packages.push_back( @@ -1738,11 +1702,10 @@ TEST_F(PayloadStateTest, ExcludeOnlyCanExcludables) { TEST_F(PayloadStateTest, IncrementFailureExclusionTest) { PayloadState payload_state; - FakeSystemState fake_system_state; StrictMock mock_excluder; - EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder()) .WillOnce(Return(&mock_excluder)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); OmahaResponse response; // Critical package. 
@@ -1782,11 +1745,10 @@ TEST_F(PayloadStateTest, IncrementFailureExclusionTest) { TEST_F(PayloadStateTest, HaltExclusionPostPayloadExhaustion) { PayloadState payload_state; - FakeSystemState fake_system_state; StrictMock mock_excluder; - EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder()) + EXPECT_CALL(*FakeSystemState::Get()->mock_update_attempter(), GetExcluder()) .WillOnce(Return(&mock_excluder)); - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); OmahaResponse response; // Non-critical package. @@ -1813,8 +1775,7 @@ TEST_F(PayloadStateTest, HaltExclusionPostPayloadExhaustion) { TEST_F(PayloadStateTest, NonInfinitePayloadIndexIncrement) { PayloadState payload_state; - FakeSystemState fake_system_state; - EXPECT_TRUE(payload_state.Initialize(&fake_system_state)); + EXPECT_TRUE(payload_state.Initialize()); payload_state.SetResponse({}); diff --git a/cros/real_system_state.cc b/cros/real_system_state.cc index aff9863d..0b2b49d4 100644 --- a/cros/real_system_state.cc +++ b/cros/real_system_state.cc @@ -61,7 +61,7 @@ bool RealSystemState::Initialize() { LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode."; LOG_IF(INFO, !hardware_->IsOfficialBuild()) << "Booted non-official build."; - connection_manager_ = connection_manager::CreateConnectionManager(this); + connection_manager_ = connection_manager::CreateConnectionManager(); if (!connection_manager_) { LOG(ERROR) << "Error initializing the ConnectionManagerInterface."; return false; @@ -133,8 +133,7 @@ bool RealSystemState::Initialize() { new CertificateChecker(prefs_.get(), &openssl_wrapper_)); certificate_checker_->Init(); - update_attempter_.reset( - new UpdateAttempter(this, certificate_checker_.get())); + update_attempter_.reset(new UpdateAttempter(certificate_checker_.get())); // Initialize the UpdateAttempter before the UpdateManager. update_attempter_->Init(); @@ -142,8 +141,7 @@ bool RealSystemState::Initialize() { // Initialize the Update Manager using the default state factory. chromeos_update_manager::State* um_state = chromeos_update_manager::DefaultStateFactory(&policy_provider_, - kiosk_app_proxy_.get(), - this); + kiosk_app_proxy_.get()); if (!um_state) { LOG(ERROR) << "Failed to initialize the Update Manager."; @@ -164,7 +162,7 @@ bool RealSystemState::Initialize() { kMaxP2PFilesToKeep, base::TimeDelta::FromDays(kMaxP2PFileAgeDays))); - if (!payload_state_.Initialize(this)) { + if (!payload_state_.Initialize()) { LOG(ERROR) << "Failed to initialize the payload state object."; return false; } diff --git a/cros/real_system_state.h b/cros/real_system_state.h index 1e45dc1a..348c31bc 100644 --- a/cros/real_system_state.h +++ b/cros/real_system_state.h @@ -47,14 +47,14 @@ namespace chromeos_update_engine { // used by the actual product code. class RealSystemState : public SystemState { public: - // Constructs all system objects that do not require separate initialization; - // see Initialize() below for the remaining ones. - RealSystemState() = default; ~RealSystemState() = default; - // Initializes and sets systems objects that require an initialization - // separately from construction. Returns |true| on success. - bool Initialize(); + static void CreateInstance() { + CHECK(!g_instance_) << "SystemState has been previously created."; + RealSystemState* rss = new RealSystemState(); + g_instance_.reset(rss); + LOG_IF(FATAL, !rss->Initialize()) << "Failed to initialize system state."; + } // SystemState overrides. 
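/*
 * Sketch (assumed plumbing, not shown in these patches): the SystemState::Get()
 * and *::CreateInstance() calls used throughout this series imply that the
 * SystemState base class now owns a single process-wide instance, roughly:
 *
 *   class SystemState {
 *    public:
 *     virtual ~SystemState() = default;
 *     // Returns the instance installed by RealSystemState::CreateInstance()
 *     // (or FakeSystemState::CreateInstance() in the unit tests).
 *     static SystemState* Get() { return g_instance_.get(); }
 *     virtual PrefsInterface* prefs() = 0;
 *     virtual BootControlInterface* boot_control() = 0;
 *     // ... payload_state(), hardware(), clock(), metrics_reporter(), etc.
 *    protected:
 *     static std::unique_ptr<SystemState> g_instance_;
 *   };
 *
 * With this in place, callers read dependencies via SystemState::Get()->prefs()
 * and similar accessors instead of holding an injected SystemState*, which is
 * why the |system_state_| members and constructor parameters are removed in
 * the hunks that follow.
 */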
void set_device_policy(const policy::DevicePolicy* device_policy) override { @@ -108,6 +108,14 @@ class RealSystemState : public SystemState { DlcServiceInterface* dlcservice() override { return dlcservice_.get(); } private: + // Constructs all system objects that do not require separate initialization; + // see Initialize() below for the remaining ones. + RealSystemState() = default; + + // Initializes and sets systems objects that require an initialization + // separately from construction. Returns |true| on success. + bool Initialize(); + // Real DBus proxies using the DBus connection. std::unique_ptr kiosk_app_proxy_; @@ -155,7 +163,7 @@ class RealSystemState : public SystemState { std::unique_ptr update_attempter_; // Common parameters for all Omaha requests. - OmahaRequestParams request_params_{this}; + OmahaRequestParams request_params_; std::unique_ptr p2p_manager_; diff --git a/cros/update_attempter.cc b/cros/update_attempter.cc index 5c21d04e..745272c0 100644 --- a/cros/update_attempter.cc +++ b/cros/update_attempter.cc @@ -126,10 +126,8 @@ ErrorCode GetErrorCodeForAction(AbstractAction* action, ErrorCode code) { return code; } -UpdateAttempter::UpdateAttempter(SystemState* system_state, - CertificateChecker* cert_checker) +UpdateAttempter::UpdateAttempter(CertificateChecker* cert_checker) : processor_(new ActionProcessor()), - system_state_(system_state), cert_checker_(cert_checker), is_install_(false) {} @@ -150,8 +148,8 @@ void UpdateAttempter::Init() { // Pulling from the SystemState can only be done after construction, since // this is an aggregate of various objects (such as the UpdateAttempter), // which requires them all to be constructed prior to it being used. - prefs_ = system_state_->prefs(); - omaha_request_params_ = system_state_->request_params(); + prefs_ = SystemState::Get()->prefs(); + omaha_request_params_ = SystemState::Get()->request_params(); if (cert_checker_) cert_checker_->SetObserver(this); @@ -171,7 +169,7 @@ bool UpdateAttempter::ScheduleUpdates() { return false; chromeos_update_manager::UpdateManager* const update_manager = - system_state_->update_manager(); + SystemState::Get()->update_manager(); CHECK(update_manager); Callback callback = Bind(&UpdateAttempter::OnUpdateScheduled, base::Unretained(this)); @@ -187,8 +185,8 @@ bool UpdateAttempter::StartUpdater() { // Initiate update checks. ScheduleUpdates(); - auto update_boot_flags_action = - std::make_unique(system_state_->boot_control()); + auto update_boot_flags_action = std::make_unique( + SystemState::Get()->boot_control()); processor_->EnqueueAction(std::move(update_boot_flags_action)); // Update boot flags after 45 seconds. 
MessageLoop::current()->PostDelayedTask( @@ -212,16 +210,16 @@ bool UpdateAttempter::StartUpdater() { void UpdateAttempter::CertificateChecked(ServerToCheck server_to_check, CertificateCheckResult result) { - system_state_->metrics_reporter()->ReportCertificateCheckMetrics( + SystemState::Get()->metrics_reporter()->ReportCertificateCheckMetrics( server_to_check, result); } bool UpdateAttempter::CheckAndReportDailyMetrics() { int64_t stored_value; - Time now = system_state_->clock()->GetWallclockTime(); - if (system_state_->prefs()->Exists(kPrefsDailyMetricsLastReportedAt) && - system_state_->prefs()->GetInt64(kPrefsDailyMetricsLastReportedAt, - &stored_value)) { + Time now = SystemState::Get()->clock()->GetWallclockTime(); + if (SystemState::Get()->prefs()->Exists(kPrefsDailyMetricsLastReportedAt) && + SystemState::Get()->prefs()->GetInt64(kPrefsDailyMetricsLastReportedAt, + &stored_value)) { Time last_reported_at = Time::FromInternalValue(stored_value); TimeDelta time_reported_since = now - last_reported_at; if (time_reported_since.InSeconds() < 0) { @@ -244,8 +242,8 @@ bool UpdateAttempter::CheckAndReportDailyMetrics() { } LOG(INFO) << "Reporting daily metrics."; - system_state_->prefs()->SetInt64(kPrefsDailyMetricsLastReportedAt, - now.ToInternalValue()); + SystemState::Get()->prefs()->SetInt64(kPrefsDailyMetricsLastReportedAt, + now.ToInternalValue()); ReportOSAge(); @@ -254,10 +252,6 @@ bool UpdateAttempter::CheckAndReportDailyMetrics() { void UpdateAttempter::ReportOSAge() { struct stat sb; - - if (system_state_ == nullptr) - return; - if (stat("/etc/lsb-release", &sb) != 0) { PLOG(ERROR) << "Error getting file status for /etc/lsb-release " << "(Note: this may happen in some unit tests)"; @@ -265,7 +259,7 @@ void UpdateAttempter::ReportOSAge() { } Time lsb_release_timestamp = Time::FromTimeSpec(sb.st_ctim); - Time now = system_state_->clock()->GetWallclockTime(); + Time now = SystemState::Get()->clock()->GetWallclockTime(); TimeDelta age = now - lsb_release_timestamp; if (age.InSeconds() < 0) { LOG(ERROR) << "The OS age (" << utils::FormatTimeDelta(age) @@ -274,7 +268,7 @@ void UpdateAttempter::ReportOSAge() { return; } - system_state_->metrics_reporter()->ReportDailyMetrics(age); + SystemState::Get()->metrics_reporter()->ReportDailyMetrics(age); } void UpdateAttempter::Update(const UpdateCheckParams& params) { @@ -293,8 +287,7 @@ void UpdateAttempter::Update(const UpdateCheckParams& params) { // not performing an update check because of this. LOG(INFO) << "Not updating b/c we already updated and we're waiting for " << "reboot, we'll ping Omaha instead"; - system_state_->metrics_reporter()->ReportUpdateCheckMetrics( - system_state_, + SystemState::Get()->metrics_reporter()->ReportUpdateCheckMetrics( metrics::CheckResult::kRebootPending, metrics::CheckReaction::kUnset, metrics::DownloadErrorCode::kUnset); @@ -337,8 +330,8 @@ void UpdateAttempter::RefreshDevicePolicy() { else LOG(INFO) << "No device policies/settings present."; - system_state_->set_device_policy(device_policy); - system_state_->p2p_manager()->SetDevicePolicy(device_policy); + SystemState::Get()->set_device_policy(device_policy); + SystemState::Get()->p2p_manager()->SetDevicePolicy(device_policy); } void UpdateAttempter::CalculateP2PParams(bool interactive) { @@ -351,31 +344,31 @@ void UpdateAttempter::CalculateP2PParams(bool interactive) { // (Why would a developer want to opt in? If they are working on the // update_engine or p2p codebases so they can actually test their code.) 
- if (system_state_ != nullptr) { - if (!system_state_->p2p_manager()->IsP2PEnabled()) { - LOG(INFO) << "p2p is not enabled - disallowing p2p for both" - << " downloading and sharing."; + if (!SystemState::Get()->p2p_manager()->IsP2PEnabled()) { + LOG(INFO) << "p2p is not enabled - disallowing p2p for both" + << " downloading and sharing."; + } else { + // Allow p2p for sharing, even in interactive checks. + use_p2p_for_sharing = true; + if (!interactive) { + LOG(INFO) << "Non-interactive check - allowing p2p for downloading"; + use_p2p_for_downloading = true; } else { - // Allow p2p for sharing, even in interactive checks. - use_p2p_for_sharing = true; - if (!interactive) { - LOG(INFO) << "Non-interactive check - allowing p2p for downloading"; - use_p2p_for_downloading = true; - } else { - LOG(INFO) << "Forcibly disabling use of p2p for downloading " - << "since this update attempt is interactive."; - } + LOG(INFO) << "Forcibly disabling use of p2p for downloading " + << "since this update attempt is interactive."; } } - PayloadStateInterface* const payload_state = system_state_->payload_state(); + PayloadStateInterface* const payload_state = + SystemState::Get()->payload_state(); payload_state->SetUsingP2PForDownloading(use_p2p_for_downloading); payload_state->SetUsingP2PForSharing(use_p2p_for_sharing); } bool UpdateAttempter::CalculateUpdateParams(const UpdateCheckParams& params) { http_response_code_ = 0; - PayloadStateInterface* const payload_state = system_state_->payload_state(); + PayloadStateInterface* const payload_state = + SystemState::Get()->payload_state(); // Refresh the policy before computing all the update parameters. RefreshDevicePolicy(); @@ -419,8 +412,9 @@ bool UpdateAttempter::CalculateUpdateParams(const UpdateCheckParams& params) { // Set Quick Fix Build token if policy is set and the device is enterprise // enrolled. string token; - if (system_state_ && system_state_->device_policy()) { - if (!system_state_->device_policy()->GetDeviceQuickFixBuildToken(&token)) + if (SystemState::Get()->device_policy()) { + if (!SystemState::Get()->device_policy()->GetDeviceQuickFixBuildToken( + &token)) token.clear(); } omaha_request_params_->set_autoupdate_token(token); @@ -470,7 +464,8 @@ void UpdateAttempter::CalculateScatteringParams(bool interactive) { // Take a copy of the old scatter value before we update it, as // we need to update the waiting period if this value changes. TimeDelta old_scatter_factor = scatter_factor_; - const policy::DevicePolicy* device_policy = system_state_->device_policy(); + const policy::DevicePolicy* device_policy = + SystemState::Get()->device_policy(); if (device_policy) { int64_t new_scatter_factor_in_secs = 0; device_policy->GetScatterFactorInSeconds(&new_scatter_factor_in_secs); @@ -484,8 +479,8 @@ void UpdateAttempter::CalculateScatteringParams(bool interactive) { LOG(INFO) << "Scattering disabled since scatter factor is set to 0"; } else if (interactive) { LOG(INFO) << "Scattering disabled as this is an interactive update check"; - } else if (system_state_->hardware()->IsOOBEEnabled() && - !system_state_->hardware()->IsOOBEComplete(nullptr)) { + } else if (SystemState::Get()->hardware()->IsOOBEEnabled() && + !SystemState::Get()->hardware()->IsOOBEComplete(nullptr)) { LOG(INFO) << "Scattering disabled since OOBE is enabled but not complete " "yet"; } else { @@ -587,14 +582,14 @@ void UpdateAttempter::GenerateNewWaitingPeriod() { // fails, we'll still be able to scatter based on our in-memory value. 
// The persistence only helps in ensuring a good overall distribution // across multiple devices if they tend to reboot too often. - system_state_->payload_state()->SetScatteringWaitPeriod( + SystemState::Get()->payload_state()->SetScatteringWaitPeriod( omaha_request_params_->waiting_period()); } void UpdateAttempter::CalculateStagingParams(bool interactive) { - bool oobe_complete = system_state_->hardware()->IsOOBEEnabled() && - system_state_->hardware()->IsOOBEComplete(nullptr); - auto device_policy = system_state_->device_policy(); + bool oobe_complete = SystemState::Get()->hardware()->IsOOBEEnabled() && + SystemState::Get()->hardware()->IsOOBEComplete(nullptr); + auto device_policy = SystemState::Get()->device_policy(); StagingCase staging_case = StagingCase::kOff; if (device_policy && !interactive && oobe_complete) { staging_wait_time_ = omaha_request_params_->waiting_period(); @@ -634,7 +629,7 @@ void UpdateAttempter::CalculateStagingParams(bool interactive) { bool UpdateAttempter::ResetDlcPrefs(const string& dlc_id) { vector failures; - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); for (auto& sub_key : {kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}) { auto key = prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key}); @@ -669,7 +664,7 @@ bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) { } LOG(INFO) << "Set DLC (" << dlc_id << ") to " << (is_active ? "Active" : "Inactive"); - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); if (is_active) { auto ping_active_key = prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); @@ -688,11 +683,11 @@ int64_t UpdateAttempter::GetPingMetadata(const string& metadata_key) const { // The first time a ping is sent, the metadata files containing the values // sent back by the server still don't exist. A value of -1 is used to // indicate this. - if (!system_state_->prefs()->Exists(metadata_key)) + if (!SystemState::Get()->prefs()->Exists(metadata_key)) return kPingNeverPinged; int64_t value; - if (system_state_->prefs()->GetInt64(metadata_key, &value)) + if (SystemState::Get()->prefs()->GetInt64(metadata_key, &value)) return value; // Return -2 when the file exists and there is a problem reading from it, or @@ -704,11 +699,11 @@ void UpdateAttempter::CalculateDlcParams() { // Set the |dlc_ids_| only for an update. This is required to get the // currently installed DLC(s). if (!is_install_ && - !system_state_->dlcservice()->GetDlcsToUpdate(&dlc_ids_)) { + !SystemState::Get()->dlcservice()->GetDlcsToUpdate(&dlc_ids_)) { LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the " "state of dlcservice, will not update DLC modules."; } - PrefsInterface* prefs = system_state_->prefs(); + PrefsInterface* prefs = SystemState::Get()->prefs(); map dlc_apps_params; for (const auto& dlc_id : dlc_ids_) { OmahaRequestParams::AppParams dlc_params{ @@ -760,64 +755,55 @@ void UpdateAttempter::BuildUpdateActions(bool interactive) { // Actions: auto update_check_fetcher = std::make_unique( - GetProxyResolver(), system_state_->hardware()); + GetProxyResolver(), SystemState::Get()->hardware()); update_check_fetcher->set_server_to_check(ServerToCheck::kUpdate); // Try harder to connect to the network, esp when not interactive. // See comment in libcurl_http_fetcher.cc. update_check_fetcher->set_no_network_max_retries(interactive ? 
1 : 3); update_check_fetcher->set_is_update_check(true); - auto update_check_action = - std::make_unique(system_state_, - nullptr, - std::move(update_check_fetcher), - false, - session_id_); - auto response_handler_action = - std::make_unique(system_state_); - auto update_boot_flags_action = - std::make_unique(system_state_->boot_control()); + auto update_check_action = std::make_unique( + nullptr, std::move(update_check_fetcher), false, session_id_); + auto response_handler_action = std::make_unique(); + auto update_boot_flags_action = std::make_unique( + SystemState::Get()->boot_control()); auto download_started_action = std::make_unique( - system_state_, new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted), std::make_unique(GetProxyResolver(), - system_state_->hardware()), + SystemState::Get()->hardware()), false, session_id_); - LibcurlHttpFetcher* download_fetcher = - new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware()); + LibcurlHttpFetcher* download_fetcher = new LibcurlHttpFetcher( + GetProxyResolver(), SystemState::Get()->hardware()); download_fetcher->set_server_to_check(ServerToCheck::kDownload); if (interactive) download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive); download_fetcher->SetHeader(kXGoogleUpdateSessionId, session_id_); auto download_action = std::make_unique(prefs_, - system_state_->boot_control(), - system_state_->hardware(), - system_state_, + SystemState::Get()->boot_control(), + SystemState::Get()->hardware(), download_fetcher, // passes ownership interactive); download_action->set_delegate(this); auto download_finished_action = std::make_unique( - system_state_, new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished), std::make_unique(GetProxyResolver(), - system_state_->hardware()), + SystemState::Get()->hardware()), false, session_id_); auto filesystem_verifier_action = std::make_unique( - system_state_->boot_control()->GetDynamicPartitionControl()); + SystemState::Get()->boot_control()->GetDynamicPartitionControl()); auto update_complete_action = std::make_unique( - system_state_, new OmahaEvent(OmahaEvent::kTypeUpdateComplete), std::make_unique(GetProxyResolver(), - system_state_->hardware()), + SystemState::Get()->hardware()), false, session_id_); auto postinstall_runner_action = std::make_unique( - system_state_->boot_control(), system_state_->hardware()); + SystemState::Get()->boot_control(), SystemState::Get()->hardware()); postinstall_runner_action->set_delegate(this); // Bond them together. We have to use the leaf-types when calling @@ -851,7 +837,8 @@ bool UpdateAttempter::Rollback(bool powerwash) { // Enterprise-enrolled devices have an empty owner in their device policy. string owner; RefreshDevicePolicy(); - const policy::DevicePolicy* device_policy = system_state_->device_policy(); + const policy::DevicePolicy* device_policy = + SystemState::Get()->device_policy(); if (device_policy && (!device_policy->GetOwner(&owner) || owner.empty())) { LOG(ERROR) << "Enterprise device detected. 
" << "Cannot perform a powerwash for enterprise devices."; @@ -870,10 +857,11 @@ bool UpdateAttempter::Rollback(bool powerwash) { LOG(INFO) << "Setting rollback options."; install_plan_.reset(new InstallPlan()); install_plan_->target_slot = GetRollbackSlot(); - install_plan_->source_slot = system_state_->boot_control()->GetCurrentSlot(); + install_plan_->source_slot = + SystemState::Get()->boot_control()->GetCurrentSlot(); - TEST_AND_RETURN_FALSE( - install_plan_->LoadPartitionsFromSlots(system_state_->boot_control())); + TEST_AND_RETURN_FALSE(install_plan_->LoadPartitionsFromSlots( + SystemState::Get()->boot_control())); install_plan_->powerwash_required = powerwash; LOG(INFO) << "Using this install plan:"; @@ -882,14 +870,14 @@ bool UpdateAttempter::Rollback(bool powerwash) { auto install_plan_action = std::make_unique(*install_plan_); auto postinstall_runner_action = std::make_unique( - system_state_->boot_control(), system_state_->hardware()); + SystemState::Get()->boot_control(), SystemState::Get()->hardware()); postinstall_runner_action->set_delegate(this); BondActions(install_plan_action.get(), postinstall_runner_action.get()); processor_->EnqueueAction(std::move(install_plan_action)); processor_->EnqueueAction(std::move(postinstall_runner_action)); // Update the payload state for Rollback. - system_state_->payload_state()->Rollback(); + SystemState::Get()->payload_state()->Rollback(); SetStatusAndNotify(UpdateStatus::ATTEMPTING_ROLLBACK); @@ -906,9 +894,10 @@ bool UpdateAttempter::CanRollback() const { BootControlInterface::Slot UpdateAttempter::GetRollbackSlot() const { LOG(INFO) << "UpdateAttempter::GetRollbackSlot"; - const unsigned int num_slots = system_state_->boot_control()->GetNumSlots(); + const unsigned int num_slots = + SystemState::Get()->boot_control()->GetNumSlots(); const BootControlInterface::Slot current_slot = - system_state_->boot_control()->GetCurrentSlot(); + SystemState::Get()->boot_control()->GetCurrentSlot(); LOG(INFO) << " Installed slots: " << num_slots; LOG(INFO) << " Booted from slot: " @@ -922,7 +911,7 @@ BootControlInterface::Slot UpdateAttempter::GetRollbackSlot() const { vector bootable_slots; for (BootControlInterface::Slot slot = 0; slot < num_slots; slot++) { if (slot != current_slot && - system_state_->boot_control()->IsSlotBootable(slot)) { + SystemState::Get()->boot_control()->IsSlotBootable(slot)) { LOG(INFO) << "Found bootable slot " << BootControlInterface::SlotName(slot); return slot; @@ -1028,7 +1017,7 @@ bool UpdateAttempter::CheckForInstall(const vector& dlc_ids, } bool UpdateAttempter::RebootIfNeeded() { - if (system_state_->power_manager()->RequestReboot()) + if (SystemState::Get()->power_manager()->RequestReboot()) return true; return RebootDirectly(); @@ -1040,7 +1029,7 @@ void UpdateAttempter::WriteUpdateCompletedMarker() { return; prefs_->SetString(kPrefsUpdateCompletedOnBootId, boot_id); - int64_t value = system_state_->clock()->GetBootTime().ToInternalValue(); + int64_t value = SystemState::Get()->clock()->GetBootTime().ToInternalValue(); prefs_->SetInt64(kPrefsUpdateCompletedBootTime, value); } @@ -1100,19 +1089,19 @@ void UpdateAttempter::OnUpdateScheduled(EvalStatus status, } void UpdateAttempter::UpdateLastCheckedTime() { - last_checked_time_ = system_state_->clock()->GetWallclockTime().ToTimeT(); + last_checked_time_ = + SystemState::Get()->clock()->GetWallclockTime().ToTimeT(); } void UpdateAttempter::UpdateRollbackHappened() { - DCHECK(system_state_); - DCHECK(system_state_->payload_state()); + 
DCHECK(SystemState::Get()->payload_state()); DCHECK(policy_provider_); - if (system_state_->payload_state()->GetRollbackHappened() && + if (SystemState::Get()->payload_state()->GetRollbackHappened() && (policy_provider_->device_policy_is_loaded() || policy_provider_->IsConsumerDevice())) { // Rollback happened, but we already went through OOBE and policy is // present or it's a consumer device. - system_state_->payload_state()->SetRollbackHappened(false); + SystemState::Get()->payload_state()->SetRollbackHappened(false); } } @@ -1155,7 +1144,7 @@ void UpdateAttempter::ProcessingDoneInternal(const ActionProcessor* processor, omaha_request_params_->app_version()); DeltaPerformer::ResetUpdateProgress(prefs_, false); - system_state_->payload_state()->UpdateSucceeded(); + SystemState::Get()->payload_state()->UpdateSucceeded(); // Since we're done with scattering fully at this point, this is the // safest point delete the state files, as we're sure that the status is @@ -1167,8 +1156,8 @@ void UpdateAttempter::ProcessingDoneInternal(const ActionProcessor* processor, // after reboot so that the same device is not favored or punished in any // way. prefs_->Delete(kPrefsUpdateCheckCount); - system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta()); - system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta()); + SystemState::Get()->payload_state()->SetScatteringWaitPeriod(TimeDelta()); + SystemState::Get()->payload_state()->SetStagingWaitPeriod(TimeDelta()); prefs_->Delete(kPrefsUpdateFirstSeenAt); // Note: below this comment should only be on |ErrorCode::kSuccess|. @@ -1189,7 +1178,8 @@ vector UpdateAttempter::GetSuccessfulDlcIds() { void UpdateAttempter::ProcessingDoneInstall(const ActionProcessor* processor, ErrorCode code) { - if (!system_state_->dlcservice()->InstallCompleted(GetSuccessfulDlcIds())) + if (!SystemState::Get()->dlcservice()->InstallCompleted( + GetSuccessfulDlcIds())) LOG(WARNING) << "dlcservice didn't successfully handle install completion."; SetStatusAndNotify(UpdateStatus::IDLE); ScheduleUpdates(); @@ -1200,7 +1190,7 @@ void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor, ErrorCode code) { WriteUpdateCompletedMarker(); - if (!system_state_->dlcservice()->UpdateCompleted(GetSuccessfulDlcIds())) + if (!SystemState::Get()->dlcservice()->UpdateCompleted(GetSuccessfulDlcIds())) LOG(WARNING) << "dlcservice didn't successfully handle update completion."; SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT); ScheduleUpdates(); @@ -1222,19 +1212,19 @@ void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor, // If we just downloaded a rollback image, we should preserve this fact // over the following powerwash. if (install_plan_->is_rollback) { - system_state_->payload_state()->SetRollbackHappened(true); - system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics( + SystemState::Get()->payload_state()->SetRollbackHappened(true); + SystemState::Get()->metrics_reporter()->ReportEnterpriseRollbackMetrics( /*success=*/true, install_plan_->version); } // Expect to reboot into the new version to send the proper metric during // next boot. - system_state_->payload_state()->ExpectRebootInNewVersion( + SystemState::Get()->payload_state()->ExpectRebootInNewVersion( target_version_uid); } else { // If we just finished a rollback, then we expect to have no Omaha // response. Otherwise, it's an error. 
- if (system_state_->payload_state()->GetRollbackVersion().empty()) { + if (SystemState::Get()->payload_state()->GetRollbackVersion().empty()) { LOG(ERROR) << "Can't send metrics because there was no Omaha response"; } } @@ -1382,7 +1372,7 @@ void UpdateAttempter::BytesReceived(uint64_t bytes_progressed, uint64_t total) { // The PayloadState keeps track of how many bytes were actually downloaded // from a given URL for the URL skipping logic. - system_state_->payload_state()->DownloadProgress(bytes_progressed); + SystemState::Get()->payload_state()->DownloadProgress(bytes_progressed); double progress = 0; if (total) @@ -1396,7 +1386,7 @@ void UpdateAttempter::BytesReceived(uint64_t bytes_progressed, } void UpdateAttempter::DownloadComplete() { - system_state_->payload_state()->DownloadComplete(); + SystemState::Get()->payload_state()->DownloadComplete(); } void UpdateAttempter::ProgressUpdate(double progress) { @@ -1441,7 +1431,7 @@ bool UpdateAttempter::ResetStatus() { ret_value = prefs_->Delete(kPrefsLastFp, {kDlcPrefsSubDir}) && ret_value; // Update the boot flags so the current slot has higher priority. - BootControlInterface* boot_control = system_state_->boot_control(); + BootControlInterface* boot_control = SystemState::Get()->boot_control(); if (!boot_control->SetActiveBootSlot(boot_control->GetCurrentSlot())) ret_value = false; @@ -1452,7 +1442,7 @@ bool UpdateAttempter::ResetStatus() { ret_value = false; // Notify the PayloadState that the successful payload was canceled. - system_state_->payload_state()->ResetUpdateStatus(); + SystemState::Get()->payload_state()->ResetUpdateStatus(); // The previous version is used to report back to omaha after reboot that // we actually rebooted into the new version from this "prev-version". We @@ -1482,8 +1472,9 @@ bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) { out_status->is_install = is_install_; string str_eol_date; - if (system_state_->prefs()->Exists(kPrefsOmahaEolDate) && - !system_state_->prefs()->GetString(kPrefsOmahaEolDate, &str_eol_date)) + if (SystemState::Get()->prefs()->Exists(kPrefsOmahaEolDate) && + !SystemState::Get()->prefs()->GetString(kPrefsOmahaEolDate, + &str_eol_date)) LOG(ERROR) << "Failed to retrieve kPrefsOmahaEolDate pref."; out_status->eol_date = StringToEolDate(str_eol_date); @@ -1510,13 +1501,13 @@ void UpdateAttempter::BroadcastStatus() { uint32_t UpdateAttempter::GetErrorCodeFlags() { uint32_t flags = 0; - if (!system_state_->hardware()->IsNormalBootMode()) + if (!SystemState::Get()->hardware()->IsNormalBootMode()) flags |= static_cast(ErrorCode::kDevModeFlag); if (install_plan_ && install_plan_->is_resume) flags |= static_cast(ErrorCode::kResumedFlag); - if (!system_state_->hardware()->IsOfficialBuild()) + if (!SystemState::Get()->hardware()->IsOfficialBuild()) flags |= static_cast(ErrorCode::kTestImageFlag); if (!omaha_request_params_->IsUpdateUrlOfficial()) { @@ -1529,7 +1520,7 @@ uint32_t UpdateAttempter::GetErrorCodeFlags() { bool UpdateAttempter::ShouldCancel(ErrorCode* cancel_reason) { // Check if the channel we're attempting to update to is the same as the // target channel currently chosen by the user. 
- OmahaRequestParams* params = system_state_->request_params(); + OmahaRequestParams* params = SystemState::Get()->request_params(); if (params->download_channel() != params->target_channel()) { LOG(ERROR) << "Aborting download as target channel: " << params->target_channel() @@ -1587,21 +1578,20 @@ bool UpdateAttempter::ScheduleErrorEventAction() { return false; LOG(ERROR) << "Update failed."; - system_state_->payload_state()->UpdateFailed(error_event_->error_code); + SystemState::Get()->payload_state()->UpdateFailed(error_event_->error_code); // Send metrics if it was a rollback. if (install_plan_ && install_plan_->is_rollback) { - system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics( + SystemState::Get()->metrics_reporter()->ReportEnterpriseRollbackMetrics( /*success=*/false, install_plan_->version); } // Send it to Omaha. LOG(INFO) << "Reporting the error event"; auto error_event_action = std::make_unique( - system_state_, error_event_.release(), // Pass ownership. std::make_unique(GetProxyResolver(), - system_state_->hardware()), + SystemState::Get()->hardware()), false, session_id_); processor_->EnqueueAction(std::move(error_event_action)); @@ -1644,10 +1634,9 @@ void UpdateAttempter::PingOmaha() { ResetInteractivityFlags(); auto ping_action = std::make_unique( - system_state_, nullptr, std::make_unique(GetProxyResolver(), - system_state_->hardware()), + SystemState::Get()->hardware()), true, "" /* session_id */); processor_->set_delegate(nullptr); @@ -1730,9 +1719,9 @@ void UpdateAttempter::UpdateEngineStarted() { // in case we rebooted because of a crash of the old version, so we // can do a proper crash report with correct information. // This must be done before calling - // system_state_->payload_state()->UpdateEngineStarted() since it will + // SystemState::Get()->payload_state()->UpdateEngineStarted() since it will // delete SystemUpdated marker file. - if (system_state_->system_rebooted() && + if (SystemState::Get()->system_rebooted() && prefs_->Exists(kPrefsSystemUpdatedMarker)) { if (!prefs_->GetString(kPrefsPreviousVersion, &prev_version_)) { // If we fail to get the version string, make sure it stays empty. 
@@ -1740,20 +1729,19 @@ void UpdateAttempter::UpdateEngineStarted() { } } - system_state_->payload_state()->UpdateEngineStarted(); + SystemState::Get()->payload_state()->UpdateEngineStarted(); StartP2PAtStartup(); - excluder_ = CreateExcluder(system_state_->prefs()); + excluder_ = CreateExcluder(SystemState::Get()->prefs()); } bool UpdateAttempter::StartP2PAtStartup() { - if (system_state_ == nullptr || - !system_state_->p2p_manager()->IsP2PEnabled()) { + if (!SystemState::Get()->p2p_manager()->IsP2PEnabled()) { LOG(INFO) << "Not starting p2p at startup since it's not enabled."; return false; } - if (system_state_->p2p_manager()->CountSharedFiles() < 1) { + if (SystemState::Get()->p2p_manager()->CountSharedFiles() < 1) { LOG(INFO) << "Not starting p2p at startup since our application " << "is not sharing any files."; return false; @@ -1763,22 +1751,19 @@ bool UpdateAttempter::StartP2PAtStartup() { } bool UpdateAttempter::StartP2PAndPerformHousekeeping() { - if (system_state_ == nullptr) - return false; - - if (!system_state_->p2p_manager()->IsP2PEnabled()) { + if (!SystemState::Get()->p2p_manager()->IsP2PEnabled()) { LOG(INFO) << "Not starting p2p since it's not enabled."; return false; } LOG(INFO) << "Ensuring that p2p is running."; - if (!system_state_->p2p_manager()->EnsureP2PRunning()) { + if (!SystemState::Get()->p2p_manager()->EnsureP2PRunning()) { LOG(ERROR) << "Error starting p2p."; return false; } LOG(INFO) << "Performing p2p housekeeping."; - if (!system_state_->p2p_manager()->PerformHousekeeping()) { + if (!SystemState::Get()->p2p_manager()->PerformHousekeeping()) { LOG(ERROR) << "Error performing housekeeping for p2p."; return false; } @@ -1825,12 +1810,12 @@ bool UpdateAttempter::IsAnyUpdateSourceAllowed() const { // * The debugd dev features are accessible (i.e. in devmode with no owner). // This protects users running a base image, while still allowing a specific // window (gated by the debug dev features) where `cros flash` is usable. 
- if (!system_state_->hardware()->IsOfficialBuild()) { + if (!SystemState::Get()->hardware()->IsOfficialBuild()) { LOG(INFO) << "Non-official build; allowing any update source."; return true; } - if (system_state_->hardware()->AreDevFeaturesEnabled()) { + if (SystemState::Get()->hardware()->AreDevFeaturesEnabled()) { LOG(INFO) << "Developer features enabled; allowing custom update sources."; return true; } @@ -1841,20 +1826,22 @@ bool UpdateAttempter::IsAnyUpdateSourceAllowed() const { } void UpdateAttempter::ReportTimeToUpdateAppliedMetric() { - const policy::DevicePolicy* device_policy = system_state_->device_policy(); + const policy::DevicePolicy* device_policy = + SystemState::Get()->device_policy(); if (device_policy && device_policy->IsEnterpriseEnrolled()) { vector parsed_intervals; bool has_time_restrictions = device_policy->GetDisallowedTimeIntervals(&parsed_intervals); int64_t update_first_seen_at_int; - if (system_state_->prefs()->Exists(kPrefsUpdateFirstSeenAt)) { - if (system_state_->prefs()->GetInt64(kPrefsUpdateFirstSeenAt, - &update_first_seen_at_int)) { + if (SystemState::Get()->prefs()->Exists(kPrefsUpdateFirstSeenAt)) { + if (SystemState::Get()->prefs()->GetInt64(kPrefsUpdateFirstSeenAt, + &update_first_seen_at_int)) { TimeDelta update_delay = - system_state_->clock()->GetWallclockTime() - + SystemState::Get()->clock()->GetWallclockTime() - Time::FromInternalValue(update_first_seen_at_int); - system_state_->metrics_reporter() + SystemState::Get() + ->metrics_reporter() ->ReportEnterpriseUpdateSeenToDownloadDays(has_time_restrictions, update_delay.InDays()); } diff --git a/cros/update_attempter.h b/cros/update_attempter.h index 24c6f54e..09e613fa 100644 --- a/cros/update_attempter.h +++ b/cros/update_attempter.h @@ -65,7 +65,7 @@ class UpdateAttempter : public ActionProcessorDelegate, using UpdateAttemptFlags = update_engine::UpdateAttemptFlags; static const int kMaxDeltaUpdateFailures; - UpdateAttempter(SystemState* system_state, CertificateChecker* cert_checker); + explicit UpdateAttempter(CertificateChecker* cert_checker); ~UpdateAttempter() override; // Further initialization to be done post construction. @@ -360,7 +360,7 @@ class UpdateAttempter : public ActionProcessorDelegate, // Calculates all the scattering related parameters (such as waiting period, // which type of scattering is enabled, etc.) and also updates/deletes // the corresponding prefs file used in scattering. Should be called - // only after the device policy has been loaded and set in the system_state_. + // only after the device policy has been loaded and set in the system state. void CalculateScatteringParams(bool interactive); // Sets a random value for the waiting period to wait for before downloading @@ -460,10 +460,6 @@ class UpdateAttempter : public ActionProcessorDelegate, std::unique_ptr processor_; - // External state of the system outside the update_engine process - // carved out separately to mock out easily in unit tests. - SystemState* system_state_; - // Pointer to the certificate checker instance to use. CertificateChecker* cert_checker_; @@ -474,7 +470,7 @@ class UpdateAttempter : public ActionProcessorDelegate, std::unique_ptr install_plan_; // Pointer to the preferences store interface. This is just a cached - // copy of system_state->prefs() because it's used in many methods and + // copy of SystemState::Get()->prefs() because it's used in many methods and // is convenient this way. 
PrefsInterface* prefs_ = nullptr; diff --git a/cros/update_attempter_unittest.cc b/cros/update_attempter_unittest.cc index f3211a0e..ab4a5f2c 100644 --- a/cros/update_attempter_unittest.cc +++ b/cros/update_attempter_unittest.cc @@ -169,8 +169,7 @@ const char kRollbackVersion[] = "10575.39.2"; // methods. class UpdateAttempterUnderTest : public UpdateAttempter { public: - explicit UpdateAttempterUnderTest(SystemState* system_state) - : UpdateAttempter(system_state, nullptr) {} + UpdateAttempterUnderTest() : UpdateAttempter(nullptr) {} void Update(const UpdateCheckParams& params) override { update_called_ = true; @@ -223,27 +222,24 @@ class UpdateAttempterUnderTest : public UpdateAttempter { class UpdateAttempterTest : public ::testing::Test { protected: - UpdateAttempterTest() - : certificate_checker_(fake_system_state_.mock_prefs(), - &openssl_wrapper_) { + void SetUp() override { // Override system state members. - fake_system_state_.set_connection_manager(&mock_connection_manager); - fake_system_state_.set_update_attempter(&attempter_); - fake_system_state_.set_dlcservice(&mock_dlcservice_); - fake_system_state_.set_update_manager(&mock_update_manager_); + FakeSystemState::CreateInstance(); + FakeSystemState::Get()->set_connection_manager(&mock_connection_manager); + FakeSystemState::Get()->set_update_attempter(&attempter_); + FakeSystemState::Get()->set_dlcservice(&mock_dlcservice_); + FakeSystemState::Get()->set_update_manager(&mock_update_manager_); loop_.SetAsCurrent(); - certificate_checker_.Init(); + certificate_checker_.reset(new CertificateChecker( + FakeSystemState::Get()->mock_prefs(), &openssl_wrapper_)); + certificate_checker_->Init(); attempter_.set_forced_update_pending_callback( new base::Callback(base::Bind([](bool, bool) {}))); // Finish initializing the attempter. attempter_.Init(); - } - void SetUp() override { - EXPECT_NE(nullptr, attempter_.system_state_); - EXPECT_NE(nullptr, attempter_.system_state_->update_manager()); EXPECT_EQ(0, attempter_.http_response_code_); EXPECT_EQ(UpdateStatus::IDLE, attempter_.status_); EXPECT_EQ(0.0, attempter_.download_progress_); @@ -252,21 +248,21 @@ class UpdateAttempterTest : public ::testing::Test { EXPECT_EQ(0ULL, attempter_.new_payload_size_); processor_ = new NiceMock(); attempter_.processor_.reset(processor_); // Transfers ownership. - prefs_ = fake_system_state_.mock_prefs(); + prefs_ = FakeSystemState::Get()->mock_prefs(); // Setup store/load semantics of P2P properties via the mock |PayloadState|. 
actual_using_p2p_for_downloading_ = false; - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), SetUsingP2PForDownloading(_)) .WillRepeatedly(SaveArg<0>(&actual_using_p2p_for_downloading_)); - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), GetUsingP2PForDownloading()) .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_downloading_)); actual_using_p2p_for_sharing_ = false; - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), SetUsingP2PForSharing(_)) .WillRepeatedly(SaveArg<0>(&actual_using_p2p_for_sharing_)); - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), GetUsingP2PForDownloading()) .WillRepeatedly(ReturnPointee(&actual_using_p2p_for_sharing_)); } @@ -320,16 +316,14 @@ class UpdateAttempterTest : public ::testing::Test { base::SingleThreadTaskExecutor base_loop_{base::MessagePumpType::IO}; brillo::BaseMessageLoop loop_{base_loop_.task_runner()}; - FakeSystemState fake_system_state_; - UpdateAttempterUnderTest attempter_{&fake_system_state_}; + UpdateAttempterUnderTest attempter_; OpenSSLWrapper openssl_wrapper_; - CertificateChecker certificate_checker_; + std::unique_ptr certificate_checker_; MockDlcService mock_dlcservice_; MockUpdateManager mock_update_manager_; NiceMock* processor_; - NiceMock* - prefs_; // Shortcut to |fake_system_state_->mock_prefs()|. + NiceMock* prefs_; NiceMock mock_connection_manager; // |CheckForUpdate()| test params. @@ -348,9 +342,9 @@ class UpdateAttempterTest : public ::testing::Test { void UpdateAttempterTest::TestCheckForUpdate() { // Setup attempter_.status_ = cfu_params_.status; - fake_system_state_.fake_hardware()->SetIsOfficialBuild( + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild( cfu_params_.is_official_build); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled( + FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled( cfu_params_.are_dev_features_enabled); // Invocation @@ -506,7 +500,6 @@ TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) { unique_ptr fetcher(new MockHttpFetcher("", 0, nullptr)); fetcher->FailTransfer(503); // Sets the HTTP response code. DownloadAction action(prefs_, - nullptr, nullptr, nullptr, fetcher.release(), @@ -610,8 +603,7 @@ TEST_F(UpdateAttempterTest, BroadcastCompleteDownloadTest) { TEST_F(UpdateAttempterTest, ActionCompletedOmahaRequestTest) { unique_ptr fetcher(new MockHttpFetcher("", 0, nullptr)); fetcher->FailTransfer(500); // Sets the HTTP response code. 
- OmahaRequestAction action( - &fake_system_state_, nullptr, std::move(fetcher), false, ""); + OmahaRequestAction action(nullptr, std::move(fetcher), false, ""); ObjectCollectorAction collector_action; BondActions(&action, &collector_action); OmahaResponse response; @@ -630,7 +622,7 @@ TEST_F(UpdateAttempterTest, ConstructWithUpdatedMarkerTest) { string boot_id; EXPECT_TRUE(utils::GetBootId(&boot_id)); fake_prefs.SetString(kPrefsUpdateCompletedOnBootId, boot_id); - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); attempter_.Init(); EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status()); } @@ -639,12 +631,10 @@ TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) { EXPECT_EQ(ErrorCode::kSuccess, GetErrorCodeForAction(nullptr, ErrorCode::kSuccess)); - FakeSystemState fake_system_state; - OmahaRequestAction omaha_request_action( - &fake_system_state, nullptr, nullptr, false, ""); + OmahaRequestAction omaha_request_action(nullptr, nullptr, false, ""); EXPECT_EQ(ErrorCode::kOmahaRequestError, GetErrorCodeForAction(&omaha_request_action, ErrorCode::kError)); - OmahaResponseHandlerAction omaha_response_handler_action(&fake_system_state_); + OmahaResponseHandlerAction omaha_response_handler_action; EXPECT_EQ( ErrorCode::kOmahaResponseHandlerError, GetErrorCodeForAction(&omaha_response_handler_action, ErrorCode::kError)); @@ -654,7 +644,8 @@ TEST_F(UpdateAttempterTest, GetErrorCodeForActionTest) { ErrorCode::kFilesystemVerifierError, GetErrorCodeForAction(&filesystem_verifier_action, ErrorCode::kError)); PostinstallRunnerAction postinstall_runner_action( - fake_system_state.fake_boot_control(), fake_system_state.fake_hardware()); + FakeSystemState::Get()->fake_boot_control(), + FakeSystemState::Get()->fake_hardware()); EXPECT_EQ( ErrorCode::kPostinstallRunnerError, GetErrorCodeForAction(&postinstall_runner_action, ErrorCode::kError)); @@ -709,16 +700,17 @@ TEST_F(UpdateAttempterTest, MarkDeltaUpdateFailureTest) { TEST_F(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest) { EXPECT_CALL(*processor_, EnqueueAction(_)).Times(0); EXPECT_CALL(*processor_, StartProcessing()).Times(0); - EXPECT_CALL(*fake_system_state_.mock_payload_state(), UpdateFailed(_)) + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), UpdateFailed(_)) .Times(0); OmahaResponse response; string url1 = "http://url1"; response.packages.push_back({.payload_urls = {url1, "https://url"}}); - EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl()) + EXPECT_CALL(*(FakeSystemState::Get()->mock_payload_state()), GetCurrentUrl()) .WillRepeatedly(Return(url1)); - fake_system_state_.mock_payload_state()->SetResponse(response); + FakeSystemState::Get()->mock_payload_state()->SetResponse(response); attempter_.ScheduleErrorEventAction(); - EXPECT_EQ(url1, fake_system_state_.mock_payload_state()->GetCurrentUrl()); + EXPECT_EQ(url1, + FakeSystemState::Get()->mock_payload_state()->GetCurrentUrl()); } TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) { @@ -727,7 +719,7 @@ TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) { &AbstractAction::Type, OmahaRequestAction::StaticType())))); EXPECT_CALL(*processor_, StartProcessing()); ErrorCode err = ErrorCode::kError; - EXPECT_CALL(*fake_system_state_.mock_payload_state(), UpdateFailed(err)); + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), UpdateFailed(err)); attempter_.error_event_.reset(new OmahaEvent( OmahaEvent::kTypeUpdateComplete, OmahaEvent::kResultError, err)); 
attempter_.ScheduleErrorEventAction(); @@ -799,7 +791,7 @@ void UpdateAttempterTest::RollbackTestStart(bool enterprise_rollback, // Create a device policy so that we can change settings. auto device_policy = std::make_unique(); EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); if (enterprise_rollback) { // We return an empty owner as this is an enterprise. EXPECT_CALL(*device_policy, GetOwner(_)) @@ -818,8 +810,8 @@ void UpdateAttempterTest::RollbackTestStart(bool enterprise_rollback, BootControlInterface::Slot rollback_slot = 1; LOG(INFO) << "Test Mark Bootable: " << BootControlInterface::SlotName(rollback_slot); - fake_system_state_.fake_boot_control()->SetSlotBootable(rollback_slot, - true); + FakeSystemState::Get()->fake_boot_control()->SetSlotBootable(rollback_slot, + true); } bool is_rollback_allowed = false; @@ -944,7 +936,7 @@ TEST_F(UpdateAttempterTest, CreatePendingErrorEventResumedTest) { TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenNotEnabled) { MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()).Times(0); attempter_.UpdateEngineStarted(); @@ -952,7 +944,7 @@ TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenNotEnabled) { TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenEnabledButNotSharing) { MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(true); EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()).Times(0); attempter_.UpdateEngineStarted(); @@ -960,7 +952,7 @@ TEST_F(UpdateAttempterTest, P2PNotStartedAtStartupWhenEnabledButNotSharing) { TEST_F(UpdateAttempterTest, P2PStartedAtStartupWhenEnabledAndSharing) { MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(true); mock_p2p_manager.fake().SetCountSharedFilesResult(1); EXPECT_CALL(mock_p2p_manager, EnsureP2PRunning()); @@ -978,7 +970,7 @@ void UpdateAttempterTest::P2PNotEnabledStart() { // If P2P is not enabled, check that we do not attempt housekeeping // and do not convey that P2P is to be used. MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(false); EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0); attempter_.Update({}); @@ -998,7 +990,7 @@ void UpdateAttempterTest::P2PEnabledStartingFailsStart() { // If P2P is enabled, but starting it fails ensure we don't do // any housekeeping and do not convey that P2P should be used. MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(true); mock_p2p_manager.fake().SetEnsureP2PRunningResult(false); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); @@ -1021,7 +1013,7 @@ void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() { // If P2P is enabled, starting it works but housekeeping fails, ensure // we do not convey P2P is to be used. 
MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); mock_p2p_manager.fake().SetP2PEnabled(true); mock_p2p_manager.fake().SetEnsureP2PRunningResult(true); mock_p2p_manager.fake().SetPerformHousekeepingResult(false); @@ -1041,7 +1033,7 @@ TEST_F(UpdateAttempterTest, P2PEnabled) { void UpdateAttempterTest::P2PEnabledStart() { MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); // If P2P is enabled and starting it works, check that we performed // housekeeping and that we convey P2P should be used. mock_p2p_manager.fake().SetP2PEnabled(true); @@ -1063,7 +1055,7 @@ TEST_F(UpdateAttempterTest, P2PEnabledInteractive) { void UpdateAttempterTest::P2PEnabledInteractiveStart() { MockP2PManager mock_p2p_manager; - fake_system_state_.set_p2p_manager(&mock_p2p_manager); + FakeSystemState::Get()->set_p2p_manager(&mock_p2p_manager); // For an interactive check, if P2P is enabled and starting it // works, check that we performed housekeeping and that we convey // P2P should be used for sharing but NOT for downloading. @@ -1092,7 +1084,7 @@ void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() { auto device_policy = std::make_unique(); EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_)) .WillRepeatedly( @@ -1122,7 +1114,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { FakePrefs fake_prefs; attempter_.prefs_ = &fake_prefs; - fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value)); @@ -1130,7 +1122,7 @@ void UpdateAttempterTest::DecrementUpdateCheckCountTestStart() { auto device_policy = std::make_unique(); EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_)) .WillRepeatedly( @@ -1179,8 +1171,8 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { FakePrefs fake_prefs; attempter_.prefs_ = &fake_prefs; - fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); + FakeSystemState::Get()->set_prefs(&fake_prefs); EXPECT_TRUE( fake_prefs.SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value)); @@ -1192,7 +1184,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { auto device_policy = std::make_unique(); EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_)) .WillRepeatedly( @@ -1221,7 +1213,7 @@ void UpdateAttempterTest::NoScatteringDoneDuringManualUpdateTestStart() { void UpdateAttempterTest::SetUpStagingTest(const StagingSchedule& schedule, FakePrefs* prefs) { attempter_.prefs_ = prefs; - fake_system_state_.set_prefs(prefs); 
+ FakeSystemState::Get()->set_prefs(prefs); int64_t initial_value = 8; EXPECT_TRUE( @@ -1231,7 +1223,7 @@ void UpdateAttempterTest::SetUpStagingTest(const StagingSchedule& schedule, auto device_policy = std::make_unique(); EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); EXPECT_CALL(*device_policy, GetDeviceUpdateStagingSchedule(_)) .WillRepeatedly(DoAll(SetArgPointee<0>(schedule), Return(true))); @@ -1250,7 +1242,7 @@ TEST_F(UpdateAttempterTest, StagingSetsPrefsAndTurnsOffScattering) { void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() { // Tests that staging sets its prefs properly and turns off scattering. - fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); @@ -1307,7 +1299,7 @@ TEST_F(UpdateAttempterTest, StagingOffIfInteractive) { void UpdateAttempterTest::StagingOffIfInteractiveStart() { // Tests that staging is turned off when an interactive update is requested. - fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch()); FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); @@ -1326,8 +1318,8 @@ TEST_F(UpdateAttempterTest, StagingOffIfOobe) { void UpdateAttempterTest::StagingOffIfOobeStart() { // Tests that staging is turned off if OOBE hasn't been completed. - fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true); - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEEnabled(true); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); FakePrefs fake_prefs; SetUpStagingTest(kValidStagingSchedule, &fake_prefs); @@ -1342,8 +1334,8 @@ TEST_F(UpdateAttempterTest, ReportDailyMetrics) { FakeClock fake_clock; FakePrefs fake_prefs; - fake_system_state_.set_clock(&fake_clock); - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); Time epoch = Time::FromInternalValue(0); fake_clock.SetWallclockTime(epoch); @@ -1404,9 +1396,9 @@ TEST_F(UpdateAttempterTest, ReportDailyMetrics) { TEST_F(UpdateAttempterTest, BootTimeInUpdateMarkerFile) { FakeClock fake_clock; fake_clock.SetBootTime(Time::FromTimeT(42)); - fake_system_state_.set_clock(&fake_clock); + FakeSystemState::Get()->set_clock(&fake_clock); FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); attempter_.Init(); Time boot_time; @@ -1419,19 +1411,19 @@ TEST_F(UpdateAttempterTest, BootTimeInUpdateMarkerFile) { } TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedUnofficial) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(false); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(false); EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed()); } TEST_F(UpdateAttempterTest, AnyUpdateSourceAllowedOfficialDevmode) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(true); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true); + FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(true); EXPECT_TRUE(attempter_.IsAnyUpdateSourceAllowed()); } 
TEST_F(UpdateAttempterTest, AnyUpdateSourceDisallowedOfficialNormal) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true); + FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(false); EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed()); } @@ -1623,8 +1615,8 @@ TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback2) { } TEST_F(UpdateAttempterTest, CheckForInstallTest) { - fake_system_state_.fake_hardware()->SetIsOfficialBuild(true); - fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false); + FakeSystemState::Get()->fake_hardware()->SetIsOfficialBuild(true); + FakeSystemState::Get()->fake_hardware()->SetAreDevFeaturesEnabled(false); attempter_.CheckForInstall({}, "autest"); EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url()); @@ -1662,19 +1654,21 @@ TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) { UpdateCheckParams params; attempter_.CalculateUpdateParams({.target_version_prefix = "1234"}); EXPECT_EQ("1234", - fake_system_state_.request_params()->target_version_prefix()); + FakeSystemState::Get()->request_params()->target_version_prefix()); attempter_.CalculateUpdateParams({}); - EXPECT_TRUE( - fake_system_state_.request_params()->target_version_prefix().empty()); + EXPECT_TRUE(FakeSystemState::Get() + ->request_params() + ->target_version_prefix() + .empty()); } TEST_F(UpdateAttempterTest, TargetChannelHintSetAndReset) { attempter_.CalculateUpdateParams({.lts_tag = "hint"}); - EXPECT_EQ("hint", fake_system_state_.request_params()->lts_tag()); + EXPECT_EQ("hint", FakeSystemState::Get()->request_params()->lts_tag()); attempter_.CalculateUpdateParams({}); - EXPECT_TRUE(fake_system_state_.request_params()->lts_tag().empty()); + EXPECT_TRUE(FakeSystemState::Get()->request_params()->lts_tag().empty()); } TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { @@ -1683,44 +1677,47 @@ TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) { .rollback_allowed = true, .rollback_allowed_milestones = 4, }); - EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed()); - EXPECT_EQ(4, - fake_system_state_.request_params()->rollback_allowed_milestones()); + EXPECT_TRUE(FakeSystemState::Get()->request_params()->rollback_allowed()); + EXPECT_EQ( + 4, + FakeSystemState::Get()->request_params()->rollback_allowed_milestones()); attempter_.CalculateUpdateParams({ .target_version_prefix = "1234", .rollback_allowed_milestones = 4, }); - EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed()); - EXPECT_EQ(4, - fake_system_state_.request_params()->rollback_allowed_milestones()); + EXPECT_FALSE(FakeSystemState::Get()->request_params()->rollback_allowed()); + EXPECT_EQ( + 4, + FakeSystemState::Get()->request_params()->rollback_allowed_milestones()); } TEST_F(UpdateAttempterTest, ChannelDowngradeNoRollback) { base::ScopedTempDir tempdir; ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + FakeSystemState::Get()->request_params()->set_root(tempdir.GetPath().value()); attempter_.CalculateUpdateParams({ .target_channel = "stable-channel", }); - EXPECT_FALSE(fake_system_state_.request_params()->is_powerwash_allowed()); + EXPECT_FALSE( + FakeSystemState::Get()->request_params()->is_powerwash_allowed()); } TEST_F(UpdateAttempterTest, ChannelDowngradeRollback) { base::ScopedTempDir tempdir; 
ASSERT_TRUE(tempdir.CreateUniqueTempDir()); - fake_system_state_.request_params()->set_root(tempdir.GetPath().value()); + FakeSystemState::Get()->request_params()->set_root(tempdir.GetPath().value()); attempter_.CalculateUpdateParams({ .rollback_on_channel_downgrade = true, .target_channel = "stable-channel", }); - EXPECT_TRUE(fake_system_state_.request_params()->is_powerwash_allowed()); + EXPECT_TRUE(FakeSystemState::Get()->request_params()->is_powerwash_allowed()); } TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) { // Construct an OmahaResponseHandlerAction that has processed an InstallPlan, // but the update is being deferred by the Policy. - OmahaResponseHandlerAction response_action(&fake_system_state_); + OmahaResponseHandlerAction response_action; response_action.install_plan_.version = "a.b.c.d"; response_action.install_plan_.payloads.push_back( {.size = 1234ULL, .type = InstallPayloadType::kFull}); @@ -1781,14 +1778,14 @@ TEST_F(UpdateAttempterTest, RollbackNotAllowed) { UpdateCheckParams params = {.updates_enabled = true, .rollback_allowed = false}; attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params); - EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed()); + EXPECT_FALSE(FakeSystemState::Get()->request_params()->rollback_allowed()); } TEST_F(UpdateAttempterTest, RollbackAllowed) { UpdateCheckParams params = {.updates_enabled = true, .rollback_allowed = true}; attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params); - EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed()); + EXPECT_TRUE(FakeSystemState::Get()->request_params()->rollback_allowed()); } TEST_F(UpdateAttempterTest, InteractiveUpdateUsesPassedRestrictions) { @@ -1815,7 +1812,8 @@ TEST_F(UpdateAttempterTest, NonInteractiveUpdateUsesSetRestrictions) { void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, bool is_policy_loaded, bool expected_reset) { - EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetRollbackHappened()) + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), + GetRollbackHappened()) .WillRepeatedly(Return(true)); auto mock_policy_provider = std::make_unique>(); @@ -1826,7 +1824,7 @@ void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer, const policy::MockDevicePolicy device_policy; EXPECT_CALL(*mock_policy_provider, GetDevicePolicy()) .WillRepeatedly(ReturnRef(device_policy)); - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), SetRollbackHappened(false)) .Times(expected_reset ? 
1 : 0); attempter_.policy_provider_ = std::move(mock_policy_provider); @@ -1868,7 +1866,7 @@ TEST_F(UpdateAttempterTest, SetRollbackHappenedRollback) { attempter_.install_plan_.reset(new InstallPlan); attempter_.install_plan_->is_rollback = true; - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), SetRollbackHappened(true)) .Times(1); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -1878,7 +1876,7 @@ TEST_F(UpdateAttempterTest, SetRollbackHappenedNotRollback) { attempter_.install_plan_.reset(new InstallPlan); attempter_.install_plan_->is_rollback = false; - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), SetRollbackHappened(true)) .Times(0); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -1889,7 +1887,7 @@ TEST_F(UpdateAttempterTest, RollbackMetricsRollbackSuccess) { attempter_.install_plan_->is_rollback = true; attempter_.install_plan_->version = kRollbackVersion; - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseRollbackMetrics(true, kRollbackVersion)) .Times(1); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -1900,7 +1898,7 @@ TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess) { attempter_.install_plan_->is_rollback = false; attempter_.install_plan_->version = kRollbackVersion; - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseRollbackMetrics(_, _)) .Times(0); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -1911,7 +1909,7 @@ TEST_F(UpdateAttempterTest, RollbackMetricsRollbackFailure) { attempter_.install_plan_->is_rollback = true; attempter_.install_plan_->version = kRollbackVersion; - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseRollbackMetrics(false, kRollbackVersion)) .Times(1); MockAction action; @@ -1924,7 +1922,7 @@ TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackFailure) { attempter_.install_plan_->is_rollback = false; attempter_.install_plan_->version = kRollbackVersion; - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseRollbackMetrics(_, _)) .Times(0); MockAction action; @@ -1933,7 +1931,7 @@ TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackFailure) { } TEST_F(UpdateAttempterTest, TimeToUpdateAppliedMetricFailure) { - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseUpdateSeenToDownloadDays(_, _)) .Times(0); attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy); @@ -1941,12 +1939,12 @@ TEST_F(UpdateAttempterTest, TimeToUpdateAppliedMetricFailure) { TEST_F(UpdateAttempterTest, TimeToUpdateAppliedOnNonEnterprise) { auto device_policy = std::make_unique(); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); // Make device policy return that this is not enterprise enrolled EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(false)); // Ensure that the metric is not recorded. 
- EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseUpdateSeenToDownloadDays(_, _)) .Times(0); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -1956,7 +1954,7 @@ TEST_F(UpdateAttempterTest, TimeToUpdateAppliedWithTimeRestrictionMetricSuccess) { constexpr int kDaysToUpdate = 15; auto device_policy = std::make_unique(); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); // Make device policy return that this is enterprise enrolled EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true)); // Pretend that there's a time restriction policy in place @@ -1973,10 +1971,10 @@ TEST_F(UpdateAttempterTest, update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate); fake_clock.SetWallclockTime(update_finished_at); - fake_system_state_.set_clock(&fake_clock); - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseUpdateSeenToDownloadDays(true, kDaysToUpdate)) .Times(1); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -1986,7 +1984,7 @@ TEST_F(UpdateAttempterTest, TimeToUpdateAppliedWithoutTimeRestrictionMetricSuccess) { constexpr int kDaysToUpdate = 15; auto device_policy = std::make_unique(); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); // Make device policy return that this is enterprise enrolled EXPECT_CALL(*device_policy, IsEnterpriseEnrolled()).WillOnce(Return(true)); // Pretend that there's no time restriction policy in place @@ -2003,10 +2001,10 @@ TEST_F(UpdateAttempterTest, update_first_seen_at + TimeDelta::FromDays(kDaysToUpdate); fake_clock.SetWallclockTime(update_finished_at); - fake_system_state_.set_clock(&fake_clock); - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_clock(&fake_clock); + FakeSystemState::Get()->set_prefs(&fake_prefs); - EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(), + EXPECT_CALL(*FakeSystemState::Get()->mock_metrics_reporter(), ReportEnterpriseUpdateSeenToDownloadDays(false, kDaysToUpdate)) .Times(1); attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess); @@ -2160,7 +2158,7 @@ void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) { // policy is set and the device is enterprise enrolled based on |set_token|. string token = set_token ? 
"some_token" : ""; auto device_policy = std::make_unique(); - fake_system_state_.set_device_policy(device_policy.get()); + FakeSystemState::Get()->set_device_policy(device_policy.get()); EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true)); if (set_token) @@ -2372,12 +2370,12 @@ TEST_F(UpdateAttempterTest, MissingEolTest) { TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { string dlc_id = "dlc0"; FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); attempter_.is_install_ = true; attempter_.dlc_ids_ = {dlc_id}; attempter_.CalculateDlcParams(); - OmahaRequestParams* params = fake_system_state_.request_params(); + OmahaRequestParams* params = FakeSystemState::Get()->request_params(); EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); OmahaRequestParams::AppParams dlc_app_params = params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); @@ -2387,16 +2385,16 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) { // the values sent by Omaha. auto last_active_key = PrefsInterface::CreateSubKey( {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); - EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key)); + EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_active_key)); auto last_rollcall_key = PrefsInterface::CreateSubKey( {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); - EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key)); + EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_rollcall_key)); } TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { string dlc_id = "dlc0"; FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2404,7 +2402,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { attempter_.is_install_ = false; attempter_.CalculateDlcParams(); - OmahaRequestParams* params = fake_system_state_.request_params(); + OmahaRequestParams* params = FakeSystemState::Get()->request_params(); EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); OmahaRequestParams::AppParams dlc_app_params = params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); @@ -2419,7 +2417,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) { TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { string dlc_id = "dlc0"; MemoryPrefs prefs; - fake_system_state_.set_prefs(&prefs); + FakeSystemState::Get()->set_prefs(&prefs); EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2431,13 +2429,13 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); auto last_rollcall_key = PrefsInterface::CreateSubKey( {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); - fake_system_state_.prefs()->SetString(active_key, "z2yz"); - fake_system_state_.prefs()->SetString(last_active_key, "z2yz"); - fake_system_state_.prefs()->SetString(last_rollcall_key, "z2yz"); + FakeSystemState::Get()->prefs()->SetString(active_key, "z2yz"); + FakeSystemState::Get()->prefs()->SetString(last_active_key, "z2yz"); + FakeSystemState::Get()->prefs()->SetString(last_rollcall_key, "z2yz"); attempter_.is_install_ = false; attempter_.CalculateDlcParams(); - OmahaRequestParams* params = 
fake_system_state_.request_params(); + OmahaRequestParams* params = FakeSystemState::Get()->request_params(); EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); OmahaRequestParams::AppParams dlc_app_params = params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); @@ -2452,7 +2450,7 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) { TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { string dlc_id = "dlc0"; MemoryPrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_)) .WillOnce( DoAll(SetArgPointee<0>(std::vector({dlc_id})), Return(true))); @@ -2465,13 +2463,13 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { auto last_rollcall_key = PrefsInterface::CreateSubKey( {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); - fake_system_state_.prefs()->SetInt64(active_key, 1); - fake_system_state_.prefs()->SetInt64(last_active_key, 78); - fake_system_state_.prefs()->SetInt64(last_rollcall_key, 99); + FakeSystemState::Get()->prefs()->SetInt64(active_key, 1); + FakeSystemState::Get()->prefs()->SetInt64(last_active_key, 78); + FakeSystemState::Get()->prefs()->SetInt64(last_rollcall_key, 99); attempter_.is_install_ = false; attempter_.CalculateDlcParams(); - OmahaRequestParams* params = fake_system_state_.request_params(); + OmahaRequestParams* params = FakeSystemState::Get()->request_params(); EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id))); OmahaRequestParams::AppParams dlc_app_params = params->dlc_apps_params().at(params->GetDlcAppId(dlc_id)); @@ -2486,61 +2484,61 @@ TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) { TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) { string dlc_id = "dlc0"; FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); auto active_key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); auto last_active_key = PrefsInterface::CreateSubKey( {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive}); auto last_rollcall_key = PrefsInterface::CreateSubKey( {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall}); - fake_system_state_.prefs()->SetInt64(active_key, kPingInactiveValue); - fake_system_state_.prefs()->SetInt64(last_active_key, 0); - fake_system_state_.prefs()->SetInt64(last_rollcall_key, 0); - EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); - EXPECT_TRUE(fake_system_state_.prefs()->Exists(last_active_key)); - EXPECT_TRUE(fake_system_state_.prefs()->Exists(last_rollcall_key)); + FakeSystemState::Get()->prefs()->SetInt64(active_key, kPingInactiveValue); + FakeSystemState::Get()->prefs()->SetInt64(last_active_key, 0); + FakeSystemState::Get()->prefs()->SetInt64(last_rollcall_key, 0); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(active_key)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(last_active_key)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(last_rollcall_key)); attempter_.dlc_ids_ = {dlc_id}; attempter_.is_install_ = true; attempter_.CalculateDlcParams(); - EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key)); - EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key)); + EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_active_key)); + EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(last_rollcall_key)); // Active key is set on install. 
- EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(active_key)); int64_t temp_int; - EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetInt64(active_key, &temp_int)); EXPECT_EQ(temp_int, kPingActiveValue); } TEST_F(UpdateAttempterTest, SetDlcActiveValue) { string dlc_id = "dlc0"; FakePrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); attempter_.SetDlcActiveValue(true, dlc_id); int64_t temp_int; auto active_key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive}); - EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key)); - EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(active_key)); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->GetInt64(active_key, &temp_int)); EXPECT_EQ(temp_int, kPingActiveValue); } TEST_F(UpdateAttempterTest, SetDlcInactive) { string dlc_id = "dlc0"; MemoryPrefs fake_prefs; - fake_system_state_.set_prefs(&fake_prefs); + FakeSystemState::Get()->set_prefs(&fake_prefs); auto sub_keys = { kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}; for (auto& sub_key : sub_keys) { auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key}); - fake_system_state_.prefs()->SetInt64(key, 1); - EXPECT_TRUE(fake_system_state_.prefs()->Exists(key)); + FakeSystemState::Get()->prefs()->SetInt64(key, 1); + EXPECT_TRUE(FakeSystemState::Get()->prefs()->Exists(key)); } attempter_.SetDlcActiveValue(false, dlc_id); for (auto& sub_key : sub_keys) { auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key}); - EXPECT_FALSE(fake_system_state_.prefs()->Exists(key)); + EXPECT_FALSE(FakeSystemState::Get()->prefs()->Exists(key)); } } diff --git a/download_action.cc b/download_action.cc index 10dffd2d..adae1281 100644 --- a/download_action.cc +++ b/download_action.cc @@ -29,6 +29,7 @@ #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/error_code_utils.h" #include "update_engine/common/multi_range_http_fetcher.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" #include "update_engine/cros/omaha_request_params.h" #include "update_engine/cros/p2p_manager.h" @@ -42,13 +43,11 @@ namespace chromeos_update_engine { DownloadAction::DownloadAction(PrefsInterface* prefs, BootControlInterface* boot_control, HardwareInterface* hardware, - SystemState* system_state, HttpFetcher* http_fetcher, bool interactive) : prefs_(prefs), boot_control_(boot_control), hardware_(hardware), - system_state_(system_state), http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)), interactive_(interactive), writer_(nullptr), @@ -68,7 +67,8 @@ void DownloadAction::CloseP2PSharingFd(bool delete_p2p_file) { } if (delete_p2p_file) { - FilePath path = system_state_->p2p_manager()->FileGetPath(p2p_file_id_); + FilePath path = + SystemState::Get()->p2p_manager()->FileGetPath(p2p_file_id_); if (unlink(path.value().c_str()) != 0) { PLOG(ERROR) << "Error deleting p2p file " << path.value(); } else { @@ -81,7 +81,7 @@ void DownloadAction::CloseP2PSharingFd(bool delete_p2p_file) { } bool DownloadAction::SetupP2PSharingFd() { - P2PManager* p2p_manager = system_state_->p2p_manager(); + P2PManager* p2p_manager = SystemState::Get()->p2p_manager(); if (!p2p_manager->FileShare(p2p_file_id_, payload_->size)) { LOG(ERROR) << "Unable 
to share file via p2p"; @@ -295,8 +295,9 @@ void DownloadAction::StartDownloading() { } } - if (system_state_ != nullptr) { - const PayloadStateInterface* payload_state = system_state_->payload_state(); + if (SystemState::Get() != nullptr) { + const PayloadStateInterface* payload_state = + SystemState::Get()->payload_state(); string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size); if (payload_state->GetUsingP2PForSharing()) { // If we're sharing the update, store the file_id to convey @@ -309,7 +310,7 @@ void DownloadAction::StartDownloading() { // hash. If this is the case, we NEED to clean it up otherwise // we're essentially timing out other peers downloading from us // (since we're never going to complete the file). - FilePath path = system_state_->p2p_manager()->FileGetPath(file_id); + FilePath path = SystemState::Get()->p2p_manager()->FileGetPath(file_id); if (!path.empty()) { if (unlink(path.value().c_str()) != 0) { PLOG(ERROR) << "Error deleting p2p file " << path.value(); @@ -391,10 +392,10 @@ bool DownloadAction::ReceivedBytes(HttpFetcher* fetcher, // Call p2p_manager_->FileMakeVisible() when we've successfully // verified the manifest! - if (!p2p_visible_ && system_state_ && delta_performer_.get() && + if (!p2p_visible_ && SystemState::Get() && delta_performer_.get() && delta_performer_->IsManifestValid()) { LOG(INFO) << "Manifest has been validated. Making p2p file visible."; - system_state_->p2p_manager()->FileMakeVisible(p2p_file_id_); + SystemState::Get()->p2p_manager()->FileMakeVisible(p2p_file_id_); p2p_visible_ = true; } return true; @@ -416,7 +417,7 @@ void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) { code = delta_performer_->VerifyPayload(payload_->hash, payload_->size); if (code == ErrorCode::kSuccess) { if (payload_ < &install_plan_.payloads.back() && - system_state_->payload_state()->NextPayload()) { + SystemState::Get()->payload_state()->NextPayload()) { LOG(INFO) << "Incrementing to next payload"; // No need to reset if this payload was already applied. 
if (delta_performer_ && !payload_->already_applied) @@ -425,7 +426,7 @@ void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) { bytes_received_previous_payloads_ += payload_->size; payload_++; install_plan_.download_url = - system_state_->payload_state()->GetCurrentUrl(); + SystemState::Get()->payload_state()->GetCurrentUrl(); StartDownloading(); return; } diff --git a/download_action_android_unittest.cc b/download_action_android_unittest.cc index f2229770..7db1c605 100644 --- a/download_action_android_unittest.cc +++ b/download_action_android_unittest.cc @@ -77,7 +77,6 @@ TEST_F(DownloadActionTest, CacheManifestInvalid) { std::make_unique(&prefs, &boot_control, nullptr, - nullptr, http_fetcher, false /* interactive */); download_action->set_in_pipe(action_pipe); diff --git a/download_action_unittest.cc b/download_action_unittest.cc index 5264b0f7..565c6782 100644 --- a/download_action_unittest.cc +++ b/download_action_unittest.cc @@ -57,7 +57,9 @@ using testing::InSequence; using testing::Return; using testing::SetArgPointee; -class DownloadActionTest : public ::testing::Test {}; +class DownloadActionTest : public ::testing::Test { + void SetUp() { FakeSystemState::CreateInstance(); } +}; namespace { @@ -128,9 +130,9 @@ void StartProcessorInRunLoop(ActionProcessor* processor, void TestWithData(const brillo::Blob& data, int fail_write, bool use_download_delegate) { + FakeSystemState::CreateInstance(); brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); - FakeSystemState fake_system_state; ScopedTempFile output_temp_file; TestDirectFileWriter writer; @@ -149,9 +151,9 @@ void TestWithData(const brillo::Blob& data, install_plan.target_slot = 1; // We mark both slots as bootable. Only the target slot should be unbootable // after the download starts. 
- fake_system_state.fake_boot_control()->SetSlotBootable( + FakeSystemState::Get()->fake_boot_control()->SetSlotBootable( install_plan.source_slot, true); - fake_system_state.fake_boot_control()->SetSlotBootable( + FakeSystemState::Get()->fake_boot_control()->SetSlotBootable( install_plan.target_slot, true); auto feeder_action = std::make_unique>(); feeder_action->set_obj(install_plan); @@ -161,9 +163,8 @@ void TestWithData(const brillo::Blob& data, // takes ownership of passed in HttpFetcher auto download_action = std::make_unique(&prefs, - fake_system_state.boot_control(), - fake_system_state.hardware(), - &fake_system_state, + FakeSystemState::Get()->boot_control(), + FakeSystemState::Get()->hardware(), http_fetcher, false /* interactive */); download_action->SetTestFileWriter(&writer); @@ -194,9 +195,9 @@ void TestWithData(const brillo::Blob& data, loop.Run(); EXPECT_FALSE(loop.PendingTasks()); - EXPECT_TRUE(fake_system_state.fake_boot_control()->IsSlotBootable( + EXPECT_TRUE(FakeSystemState::Get()->fake_boot_control()->IsSlotBootable( install_plan.source_slot)); - EXPECT_FALSE(fake_system_state.fake_boot_control()->IsSlotBootable( + EXPECT_FALSE(FakeSystemState::Get()->fake_boot_control()->IsSlotBootable( install_plan.target_slot)); } } // namespace @@ -251,8 +252,7 @@ TEST(DownloadActionTest, MultiPayloadProgressTest) { payload_datas.emplace_back(2 * kMockHttpFetcherChunkSize); brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); - FakeSystemState fake_system_state; - EXPECT_CALL(*fake_system_state.mock_payload_state(), NextPayload()) + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), NextPayload()) .WillOnce(Return(true)); MockFileWriter mock_file_writer; @@ -277,9 +277,8 @@ TEST(DownloadActionTest, MultiPayloadProgressTest) { // takes ownership of passed in HttpFetcher auto download_action = std::make_unique(&prefs, - fake_system_state.boot_control(), - fake_system_state.hardware(), - &fake_system_state, + FakeSystemState::Get()->boot_control(), + FakeSystemState::Get()->hardware(), http_fetcher, false /* interactive */); download_action->SetTestFileWriter(&mock_file_writer); @@ -346,6 +345,7 @@ void TerminateEarlyTestStarter(ActionProcessor* processor) { } void TestTerminateEarly(bool use_download_delegate) { + FakeSystemState::CreateInstance(); brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); @@ -362,13 +362,12 @@ void TestTerminateEarly(bool use_download_delegate) { InstallPlan install_plan; install_plan.payloads.resize(1); feeder_action->set_obj(install_plan); - FakeSystemState fake_system_state_; + MockPrefs prefs; auto download_action = std::make_unique( &prefs, - fake_system_state_.boot_control(), - fake_system_state_.hardware(), - &fake_system_state_, + FakeSystemState::Get()->boot_control(), + FakeSystemState::Get()->hardware(), new MockHttpFetcher(data.data(), data.size(), nullptr), false /* interactive */); download_action->SetTestFileWriter(&writer); @@ -461,6 +460,7 @@ class PassObjectOutTestProcessorDelegate : public ActionProcessorDelegate { } // namespace TEST(DownloadActionTest, PassObjectOutTest) { + FakeSystemState::CreateInstance(); brillo::FakeMessageLoop loop(nullptr); loop.SetAsCurrent(); @@ -475,12 +475,10 @@ TEST(DownloadActionTest, PassObjectOutTest) { auto feeder_action = std::make_unique>(); feeder_action->set_obj(install_plan); MockPrefs prefs; - FakeSystemState fake_system_state_; auto download_action = std::make_unique(&prefs, - fake_system_state_.boot_control(), - fake_system_state_.hardware(), - &fake_system_state_, + 
FakeSystemState::Get()->boot_control(), + FakeSystemState::Get()->hardware(), new MockHttpFetcher("x", 1, nullptr), false /* interactive */); download_action->SetTestFileWriter(&writer); @@ -512,12 +510,15 @@ TEST(DownloadActionTest, PassObjectOutTest) { class P2PDownloadActionTest : public testing::Test { protected: P2PDownloadActionTest() - : start_at_offset_(0), fake_um_(fake_system_state_.fake_clock()) {} + : start_at_offset_(0), fake_um_(FakeSystemState::Get()->fake_clock()) {} ~P2PDownloadActionTest() override {} // Derived from testing::Test. - void SetUp() override { loop_.SetAsCurrent(); } + void SetUp() override { + loop_.SetAsCurrent(); + FakeSystemState::CreateInstance(); + } // Derived from testing::Test. void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); } @@ -539,14 +540,14 @@ class P2PDownloadActionTest : public testing::Test { "cros_au", 3, base::TimeDelta::FromDays(5))); - fake_system_state_.set_p2p_manager(p2p_manager_.get()); + FakeSystemState::Get()->set_p2p_manager(p2p_manager_.get()); } // To be called by tests to perform the download. The // |use_p2p_to_share| parameter is used to indicate whether the // payload should be shared via p2p. void StartDownload(bool use_p2p_to_share) { - EXPECT_CALL(*fake_system_state_.mock_payload_state(), + EXPECT_CALL(*FakeSystemState::Get()->mock_payload_state(), GetUsingP2PForSharing()) .WillRepeatedly(Return(use_p2p_to_share)); @@ -564,9 +565,8 @@ class P2PDownloadActionTest : public testing::Test { // Note that DownloadAction takes ownership of the passed in HttpFetcher. auto download_action = std::make_unique( &prefs, - fake_system_state_.boot_control(), - fake_system_state_.hardware(), - &fake_system_state_, + FakeSystemState::Get()->boot_control(), + FakeSystemState::Get()->hardware(), new MockHttpFetcher(data_.c_str(), data_.length(), nullptr), false /* interactive */); auto http_fetcher = download_action->http_fetcher(); @@ -603,9 +603,6 @@ class P2PDownloadActionTest : public testing::Test { // The ActionProcessor used for running the actions. ActionProcessor processor_; - // A fake system state. - FakeSystemState fake_system_state_; - // The data being downloaded. string data_; diff --git a/metrics_utils.h b/metrics_utils.h index 5952ec34..3aac4e5b 100644 --- a/metrics_utils.h +++ b/metrics_utils.h @@ -30,8 +30,6 @@ namespace chromeos_update_engine { -class SystemState; - namespace metrics_utils { // Transforms a ErrorCode value into a metrics::DownloadErrorCode. 
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc index 29ec290d..7724c9c5 100644 --- a/payload_generator/generate_delta_main.cc +++ b/payload_generator/generate_delta_main.cc @@ -220,7 +220,6 @@ bool ApplyPayload(const string& payload_file, std::make_unique(&prefs, &fake_boot_control, &fake_hardware, - nullptr, new FileFetcher(), true /* interactive */); auto filesystem_verifier_action = std::make_unique( diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc index 8d30f7f1..34397f33 100644 --- a/update_manager/real_system_provider.cc +++ b/update_manager/real_system_provider.cc @@ -24,11 +24,13 @@ #include "update_engine/common/boot_control_interface.h" #include "update_engine/common/hardware_interface.h" +#include "update_engine/common/system_state.h" #include "update_engine/common/utils.h" #include "update_engine/cros/omaha_request_params.h" #include "update_engine/update_manager/generic_variables.h" #include "update_engine/update_manager/variable.h" +using chromeos_update_engine::SystemState; using std::string; namespace chromeos_update_manager { @@ -98,19 +100,20 @@ class RetryPollVariable : public Variable { bool RealSystemProvider::Init() { var_is_normal_boot_mode_.reset(new ConstCopyVariable( - "is_normal_boot_mode", system_state_->hardware()->IsNormalBootMode())); + "is_normal_boot_mode", + SystemState::Get()->hardware()->IsNormalBootMode())); var_is_official_build_.reset(new ConstCopyVariable( - "is_official_build", system_state_->hardware()->IsOfficialBuild())); + "is_official_build", SystemState::Get()->hardware()->IsOfficialBuild())); var_is_oobe_complete_.reset(new CallCopyVariable( "is_oobe_complete", base::Bind(&chromeos_update_engine::HardwareInterface::IsOOBEComplete, - base::Unretained(system_state_->hardware()), + base::Unretained(SystemState::Get()->hardware()), nullptr))); var_num_slots_.reset(new ConstCopyVariable( - "num_slots", system_state_->boot_control()->GetNumSlots())); + "num_slots", SystemState::Get()->boot_control()->GetNumSlots())); var_kiosk_required_platform_version_.reset(new RetryPollVariable( "kiosk_required_platform_version", @@ -120,7 +123,7 @@ bool RealSystemProvider::Init() { var_chromeos_version_.reset(new ConstCopyVariable( "chromeos_version", - base::Version(system_state_->request_params()->app_version()))); + base::Version(SystemState::Get()->request_params()->app_version()))); return true; } diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h index 91fee7f7..558d3be4 100644 --- a/update_manager/real_system_provider.h +++ b/update_manager/real_system_provider.h @@ -22,7 +22,6 @@ #include -#include "update_engine/common/system_state.h" #include "update_engine/update_manager/system_provider.h" namespace org { @@ -37,10 +36,8 @@ namespace chromeos_update_manager { class RealSystemProvider : public SystemProvider { public: RealSystemProvider( - chromeos_update_engine::SystemState* system_state, org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy) - : system_state_(system_state), kiosk_app_proxy_(kiosk_app_proxy) { - } + : kiosk_app_proxy_(kiosk_app_proxy) {} // Initializes the provider and returns whether it succeeded. 
bool Init(); @@ -80,8 +77,6 @@ class RealSystemProvider : public SystemProvider { std::unique_ptr> var_kiosk_required_platform_version_; std::unique_ptr> var_chromeos_version_; - chromeos_update_engine::SystemState* const system_state_; - org::chromium::KioskAppServiceInterfaceProxyInterface* const kiosk_app_proxy_; DISALLOW_COPY_AND_ASSIGN(RealSystemProvider); diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc index 3c77ac75..9abcad06 100644 --- a/update_manager/real_system_provider_unittest.cc +++ b/update_manager/real_system_provider_unittest.cc @@ -29,6 +29,7 @@ #include "update_engine/cros/fake_system_state.h" #include "update_engine/update_manager/umtest_utils.h" +using chromeos_update_engine::FakeSystemState; using org::chromium::KioskAppServiceInterfaceProxyMock; using std::unique_ptr; using testing::_; @@ -45,17 +46,16 @@ namespace chromeos_update_manager { class UmRealSystemProviderTest : public ::testing::Test { protected: void SetUp() override { + FakeSystemState::CreateInstance(); kiosk_app_proxy_mock_.reset(new KioskAppServiceInterfaceProxyMock()); ON_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _)) .WillByDefault( DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true))); - provider_.reset(new RealSystemProvider(&fake_system_state_, - kiosk_app_proxy_mock_.get())); + provider_.reset(new RealSystemProvider(kiosk_app_proxy_mock_.get())); EXPECT_TRUE(provider_->Init()); } - chromeos_update_engine::FakeSystemState fake_system_state_; unique_ptr provider_; unique_ptr kiosk_app_proxy_mock_; @@ -70,17 +70,17 @@ TEST_F(UmRealSystemProviderTest, InitTest) { } TEST_F(UmRealSystemProviderTest, IsOOBECompleteTrue) { - fake_system_state_.fake_hardware()->SetIsOOBEComplete(base::Time()); + FakeSystemState::Get()->fake_hardware()->SetIsOOBEComplete(base::Time()); UmTestUtils::ExpectVariableHasValue(true, provider_->var_is_oobe_complete()); } TEST_F(UmRealSystemProviderTest, IsOOBECompleteFalse) { - fake_system_state_.fake_hardware()->UnsetIsOOBEComplete(); + FakeSystemState::Get()->fake_hardware()->UnsetIsOOBEComplete(); UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_complete()); } TEST_F(UmRealSystemProviderTest, VersionFromRequestParams) { - fake_system_state_.request_params()->set_app_version("1.2.3"); + FakeSystemState::Get()->request_params()->set_app_version("1.2.3"); // Call |Init| again to pick up the version. EXPECT_TRUE(provider_->Init()); diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc index e975b80e..6da30c96 100644 --- a/update_manager/real_updater_provider.cc +++ b/update_manager/real_updater_provider.cc @@ -29,6 +29,7 @@ #include "update_engine/client_library/include/update_engine/update_status.h" #include "update_engine/common/clock_interface.h" #include "update_engine/common/prefs.h" +#include "update_engine/common/system_state.h" #include "update_engine/cros/omaha_request_params.h" #include "update_engine/cros/update_attempter.h" #include "update_engine/update_status_utils.h" @@ -49,25 +50,16 @@ namespace chromeos_update_manager { template class UpdaterVariableBase : public Variable { public: - UpdaterVariableBase(const string& name, - VariableMode mode, - SystemState* system_state) - : Variable(name, mode), system_state_(system_state) {} - - protected: - // The system state used for pulling information from the updater. 
- inline SystemState* system_state() const { return system_state_; } - - private: - SystemState* const system_state_; + UpdaterVariableBase(const string& name, VariableMode mode) + : Variable(name, mode) {} }; // Helper class for issuing a GetStatus() to the UpdateAttempter. class GetStatusHelper { public: - GetStatusHelper(SystemState* system_state, string* errmsg) { - is_success_ = - system_state->update_attempter()->GetStatus(&update_engine_status_); + explicit GetStatusHelper(string* errmsg) { + is_success_ = SystemState::Get()->update_attempter()->GetStatus( + &update_engine_status_); if (!is_success_ && errmsg) { *errmsg = "Failed to get a status update from the update engine"; } @@ -97,12 +89,12 @@ class GetStatusHelper { // A variable reporting the time when a last update check was issued. class LastCheckedTimeVariable : public UpdaterVariableBase