diff --git a/contracts/hydro/src/contract.rs b/contracts/hydro/src/contract.rs index 32e3311..abc9aa7 100644 --- a/contracts/hydro/src/contract.rs +++ b/contracts/hydro/src/contract.rs @@ -16,8 +16,7 @@ use neutron_sdk::sudo::msg::SudoMsg; use crate::error::ContractError; use crate::lsm_integration::{ add_validator_shares_to_round_total, get_total_power_for_round, - get_validator_power_ratio_for_round, initialize_validator_store, validate_denom, - COSMOS_VALIDATOR_PREFIX, + get_validator_power_ratio_for_round, validate_denom, COSMOS_VALIDATOR_PREFIX, }; use crate::msg::{ExecuteMsg, InstantiateMsg, LiquidityDeployment, ProposalToLockups, TrancheInfo}; use crate::query::{ @@ -38,9 +37,13 @@ use crate::score_keeper::{ use crate::state::{ Constants, LockEntry, Proposal, RoundLockPowerSchedule, Tranche, ValidatorInfo, Vote, VoteWithPower, CONSTANTS, ICQ_MANAGERS, LIQUIDITY_DEPLOYMENTS_MAP, LOCKED_TOKENS, LOCKS_MAP, - LOCK_ID, PROPOSAL_MAP, PROPS_BY_SCORE, PROP_ID, TRANCHE_ID, TRANCHE_MAP, VALIDATORS_INFO, - VALIDATORS_PER_ROUND, VALIDATORS_STORE_INITIALIZED, VALIDATOR_TO_QUERY_ID, VOTE_MAP, - VOTING_ALLOWED_ROUND, WHITELIST, WHITELIST_ADMINS, + LOCK_ID, PROPOSAL_MAP, PROPS_BY_SCORE, PROP_ID, ROUND_TO_HEIGHT_RANGE, TRANCHE_ID, TRANCHE_MAP, + USER_LOCKS, VALIDATORS_INFO, VALIDATORS_PER_ROUND, VALIDATORS_STORE_INITIALIZED, + VALIDATOR_TO_QUERY_ID, VOTE_MAP, VOTING_ALLOWED_ROUND, WHITELIST, WHITELIST_ADMINS, +}; +use crate::utils::{ + load_constants_active_at_timestamp, load_current_constants, run_on_each_transaction, + update_locked_tokens_info, validate_locked_tokens_caps, }; use crate::validators_icqs::{ build_create_interchain_query_submsg, handle_delivered_interchain_query_result, @@ -61,7 +64,7 @@ pub const MIN_DEPLOYMENT_DURATION: u64 = 1; #[cfg_attr(not(feature = "library"), entry_point)] pub fn instantiate( deps: DepsMut, - _env: Env, + env: Env, info: MessageInfo, msg: InstantiateMsg, ) -> Result, ContractError> { @@ -79,6 +82,7 @@ pub fn instantiate( 
lock_epoch_length: msg.lock_epoch_length, first_round_start: msg.first_round_start, max_locked_tokens: msg.max_locked_tokens.u128(), + current_users_extra_cap: 0, max_validator_shares_participating: msg.max_validator_shares_participating, hub_connection_id: msg.hub_connection_id, hub_transfer_channel_id: msg.hub_transfer_channel_id, @@ -88,7 +92,7 @@ pub fn instantiate( round_lock_power_schedule: RoundLockPowerSchedule::new(msg.round_lock_power_schedule), }; - CONSTANTS.save(deps.storage, &state)?; + CONSTANTS.save(deps.storage, env.block.time.nanos(), &state)?; LOCKED_TOKENS.save(deps.storage, &0)?; LOCK_ID.save(deps.storage, &0)?; PROP_ID.save(deps.storage, &0)?; @@ -156,6 +160,10 @@ pub fn execute( info: MessageInfo, msg: ExecuteMsg, ) -> Result, ContractError> { + let constants = load_current_constants(&deps.as_ref(), &env)?; + let current_round = compute_current_round_id(&env, &constants)?; + run_on_each_transaction(deps.storage, &env, current_round)?; + match msg { ExecuteMsg::LockTokens { lock_duration } => lock_tokens(deps, env, info, lock_duration), ExecuteMsg::RefreshLockDuration { @@ -190,22 +198,33 @@ pub fn execute( remove_from_whitelist(deps, env, info, address) } ExecuteMsg::UpdateConfig { + activate_at, + max_locked_tokens, + current_users_extra_cap, + max_deployment_duration, + } => update_config( + deps, + env, + info, + activate_at, max_locked_tokens, + current_users_extra_cap, max_deployment_duration, - } => update_config(deps, info, max_locked_tokens, max_deployment_duration), - ExecuteMsg::Pause {} => pause_contract(deps, info), - ExecuteMsg::AddTranche { tranche } => add_tranche(deps, info, tranche), + ), + ExecuteMsg::DeleteConfigs { timestamps } => delete_configs(deps, &env, info, timestamps), + ExecuteMsg::Pause {} => pause_contract(deps, &env, info), + ExecuteMsg::AddTranche { tranche } => add_tranche(deps, env, info, tranche), ExecuteMsg::EditTranche { tranche_id, tranche_name, tranche_metadata, - } => edit_tranche(deps, info, 
tranche_id, tranche_name, tranche_metadata), + } => edit_tranche(deps, env, info, tranche_id, tranche_name, tranche_metadata), ExecuteMsg::CreateICQsForValidators { validators } => { create_icqs_for_validators(deps, env, info, validators) } - ExecuteMsg::AddICQManager { address } => add_icq_manager(deps, info, address), - ExecuteMsg::RemoveICQManager { address } => remove_icq_manager(deps, info, address), - ExecuteMsg::WithdrawICQFunds { amount } => withdraw_icq_funds(deps, info, amount), + ExecuteMsg::AddICQManager { address } => add_icq_manager(deps, env, info, address), + ExecuteMsg::RemoveICQManager { address } => remove_icq_manager(deps, env, info, address), + ExecuteMsg::WithdrawICQFunds { amount } => withdraw_icq_funds(deps, env, info, amount), ExecuteMsg::AddLiquidityDeployment { round_id, tranche_id, @@ -232,7 +251,7 @@ pub fn execute( round_id, tranche_id, proposal_id, - } => remove_liquidity_deployment(deps, info, round_id, tranche_id, proposal_id), + } => remove_liquidity_deployment(deps, env, info, round_id, tranche_id, proposal_id), } } @@ -243,12 +262,12 @@ pub fn execute( // Update total round power // Create entry in LocksMap fn lock_tokens( - deps: DepsMut, + mut deps: DepsMut, env: Env, info: MessageInfo, lock_duration: u64, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_lock_duration( @@ -258,7 +277,6 @@ fn lock_tokens( )?; let current_round = compute_current_round_id(&env, &constants)?; - initialize_validator_store(deps.storage, current_round)?; if info.funds.len() != 1 { return Err(ContractError::Std(StdError::generic_err( @@ -269,19 +287,20 @@ fn lock_tokens( let funds = info.funds[0].clone(); let validator = - validate_denom(deps.as_ref(), env.clone(), &constants, funds.denom).map_err(|err| { + validate_denom(deps.as_ref(), current_round, &constants, funds.denom).map_err(|err| { 
ContractError::Std(StdError::generic_err(format!("validating denom: {}", err))) })?; - // validate that this wouldn't cause the contract to have more locked tokens than the limit + let total_locked_tokens = LOCKED_TOKENS.load(deps.storage)?; let amount_to_lock = info.funds[0].amount.u128(); - let locked_tokens = LOCKED_TOKENS.load(deps.storage)?; - - if locked_tokens + amount_to_lock > constants.max_locked_tokens { - return Err(ContractError::Std(StdError::generic_err( - "The limit for locking tokens has been reached. No more tokens can be locked.", - ))); - } + let locking_info = validate_locked_tokens_caps( + &deps, + &constants, + current_round, + &info.sender, + total_locked_tokens, + amount_to_lock, + )?; // validate that the user does not have too many locks if get_lock_count(deps.as_ref(), info.sender.clone()) >= MAX_LOCK_ENTRIES { @@ -293,6 +312,7 @@ fn lock_tokens( let lock_id = LOCK_ID.load(deps.storage)?; LOCK_ID.save(deps.storage, &(lock_id + 1))?; + let lock_entry = LockEntry { lock_id, funds: info.funds[0].clone(), @@ -300,11 +320,37 @@ fn lock_tokens( lock_end: env.block.time.plus_nanos(lock_duration), }; let lock_end = lock_entry.lock_end.nanos(); - LOCKS_MAP.save(deps.storage, (info.sender.clone(), lock_id), &lock_entry)?; - LOCKED_TOKENS.save(deps.storage, &(locked_tokens + amount_to_lock))?; + LOCKS_MAP.save( + deps.storage, + (info.sender.clone(), lock_id), + &lock_entry, + env.block.height, + )?; + + USER_LOCKS.update( + deps.storage, + info.sender.clone(), + env.block.height, + |current_locks| -> Result, StdError> { + match current_locks { + None => Ok(vec![lock_id]), + Some(mut current_locks) => { + current_locks.push(lock_id); + Ok(current_locks) + } + } + }, + )?; + + update_locked_tokens_info( + &mut deps, + current_round, + &info.sender, + total_locked_tokens, + locking_info, + )?; // If user already voted for some proposals in the current round, update the voting power on those proposals. 
- let mut deps = deps; update_voting_power_on_proposals( &mut deps, &info.sender, @@ -321,8 +367,10 @@ fn lock_tokens( update_total_time_weighted_shares( &mut deps, + env.block.height, &constants, current_round, + current_round, last_round_with_power, lock_end, validator, @@ -353,7 +401,7 @@ fn refresh_lock_duration( lock_ids: Vec, lock_duration: u64, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; @@ -371,7 +419,6 @@ fn refresh_lock_duration( } let current_round_id = compute_current_round_id(&env, &constants)?; - initialize_validator_store(deps.storage, current_round_id)?; let mut response = Response::new() .add_attribute("action", "refresh_lock_duration") @@ -422,10 +469,15 @@ fn refresh_single_lock( ))); } lock_entry.lock_end = Timestamp::from_nanos(new_lock_end); - LOCKS_MAP.save(deps.storage, (info.sender.clone(), lock_id), &lock_entry)?; + LOCKS_MAP.save( + deps.storage, + (info.sender.clone(), lock_id), + &lock_entry, + env.block.height, + )?; let validator_result = validate_denom( deps.as_ref(), - env.clone(), + current_round_id, constants, lock_entry.funds.denom.clone(), ); @@ -448,8 +500,10 @@ fn refresh_single_lock( let new_last_round_with_power = compute_round_id_for_timestamp(constants, new_lock_end)? 
- 1; update_total_time_weighted_shares( deps, + env.block.height, constants, current_round_id, + current_round_id, new_last_round_with_power, new_lock_end, validator, @@ -505,7 +559,7 @@ fn unlock_tokens( info: MessageInfo, lock_ids: Option>, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; // TODO: reenable this when we implement slashing @@ -557,8 +611,7 @@ fn unlock_tokens( total_unlocked_amount += send.amount; - // Delete entry from LocksMap - to_delete.push((info.sender.clone(), lock_id)); + to_delete.push(lock_id); unlocked_lock_ids.push(lock_id.to_string()); unlocked_tokens.push(send.to_string()); @@ -566,10 +619,30 @@ fn unlock_tokens( } // Delete unlocked locks - for (addr, lock_id) in to_delete { - LOCKS_MAP.remove(deps.storage, (addr, lock_id)); + for lock_id in to_delete.iter() { + LOCKS_MAP.remove( + deps.storage, + (info.sender.clone(), *lock_id), + env.block.height, + )?; } + let to_delete: HashSet = to_delete.into_iter().collect(); + USER_LOCKS.update( + deps.storage, + info.sender.clone(), + env.block.height, + |current_locks| -> Result, StdError> { + match current_locks { + None => Ok(vec![]), + Some(mut current_locks) => { + current_locks.retain(|lock_id| !to_delete.contains(lock_id)); + Ok(current_locks) + } + } + }, + )?; + if !total_unlocked_amount.is_zero() { LOCKED_TOKENS.update( deps.storage, @@ -592,7 +665,7 @@ fn validate_previous_round_vote( env: &Env, sender: Addr, ) -> Result<(), ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), env)?; let current_round_id = compute_current_round_id(env, &constants)?; if current_round_id > 0 { let previous_round_id = current_round_id - 1; @@ -631,12 +704,10 @@ fn create_proposal( deployment_duration: u64, minimum_atom_liquidity_request: Uint128, ) -> Result, ContractError> { - let constants = 
CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; let current_round_id = compute_current_round_id(&env, &constants)?; - // this is just to initialize the store on the first action in each round - initialize_validator_store(deps.storage, current_round_id)?; // if no round_id is provided, use the current round let round_id = round_id.unwrap_or(current_round_id); @@ -747,13 +818,10 @@ fn vote( // - To enable switching votes (and for other stuff too), we store the vote in VOTE_MAP. // - When a user votes the second time in a round, the information about their previous vote from VOTE_MAP is used to reverse the effect of their previous vote. // - This leads to slightly higher gas costs for each vote, in exchange for a much lower gas cost at the end of the round. - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; let round_id = compute_current_round_id(&env, &constants)?; - // voting can never be the first action in a round (since one can only vote on proposals in the current round, and a proposal must be created first) - // however, to be safe, we initialize the validator store here, since this is more robust in case we change something about voting later - initialize_validator_store(deps.storage, round_id)?; // check that the tranche with the given id exists TRANCHE_MAP.load(deps.storage, tranche_id)?; @@ -909,7 +977,7 @@ fn vote( // get the validator from the denom let validator = match validate_denom( deps.as_ref(), - env.clone(), + round_id, &constants, lock_entry.clone().funds.denom, ) { @@ -1024,11 +1092,11 @@ pub fn get_lock_time_weighted_shares( // Adds a new account address to the whitelist. 
fn add_to_whitelist( deps: DepsMut, - _env: Env, + env: Env, info: MessageInfo, address: String, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1056,11 +1124,11 @@ fn add_to_whitelist( // Removes an account address from the whitelist. fn remove_from_whitelist( deps: DepsMut, - _env: Env, + env: Env, info: MessageInfo, address: String, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1081,13 +1149,29 @@ fn remove_from_whitelist( fn update_config( deps: DepsMut, + env: Env, info: MessageInfo, + activate_at: Timestamp, max_locked_tokens: Option, + current_users_extra_cap: Option, max_deployment_duration: Option, ) -> Result, ContractError> { - let mut constants = CONSTANTS.load(deps.storage)?; + if env.block.time > activate_at { + return Err(ContractError::Std(StdError::generic_err( + "Can not update config in the past.", + ))); + } + // Validate that the contract is not paused based on the current constants + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; + + // Load the Constants active at the given timestamp and base the updates on them. + // This allows us to update the Constants in arbitrary order. E.g. at the similar block + // height we can schedule multiple updates for the future, where each new Constants will + // have the changes introduced by earlier ones. 
+ let mut constants = load_constants_active_at_timestamp(&deps.as_ref(), activate_at)?.1; + validate_sender_is_whitelist_admin(&deps, &info)?; let mut response = Response::new() @@ -1099,6 +1183,14 @@ fn update_config( response = response.add_attribute("new_max_locked_tokens", max_locked_tokens.to_string()); } + if let Some(current_users_extra_cap) = current_users_extra_cap { + constants.current_users_extra_cap = current_users_extra_cap; + response = response.add_attribute( + "new_current_users_extra_cap", + current_users_extra_cap.to_string(), + ); + } + if let Some(max_deployment_duration) = max_deployment_duration { constants.max_deployment_duration = max_deployment_duration; response = response.add_attribute( @@ -1107,26 +1199,67 @@ fn update_config( ); } - CONSTANTS.save(deps.storage, &constants)?; + CONSTANTS.save(deps.storage, activate_at.nanos(), &constants)?; Ok(response) } +fn delete_configs( + deps: DepsMut<NeutronQuery>, + env: &Env, + info: MessageInfo, + timestamps: Vec<Timestamp>, +) -> Result<Response<NeutronMsg>, ContractError> { + let constants = load_current_constants(&deps.as_ref(), env)?; + + validate_contract_is_not_paused(&constants)?; + validate_sender_is_whitelist_admin(&deps, &info)?; + + let timestamps_to_delete: Vec<u64> = timestamps + .into_iter() + .filter_map(|timestamp| { + if CONSTANTS.has(deps.storage, timestamp.nanos()) { + Some(timestamp.nanos()) + } else { + None + } + }) + .collect(); + + for timestamp in &timestamps_to_delete { + CONSTANTS.remove(deps.storage, *timestamp); + } + + Ok(Response::new() + .add_attribute("action", "delete_configs") + .add_attribute("sender", info.sender) + .add_attribute( + "deleted_configs_at_timestamps", + timestamps_to_delete + .into_iter() + .map(|timestamp| timestamp.to_string()) + .collect::<Vec<String>>() + .join(", "), + )) +} + // Pause: // Validate that the contract isn't already paused // Validate sender is whitelist admin // Set paused to true and save the changes fn pause_contract( deps: DepsMut<NeutronQuery>, + env: &Env, info: MessageInfo, ) -> Result<Response<NeutronMsg>,
ContractError> { - let mut constants = CONSTANTS.load(deps.storage)?; + let (timestamp, mut constants) = + load_constants_active_at_timestamp(&deps.as_ref(), env.block.time)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; constants.paused = true; - CONSTANTS.save(deps.storage, &constants)?; + CONSTANTS.save(deps.storage, timestamp, &constants)?; Ok(Response::new() .add_attribute("action", "pause_contract") @@ -1141,10 +1274,11 @@ fn pause_contract( // Add new tranche to the store fn add_tranche( deps: DepsMut, + env: Env, info: MessageInfo, tranche: TrancheInfo, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; let tranche_name = tranche.name.trim().to_string(); validate_contract_is_not_paused(&constants)?; @@ -1177,12 +1311,13 @@ fn add_tranche( // Update the tranche in the store fn edit_tranche( deps: DepsMut, + env: Env, info: MessageInfo, tranche_id: u64, tranche_name: Option, tranche_metadata: Option, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1226,12 +1361,11 @@ fn create_icqs_for_validators( info: MessageInfo, validators: Vec, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; // This function will return error if the first round hasn't started yet. It is necessarry // that it has started, since handling the results of the interchain queries relies on this. 
- let round_id = compute_current_round_id(&env, &constants)?; - initialize_validator_store(deps.storage, round_id)?; + compute_current_round_id(&env, &constants)?; let mut valid_addresses = HashSet::new(); for validator in validators @@ -1310,10 +1444,11 @@ fn validate_icq_deposit_funds_sent( fn add_icq_manager( deps: DepsMut, + env: Env, info: MessageInfo, address: String, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1337,10 +1472,11 @@ fn add_icq_manager( fn remove_icq_manager( deps: DepsMut, + env: Env, info: MessageInfo, address: String, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1369,10 +1505,11 @@ fn remove_icq_manager( // top validators. fn withdraw_icq_funds( deps: DepsMut, + env: Env, info: MessageInfo, amount: Uint128, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_address_is_icq_manager(&deps, info.sender.clone())?; @@ -1407,7 +1544,7 @@ pub fn add_liquidity_deployment( info: MessageInfo, deployment: LiquidityDeployment, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1474,12 +1611,13 @@ pub fn add_liquidity_deployment( // This will return an error if the deployment does not exist. 
pub fn remove_liquidity_deployment( deps: DepsMut, + env: Env, info: MessageInfo, round_id: u64, tranche_id: u64, proposal_id: u64, ) -> Result, ContractError> { - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; validate_contract_is_not_paused(&constants)?; validate_sender_is_whitelist_admin(&deps, &info)?; @@ -1549,7 +1687,7 @@ fn validate_tranche_name_uniqueness( #[cfg_attr(not(feature = "library"), entry_point)] pub fn query(deps: Deps, env: Env, msg: QueryMsg) -> StdResult { match msg { - QueryMsg::Constants {} => to_json_binary(&query_constants(deps)?), + QueryMsg::Constants {} => to_json_binary(&query_constants(deps, env)?), QueryMsg::Tranches {} => to_json_binary(&query_tranches(deps)?), QueryMsg::AllUserLockups { address, @@ -1603,7 +1741,7 @@ pub fn query(deps: Deps, env: Env, msg: QueryMsg) -> StdResult to_json_binary(&query_current_round_id(deps, env)?), - QueryMsg::RoundEnd { round_id } => to_json_binary(&query_round_end(deps, round_id)?), + QueryMsg::RoundEnd { round_id } => to_json_binary(&query_round_end(deps, env, round_id)?), QueryMsg::TopNProposals { round_id, tranche_id, @@ -1692,9 +1830,9 @@ pub fn query_round_total_power( }) } -pub fn query_constants(deps: Deps) -> StdResult { +pub fn query_constants(deps: Deps, env: Env) -> StdResult { Ok(ConstantsResponse { - constants: CONSTANTS.load(deps.storage)?, + constants: load_current_constants(&deps, &env)?, }) } @@ -1710,7 +1848,7 @@ fn get_user_lockups_with_predicate( let raw_lockups = query_user_lockups(deps, addr, predicate, start_from, limit); - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps, &env)?; let current_round_id = compute_current_round_id(&env, &constants)?; let round_end = compute_round_end(&constants, current_round_id)?; @@ -1718,14 +1856,7 @@ fn get_user_lockups_with_predicate( let enriched_lockups = raw_lockups .iter() .map(|lock| { - to_lockup_with_power( - deps, - 
env.clone(), - &constants, - current_round_id, - round_end, - lock.clone(), - ) + to_lockup_with_power(deps, &constants, current_round_id, round_end, lock.clone()) }) .collect(); @@ -1777,7 +1908,7 @@ fn enrich_lockups_with_tranche_infos( .map(|tranche| tranche.unwrap().1.id) .collect::>(); - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps, &env)?; let current_round_id = compute_current_round_id(&env, &constants)?; // enrich lockups with some info per tranche @@ -1917,31 +2048,110 @@ pub fn query_user_voting_power( env: Env, address: String, ) -> StdResult { - let user_address = deps.api.addr_validate(&address)?; - let constants = CONSTANTS.load(deps.storage)?; - let current_round_id = compute_current_round_id(&env, &constants)?; + Ok(UserVotingPowerResponse { + voting_power: get_current_user_voting_power( + &deps, + &env, + deps.api.addr_validate(&address)?, + )?, + }) +} + +pub fn get_current_user_voting_power( + deps: &Deps, + env: &Env, + address: Addr, +) -> StdResult { + let constants = load_current_constants(deps, env)?; + let current_round_id = compute_current_round_id(env, &constants)?; let round_end = compute_round_end(&constants, current_round_id)?; - let voting_power = LOCKS_MAP - .prefix(user_address) - .range(deps.storage, None, None, Order::Ascending) - .map(|l| l.unwrap().1) - .filter(|l| l.lock_end > round_end) + let filter = |lock_res: Result<(u64, LockEntry), StdError>| match lock_res { + Err(_) => None, + Ok(lock_tuple) => { + if lock_tuple.1.lock_end > round_end { + Some(lock_tuple.1) + } else { + None + } + } + }; + + get_user_voting_power( + deps, + &constants, + address.clone(), + current_round_id, + round_end, + filter, + ) +} + +pub fn get_user_voting_power_for_past_round( + deps: &Deps, + constants: &Constants, + address: Addr, + round_id: u64, +) -> StdResult { + let round_end = compute_round_end(constants, round_id)?; + let load_height = ROUND_TO_HEIGHT_RANGE + .load(deps.storage, round_id)? 
+ .highest_known_height; + + let user_locks_ids = USER_LOCKS + .may_load_at_height(deps.storage, address.clone(), load_height)? + .unwrap_or_default(); + + Ok(user_locks_ids + .into_iter() + .filter_map(|lock_id| { + match LOCKS_MAP.may_load_at_height( + deps.storage, + (address.clone(), lock_id), + load_height, + ) { + Err(_) => None, + Ok(lock) => match lock { + None => None, + Some(lock) => { + if lock.lock_end <= round_end { + None + } else { + Some(lock) + } + } + }, + } + }) .map(|lockup| { - to_lockup_with_power( - deps, - env.clone(), - &constants, - current_round_id, - round_end, - lockup, - ) - .current_voting_power - .u128() + to_lockup_with_power(*deps, constants, round_id, round_end, lockup) + .current_voting_power + .u128() }) - .sum(); + .sum()) +} - Ok(UserVotingPowerResponse { voting_power }) +pub fn get_user_voting_power( + deps: &Deps, + constants: &Constants, + address: Addr, + round_id: u64, + round_end: Timestamp, + filter: T, +) -> StdResult +where + T: FnMut(Result<(u64, LockEntry), StdError>) -> Option, +{ + Ok(LOCKS_MAP + .prefix(address) + .range(deps.storage, None, None, Order::Ascending) + .filter_map(filter) + .map(|lockup| { + to_lockup_with_power(*deps, constants, round_id, round_end, lockup) + .current_voting_power + .u128() + }) + .sum()) } // This function queries user votes for the given round and tranche. 
@@ -2036,7 +2246,7 @@ pub fn query_current_round_id( deps: Deps, env: Env, ) -> StdResult { - let constants = &CONSTANTS.load(deps.storage)?; + let constants = &load_current_constants(&deps, &env)?; let round_id = compute_round_id_for_timestamp(constants, env.block.time.nanos())?; let round_end = compute_round_end(constants, round_id)?; @@ -2047,8 +2257,12 @@ pub fn query_current_round_id( }) } -pub fn query_round_end(deps: Deps, round_id: u64) -> StdResult { - let constants = &CONSTANTS.load(deps.storage)?; +pub fn query_round_end( + deps: Deps, + env: Env, + round_id: u64, +) -> StdResult { + let constants = &load_current_constants(&deps, &env)?; let round_end = compute_round_end(constants, round_id)?; Ok(RoundEndResponse { round_end }) @@ -2504,7 +2718,9 @@ fn update_proposal_and_props_by_score_maps( #[allow(clippy::too_many_arguments)] // complex function that needs a lot of arguments fn update_total_time_weighted_shares( deps: &mut DepsMut, + current_height: u64, constants: &Constants, + current_round: u64, start_round_id: u64, end_round_id: u64, lock_end: u64, @@ -2515,6 +2731,12 @@ fn update_total_time_weighted_shares( where T: Fn(u64, Timestamp, Uint128) -> Uint128, { + // We need the validator power ratio to update the total voting power of current and possibly future rounds. + // It is loaded outside of the loop to save some gas. We use the validator power ratio from the current round, + // since it is not populated for future rounds yet. 
+ let validator_power_ratio = + get_validator_power_ratio_for_round(deps.storage, current_round, shares_validator.clone())?; + for round in start_round_id..=end_round_id { let round_end = compute_round_end(constants, round)?; let lockup_length = lock_end - round_end.nanos(); @@ -2536,8 +2758,10 @@ where // add the shares to the total power in the round add_validator_shares_to_round_total( deps.storage, + current_height, round, shares_validator.clone(), + validator_power_ratio, scaled_shares, )?; } @@ -2555,13 +2779,12 @@ fn get_lock_count(deps: Deps, user_address: Addr) -> usize { fn to_lockup_with_power( deps: Deps, - env: Env, constants: &Constants, round_id: u64, round_end: Timestamp, lock_entry: LockEntry, ) -> LockEntryWithPower { - match validate_denom(deps, env.clone(), constants, lock_entry.funds.denom.clone()) { + match validate_denom(deps, round_id, constants, lock_entry.funds.denom.clone()) { Err(_) => { // If we fail to resove the denom, or the validator has dropped // from the top N, then this lockup has zero voting power. 
diff --git a/contracts/hydro/src/error.rs b/contracts/hydro/src/error.rs index bca8c11..7f8891e 100644 --- a/contracts/hydro/src/error.rs +++ b/contracts/hydro/src/error.rs @@ -1,4 +1,4 @@ -use cosmwasm_std::{OverflowError, StdError}; +use cosmwasm_std::{CheckedFromRatioError, OverflowError, StdError}; use cw_utils::PaymentError; use neutron_sdk::NeutronError; use thiserror::Error; @@ -11,6 +11,9 @@ pub enum ContractError { #[error("{0}")] OverflowError(#[from] OverflowError), + #[error("{0}")] + CheckedFromRatioError(#[from] CheckedFromRatioError), + #[error("Unauthorized")] Unauthorized, diff --git a/contracts/hydro/src/lib.rs b/contracts/hydro/src/lib.rs index aa1f4b9..d49ff17 100644 --- a/contracts/hydro/src/lib.rs +++ b/contracts/hydro/src/lib.rs @@ -6,6 +6,7 @@ pub mod msg; pub mod query; pub mod score_keeper; pub mod state; +pub mod utils; pub mod validators_icqs; #[cfg(test)] @@ -28,3 +29,12 @@ mod testing_fractional_voting; #[cfg(test)] mod testing_deployments; + +#[cfg(test)] +mod testing_utils; + +#[cfg(test)] +mod testing_compounder_cap; + +#[cfg(test)] +mod testing_snapshoting; diff --git a/contracts/hydro/src/lsm_integration.rs b/contracts/hydro/src/lsm_integration.rs index 8956749..149f3b2 100644 --- a/contracts/hydro/src/lsm_integration.rs +++ b/contracts/hydro/src/lsm_integration.rs @@ -1,14 +1,13 @@ -use cosmwasm_std::{Decimal, Deps, Env, Order, StdError, StdResult, Storage}; +use cosmwasm_std::{Decimal, Deps, Order, StdError, StdResult, Storage, Uint128}; use neutron_sdk::bindings::query::NeutronQuery; use neutron_std::types::ibc::applications::transfer::v1::{DenomTrace, TransferQuerier}; use crate::state::{ - ValidatorInfo, SCALED_ROUND_POWER_SHARES_MAP, VALIDATORS_INFO, VALIDATORS_PER_ROUND, - VALIDATORS_STORE_INITIALIZED, + ValidatorInfo, SCALED_ROUND_POWER_SHARES_MAP, TOTAL_VOTING_POWER_PER_ROUND, VALIDATORS_INFO, + VALIDATORS_PER_ROUND, VALIDATORS_STORE_INITIALIZED, }; use crate::{ - contract::compute_current_round_id, 
score_keeper::{get_total_power_for_proposal, update_power_ratio_for_proposal}, state::{Constants, Proposal, PROPOSAL_MAP, PROPS_BY_SCORE, TRANCHE_MAP}, }; @@ -22,16 +21,15 @@ pub const COSMOS_VALIDATOR_ADDR_LENGTH: usize = 52; // e.g. cosmosvaloper15w6ra6 // Returns OK if the denom is a valid IBC denom representing LSM // tokenized share transferred directly from the Cosmos Hub -// of a validator that is also currently among the top -// max_validators validators, and returns the address of that validator. +// of a validator that is also among the top max_validators validators +// for the given round, and returns the address of that validator. pub fn validate_denom( deps: Deps, - env: Env, + round_id: u64, constants: &Constants, denom: String, ) -> StdResult { let validator = resolve_validator_from_denom(&deps, constants, denom)?; - let round_id = compute_current_round_id(&env, constants)?; let max_validators = constants.max_validator_shares_participating; if is_active_round_validator(deps.storage, round_id, &validator) { @@ -133,6 +131,35 @@ fn query_ibc_denom_trace(deps: &Deps, denom: String) -> StdResult< .ok_or(StdError::generic_err("Failed to obtain IBC denom trace")) } +/// Updates all the required stores each time some validator's power ratio is changed +pub fn update_stores_due_to_power_ratio_change( + storage: &mut dyn Storage, + current_height: u64, + validator: &str, + current_round_id: u64, + old_power_ratio: Decimal, + new_power_ratio: Decimal, +) -> StdResult<()> { + update_scores_due_to_power_ratio_change( + storage, + validator, + current_round_id, + old_power_ratio, + new_power_ratio, + )?; + + update_total_power_due_to_power_ratio_change( + storage, + current_height, + validator, + current_round_id, + old_power_ratio, + new_power_ratio, + )?; + + Ok(()) +} + // Applies the new power ratio for the validator to score keepers. 
// It updates: // * all proposals of that round @@ -215,31 +242,96 @@ pub fn update_scores_due_to_power_ratio_change( Ok(()) } -pub fn get_total_power_for_round(deps: Deps, round_id: u64) -> StdResult { - // get the current validators for that round - let validators = get_round_validators(deps, round_id); - - // compute the total power - let mut total = Decimal::zero(); - for validator in validators { - let shares = SCALED_ROUND_POWER_SHARES_MAP - .may_load(deps.storage, (round_id, validator.address.clone()))? - .unwrap_or(Decimal::zero()); - total += shares * validator.power_ratio; +// Updates the total voting power for the current and future rounds when the given validator power ratio changes. +pub fn update_total_power_due_to_power_ratio_change( + storage: &mut dyn Storage, + current_height: u64, + validator: &str, + current_round_id: u64, + old_power_ratio: Decimal, + new_power_ratio: Decimal, +) -> StdResult<()> { + let mut round_id = current_round_id; + + // Try to update the total voting power starting from the current round id and moving to next rounds until + // we reach the round for which there is no entry in the TOTAL_VOTING_POWER_PER_ROUND. This implies the first + // round in which no lock entry gives voting power, which also must be true for all rounds after that round, + // so we break the loop at that point. + loop { + let old_total_voting_power = + match TOTAL_VOTING_POWER_PER_ROUND.may_load(storage, round_id)? { + None => break, + Some(total_voting_power) => Decimal::from_ratio(total_voting_power, Uint128::one()), + }; + + let validator_shares = + get_validator_shares_for_round(storage, round_id, validator.to_owned())?; + if validator_shares == Decimal::zero() { + // If we encounter a round that doesn't have this validator shares, then no subsequent + // round could also have its shares, so break early to save some gas. 
+ break; + } + + let old_validator_shares_power = validator_shares * old_power_ratio; + let new_validator_shares_power = validator_shares * new_power_ratio; + + let new_total_voting_power = old_total_voting_power + .checked_add(new_validator_shares_power)? + .checked_sub(old_validator_shares_power)?; + + TOTAL_VOTING_POWER_PER_ROUND.save( + storage, + round_id, + &new_total_voting_power.to_uint_floor(), + current_height, + )?; + + round_id += 1; } - Ok(total) + Ok(()) +} + +pub fn get_total_power_for_round(deps: Deps, round_id: u64) -> StdResult { + Ok( + match TOTAL_VOTING_POWER_PER_ROUND.may_load(deps.storage, round_id)? { + None => Decimal::zero(), + Some(total_voting_power) => Decimal::from_ratio(total_voting_power, Uint128::one()), + }, + ) } pub fn add_validator_shares_to_round_total( storage: &mut dyn Storage, + current_height: u64, round_id: u64, validator: String, + val_power_ratio: Decimal, num_shares: Decimal, ) -> StdResult<()> { + // Update validator shares for the round let current_shares = get_validator_shares_for_round(storage, round_id, validator.clone())?; let new_shares = current_shares + num_shares; - SCALED_ROUND_POWER_SHARES_MAP.save(storage, (round_id, validator), &new_shares) + SCALED_ROUND_POWER_SHARES_MAP.save(storage, (round_id, validator.clone()), &new_shares)?; + + // Update total voting power for the round + TOTAL_VOTING_POWER_PER_ROUND.update( + storage, + round_id, + current_height, + |total_power_before| -> Result { + let total_power_before = match total_power_before { + None => Decimal::zero(), + Some(total_power_before) => Decimal::from_ratio(total_power_before, Uint128::one()), + }; + + Ok(total_power_before + .checked_add(num_shares.checked_mul(val_power_ratio)?)? 
+ .to_uint_floor()) + }, + )?; + + Ok(()) } pub fn get_validator_shares_for_round( @@ -310,7 +402,7 @@ pub fn initialize_validator_store_helper( } // copy the information from the previous round - let val_infos = load_validators_infos(storage, round_id - 1)?; + let val_infos = load_validators_infos(storage, round_id - 1); for val_info in val_infos { let address = val_info.clone().address; @@ -334,10 +426,13 @@ pub fn initialize_validator_store_helper( } // load_validators_infos needs to be its own function to borrow the storage -fn load_validators_infos(storage: &dyn Storage, round_id: u64) -> StdResult> { +pub fn load_validators_infos(storage: &dyn Storage, round_id: u64) -> Vec { VALIDATORS_INFO .prefix(round_id) .range(storage, None, None, Order::Ascending) - .map(|val_info_res| val_info_res.map(|val_info| val_info.1)) + .filter_map(|val_info| match val_info { + Err(_) => None, + Ok(val_info) => Some(val_info.1), + }) .collect() } diff --git a/contracts/hydro/src/migration/migrate.rs b/contracts/hydro/src/migration/migrate.rs index 47fd400..6ae8cfc 100644 --- a/contracts/hydro/src/migration/migrate.rs +++ b/contracts/hydro/src/migration/migrate.rs @@ -7,19 +7,20 @@ use cw2::{get_contract_version, set_contract_version}; use neutron_sdk::bindings::msg::NeutronMsg; use neutron_sdk::bindings::query::NeutronQuery; +use super::unreleased::migrate_v3_0_0_to_unreleased; use super::v3_0_0::MigrateMsgV3_0_0; pub const CONTRACT_VERSION_V1_1_0: &str = "1.1.0"; pub const CONTRACT_VERSION_V2_0_1: &str = "2.0.1"; pub const CONTRACT_VERSION_V2_0_2: &str = "2.0.2"; -pub const CONTRACT_VERSION_V2_1_0: &str = "2.0.2"; +pub const CONTRACT_VERSION_V2_1_0: &str = "2.1.0"; pub const CONTRACT_VERSION_V3_0_0: &str = "3.0.0"; pub const CONTRACT_VERSION_UNRELEASED: &str = "4.0.0"; #[cfg_attr(not(feature = "library"), entry_point)] pub fn migrate( - deps: DepsMut, - _env: Env, + mut deps: DepsMut, + env: Env, _msg: MigrateMsgV3_0_0, ) -> Result, ContractError> { let contract_version = 
get_contract_version(deps.storage)?; @@ -30,7 +31,7 @@ pub fn migrate( ))); } - // no migration necessary from 2.1.0 to 3.0.0 + migrate_v3_0_0_to_unreleased(&mut deps, env)?; set_contract_version(deps.storage, CONTRACT_NAME, CONTRACT_VERSION)?; diff --git a/contracts/hydro/src/migration/mod.rs b/contracts/hydro/src/migration/mod.rs index 3c29627..ab48d88 100644 --- a/contracts/hydro/src/migration/mod.rs +++ b/contracts/hydro/src/migration/mod.rs @@ -1,2 +1,3 @@ pub mod migrate; +pub mod unreleased; pub mod v3_0_0; diff --git a/contracts/hydro/src/migration/unreleased.rs b/contracts/hydro/src/migration/unreleased.rs new file mode 100644 index 0000000..a3ffbb6 --- /dev/null +++ b/contracts/hydro/src/migration/unreleased.rs @@ -0,0 +1,186 @@ +use std::collections::HashMap; + +use cosmwasm_std::{Addr, Decimal, DepsMut, Env, Order, StdResult}; +use cw_storage_plus::{Item, Map}; +use neutron_sdk::bindings::query::NeutronQuery; + +use crate::{ + contract::compute_current_round_id, + error::ContractError, + lsm_integration::load_validators_infos, + migration::v3_0_0::ConstantsV3_0_0, + state::{ + Constants, HeightRange, LockEntry, CONSTANTS, HEIGHT_TO_ROUND, LOCKS_MAP, + ROUND_TO_HEIGHT_RANGE, SCALED_ROUND_POWER_SHARES_MAP, TOTAL_VOTING_POWER_PER_ROUND, + USER_LOCKS, + }, +}; + +pub fn migrate_v3_0_0_to_unreleased( + deps: &mut DepsMut, + env: Env, +) -> Result<(), ContractError> { + let constants = migrate_constants(deps)?; + let round_id = compute_current_round_id(&env, &constants)?; + + populate_total_power_for_rounds(deps, &env, round_id)?; + migrate_user_lockups(deps, &env)?; + populate_round_height_mappings(deps, &env, round_id)?; + + Ok(()) +} + +// Convert CONSTANTS storage from Item to Map and insert single constants instance +// under the timestamp of the first round start time, and set the extra_cap to zero. 
+fn migrate_constants(deps: &mut DepsMut) -> StdResult { + const OLD_CONSTANTS: Item = Item::new("constants"); + let old_constants = OLD_CONSTANTS.load(deps.storage)?; + + let new_constants = Constants { + round_length: old_constants.round_length, + lock_epoch_length: old_constants.lock_epoch_length, + first_round_start: old_constants.first_round_start, + max_locked_tokens: old_constants.max_locked_tokens, + max_validator_shares_participating: old_constants.max_validator_shares_participating, + hub_connection_id: old_constants.hub_connection_id, + hub_transfer_channel_id: old_constants.hub_transfer_channel_id, + icq_update_period: old_constants.icq_update_period, + paused: old_constants.paused, + max_deployment_duration: old_constants.max_deployment_duration, + round_lock_power_schedule: old_constants.round_lock_power_schedule, + current_users_extra_cap: 0, // set the extra cap to 0 during the migration + }; + + OLD_CONSTANTS.remove(deps.storage); + CONSTANTS.save( + deps.storage, + new_constants.first_round_start.nanos(), + &new_constants, + )?; + + Ok(new_constants) +} + +// Populate round total power starting from round 0 and all the way to the last round +// in which any existing lock gives voting power. 
+fn populate_total_power_for_rounds( + deps: &mut DepsMut, + env: &Env, + current_round_id: u64, +) -> StdResult<()> { + let current_validator_ratios: HashMap = + load_validators_infos(deps.storage, current_round_id) + .iter() + .map(|validator_info| (validator_info.address.clone(), validator_info.power_ratio)) + .collect(); + + let mut round_id = 0; + loop { + let validator_power_ratios: HashMap = if round_id >= current_round_id { + current_validator_ratios.clone() + } else { + load_validators_infos(deps.storage, round_id) + .iter() + .map(|validator_info| (validator_info.address.clone(), validator_info.power_ratio)) + .collect() + }; + + let round_validator_shares = SCALED_ROUND_POWER_SHARES_MAP + .prefix(round_id) + .range(deps.storage, None, None, Order::Ascending) + .filter_map(|val_shares| match val_shares { + Err(_) => None, + Ok(val_shares) => Some(val_shares), + }) + .collect::>(); + + // When we encounter the round with zero shares of any validator, it means that there + // was no lock entry that would give voting power for the given round, or any subsequent + // rounds, so we break the loop at that point. + if round_validator_shares.is_empty() { + break; + } + + let round_total_power: Decimal = round_validator_shares + .iter() + .map(|validator_shares| { + validator_power_ratios + .get(&validator_shares.0) + .map_or_else(Decimal::zero, |power_ratio| { + power_ratio * validator_shares.1 + }) + }) + .sum(); + + TOTAL_VOTING_POWER_PER_ROUND.save( + deps.storage, + round_id, + &round_total_power.to_uint_ceil(), + env.block.height, + )?; + + round_id += 1; + } + + Ok(()) +} + +// Converts the LOCKS_MAP from Map into SnapshotMap and populates USER_LOCKS map. 
+fn migrate_user_lockups(deps: &mut DepsMut, env: &Env) -> StdResult<()> { + const OLD_LOCKS_MAP: Map<(Addr, u64), LockEntry> = Map::new("locks_map"); + + let mut user_locks_map: HashMap> = HashMap::new(); + let user_lockups: Vec<(Addr, LockEntry)> = OLD_LOCKS_MAP + .range(deps.storage, None, None, Order::Ascending) + .filter_map(|lockup| match lockup { + Err(_) => None, + Ok(lockup) => { + user_locks_map + .entry(lockup.0 .0.clone()) + .and_modify(|user_locks| user_locks.push(lockup.1.lock_id)) + .or_insert(vec![lockup.1.lock_id]); + + Some((lockup.0 .0, lockup.1)) + } + }) + .collect(); + + for user_lockup in &user_lockups { + OLD_LOCKS_MAP.remove(deps.storage, (user_lockup.0.clone(), user_lockup.1.lock_id)); + } + + for user_lockup in user_lockups { + LOCKS_MAP.save( + deps.storage, + (user_lockup.0.clone(), user_lockup.1.lock_id), + &user_lockup.1, + env.block.height, + )?; + } + + for user_locks in user_locks_map { + USER_LOCKS.save(deps.storage, user_locks.0, &user_locks.1, env.block.height)?; + } + + Ok(()) +} + +// Populates ROUND_TO_HEIGHT_RANGE and HEIGHT_TO_ROUND maps +fn populate_round_height_mappings( + deps: &mut DepsMut, + env: &Env, + current_round_id: u64, +) -> StdResult<()> { + ROUND_TO_HEIGHT_RANGE.save( + deps.storage, + current_round_id, + &HeightRange { + lowest_known_height: env.block.height, + highest_known_height: env.block.height, + }, + )?; + + HEIGHT_TO_ROUND.save(deps.storage, env.block.height, ¤t_round_id)?; + + Ok(()) +} diff --git a/contracts/hydro/src/migration/v3_0_0.rs b/contracts/hydro/src/migration/v3_0_0.rs index aa12869..b5a5d78 100644 --- a/contracts/hydro/src/migration/v3_0_0.rs +++ b/contracts/hydro/src/migration/v3_0_0.rs @@ -1,5 +1,24 @@ +use cosmwasm_schema::cw_serde; +use cosmwasm_std::Timestamp; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use crate::state::RoundLockPowerSchedule; + #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct MigrateMsgV3_0_0 {} + +#[cw_serde] 
+pub struct ConstantsV3_0_0 { + pub round_length: u64, + pub lock_epoch_length: u64, + pub first_round_start: Timestamp, + pub max_locked_tokens: u128, + pub max_validator_shares_participating: u64, + pub hub_connection_id: String, + pub hub_transfer_channel_id: String, + pub icq_update_period: u64, + pub paused: bool, + pub max_deployment_duration: u64, + pub round_lock_power_schedule: RoundLockPowerSchedule, +} diff --git a/contracts/hydro/src/msg.rs b/contracts/hydro/src/msg.rs index 36cbd45..4a1c3b4 100644 --- a/contracts/hydro/src/msg.rs +++ b/contracts/hydro/src/msg.rs @@ -67,9 +67,14 @@ pub enum ExecuteMsg { address: String, }, UpdateConfig { + activate_at: Timestamp, max_locked_tokens: Option, + current_users_extra_cap: Option, max_deployment_duration: Option, }, + DeleteConfigs { + timestamps: Vec, + }, Pause {}, AddTranche { tranche: TrancheInfo, diff --git a/contracts/hydro/src/state.rs b/contracts/hydro/src/state.rs index fd35d24..8ca9319 100644 --- a/contracts/hydro/src/state.rs +++ b/contracts/hydro/src/state.rs @@ -1,10 +1,11 @@ use cosmwasm_schema::cw_serde; use cosmwasm_std::{Addr, Coin, Decimal, Timestamp, Uint128}; -use cw_storage_plus::{Item, Map}; +use cw_storage_plus::{Item, Map, SnapshotMap, Strategy}; use crate::msg::LiquidityDeployment; -pub const CONSTANTS: Item = Item::new("constants"); +// CONSTANTS: key(activation_timestamp) -> Constants +pub const CONSTANTS: Map = Map::new("constants"); #[cw_serde] pub struct LockPowerEntry { @@ -58,7 +59,15 @@ pub struct Constants { pub round_length: u64, pub lock_epoch_length: u64, pub first_round_start: Timestamp, + // The maximum number of tokens that can be locked by any users (currently known and the future ones) pub max_locked_tokens: u128, + // The maximum number of tokens (out of the max_locked_tokens) that is reserved for locking only + // for currently known users. 
This field is intended to be set to some value greater than zero at + the beginning of the round, and such Constants would apply only for a predefined period of time. + After this period has expired, a new Constants would be activated that would set this value to + zero, which would allow any user to lock any amount that possibly wasn't filled, but was reserved + for this cap. + pub current_users_extra_cap: u128, pub max_validator_shares_participating: u64, pub hub_connection_id: String, pub hub_transfer_channel_id: String, @@ -71,6 +80,16 @@ pub struct Constants { // the total number of tokens locked in the contract pub const LOCKED_TOKENS: Item = Item::new("locked_tokens"); +// Tracks the total number of tokens locked in extra cap, for the given round +// EXTRA_LOCKED_TOKENS_ROUND_TOTAL: key(round_id) -> uint128 +pub const EXTRA_LOCKED_TOKENS_ROUND_TOTAL: Map = + Map::new("extra_locked_tokens_round_total"); + +// Tracks the number of tokens locked in extra cap by specific user, for the given round +// EXTRA_LOCKED_TOKENS_CURRENT_USERS: key(round_id, sender_address) -> uint128 +pub const EXTRA_LOCKED_TOKENS_CURRENT_USERS: Map<(u64, Addr), u128> = + Map::new("extra_locked_tokens_current_users"); + pub const LOCK_ID: Item = Item::new("lock_id"); // stores the current PROP_ID, in order to ensure that each proposal has a unique ID @@ -78,7 +97,13 @@ pub const LOCK_ID: Item = Item::new("lock_id"); pub const PROP_ID: Item = Item::new("prop_id"); // LOCKS_MAP: key(sender_address, lock_id) -> LockEntry -pub const LOCKS_MAP: Map<(Addr, u64), LockEntry> = Map::new("locks_map"); +pub const LOCKS_MAP: SnapshotMap<(Addr, u64), LockEntry> = SnapshotMap::new( + "locks_map", + "locks_map__checkpoints", + "locks_map__changelog", + Strategy::EveryBlock, +); + #[cw_serde] pub struct LockEntry { pub lock_id: u64, @@ -87,6 +112,25 @@ pub struct LockEntry { pub lock_end: Timestamp, } +// Stores the lockup IDs that belong to a user.
Snapshotted so that we can determine which lockups +// user had at a given height and use this info to compute users' voting power at that height. +// USER_LOCKS: key(user_address) -> Vec +pub const USER_LOCKS: SnapshotMap> = SnapshotMap::new( + "user_locks", + "user_locks__checkpoints", + "user_locks__changelog", + Strategy::EveryBlock, +); + +// This is the total voting power of all users combined. +// TOTAL_VOTING_POWER_PER_ROUND: key(round_id) -> total_voting_power +pub const TOTAL_VOTING_POWER_PER_ROUND: SnapshotMap = SnapshotMap::new( + "total_voting_power_per_round", + "total_voting_power_per_round__checkpoints", + "total_voting_power_per_round__changelog", + Strategy::EveryBlock, +); + // PROPOSAL_MAP: key(round_id, tranche_id, prop_id) -> Proposal pub const PROPOSAL_MAP: Map<(u64, u64, u64), Proposal> = Map::new("prop_map"); #[cw_serde] @@ -228,3 +272,24 @@ impl ValidatorInfo { // LIQUIDITY_DEPLOYMENTS_MAP: key(round_id, tranche_id, prop_id) -> deployment pub const LIQUIDITY_DEPLOYMENTS_MAP: Map<(u64, u64, u64), LiquidityDeployment> = Map::new("liquidity_deployments_map"); + +// Stores the mapping between the round_id and the range of known block heights for that round. +// The lowest_known_height is the height at which the first transaction was executed, and the +// highest_known_height is the height at which the last transaction was executed against the smart +// contract in the given round. +// Notice that the round could span beyond these boundaries, but we don't have a way to know that. +// Besides, the info we store here is sufficient for our needs. +// ROUND_TO_HEIGHT_RANGE: key(round_id) -> HeightRange +pub const ROUND_TO_HEIGHT_RANGE: Map = Map::new("round_to_height_range"); + +// Stores the mapping between the block height and round. It gets populated +// each time a transaction is executed against the smart contract.
+// HEIGHT_TO_ROUND: key(block_height) -> round_id +pub const HEIGHT_TO_ROUND: Map = Map::new("height_to_round"); + +#[cw_serde] +#[derive(Default)] +pub struct HeightRange { + pub lowest_known_height: u64, + pub highest_known_height: u64, +} diff --git a/contracts/hydro/src/testing.rs b/contracts/hydro/src/testing.rs index 0448da5..928fa58 100644 --- a/contracts/hydro/src/testing.rs +++ b/contracts/hydro/src/testing.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::str::FromStr; use crate::contract::{ @@ -6,7 +6,7 @@ use crate::contract::{ query_whitelist_admins, MAX_LOCK_ENTRIES, }; use crate::msg::{ProposalToLockups, TrancheInfo}; -use crate::state::{LockEntry, RoundLockPowerSchedule, Vote, VOTE_MAP}; +use crate::state::{LockEntry, RoundLockPowerSchedule, Vote, CONSTANTS, USER_LOCKS, VOTE_MAP}; use crate::testing_lsm_integration::set_validator_infos_for_round; use crate::testing_mocks::{ denom_trace_grpc_query_mock, mock_dependencies, no_op_grpc_query_mock, MockQuerier, @@ -123,10 +123,10 @@ fn instantiate_test() { let msg = get_default_instantiate_msg(&deps.api); - let res = instantiate(deps.as_mut(), env, info, msg.clone()); + let res = instantiate(deps.as_mut(), env.clone(), info, msg.clone()); assert!(res.is_ok()); - let res = query_constants(deps.as_ref()); + let res = query_constants(deps.as_ref(), env); assert!(res.is_ok()); let constants = res.unwrap().constants; @@ -243,6 +243,17 @@ fn lock_tokens_basic_test() { // check that the power is correct: 3000 tokens locked for three epochs // so power is 3000 * 1.5 = 4500 assert_eq!(4500, lockup.current_voting_power.u128()); + + // check that the USER_LOCKS are updated as expected + let expected_lock_ids = HashSet::from([ + res.lockups[0].lock_entry.lock_id, + res.lockups[1].lock_entry.lock_id, + ]); + let mut user_lock_ids = USER_LOCKS + .load(&deps.storage, info2.sender.clone()) + .unwrap(); + user_lock_ids.retain(|lock_id| 
!expected_lock_ids.contains(lock_id)); + assert!(user_lock_ids.is_empty()); } #[test] @@ -2225,14 +2236,14 @@ fn test_round_id_computation() { msg.first_round_start = Timestamp::from_nanos(contract_start_time); let mut env = mock_env(); - env.block.time = Timestamp::from_nanos(contract_start_time); + env.block.time = Timestamp::from_nanos(current_time); let info = get_message_info(&deps.api, "addr0000", &[]); let _ = instantiate(deps.as_mut(), env.clone(), info.clone(), msg.clone()).unwrap(); // set the time to the current time env.block.time = Timestamp::from_nanos(current_time); - let constants = query_constants(deps.as_ref()); + let constants = query_constants(deps.as_ref(), env.clone()); assert!(constants.is_ok()); let round_id = compute_current_round_id(&env, &constants.unwrap().constants); @@ -2587,17 +2598,37 @@ fn max_locked_tokens_test() { let res = execute(deps.as_mut(), env.clone(), info.clone(), lock_msg.clone()); assert!(res.is_ok()); - // a privileged user can update the maximum allowed locked tokens + // a privileged user can update the maximum allowed locked tokens, but only for the future info = get_message_info(&deps.api, "addr0001", &[]); let update_max_locked_tokens_msg = ExecuteMsg::UpdateConfig { + activate_at: env.block.time.minus_hours(1), max_locked_tokens: Some(3000), + current_users_extra_cap: None, max_deployment_duration: None, }; let res = execute( deps.as_mut(), env.clone(), info.clone(), - update_max_locked_tokens_msg, + update_max_locked_tokens_msg.clone(), + ); + assert!(res + .unwrap_err() + .to_string() + .contains("Can not update config in the past.")); + + // this time with a valid activation timestamp + let update_max_locked_tokens_msg = ExecuteMsg::UpdateConfig { + activate_at: env.block.time, + max_locked_tokens: Some(3000), + current_users_extra_cap: None, + max_deployment_duration: None, + }; + let res = execute( + deps.as_mut(), + env.clone(), + info.clone(), + update_max_locked_tokens_msg.clone(), ); 
assert!(res.is_ok()); @@ -2622,6 +2653,93 @@ fn max_locked_tokens_test() { .unwrap_err() .to_string() .contains("The limit for locking tokens has been reached. No more tokens can be locked.")); + + // increase the maximum allowed locked tokens by 500, starting in 1 hour + info = get_message_info(&deps.api, "addr0001", &[]); + let update_max_locked_tokens_msg = ExecuteMsg::UpdateConfig { + activate_at: env.block.time.plus_hours(1), + max_locked_tokens: Some(3500), + current_users_extra_cap: None, + max_deployment_duration: None, + }; + let res = execute( + deps.as_mut(), + env.clone(), + info.clone(), + update_max_locked_tokens_msg, + ); + assert!(res.is_ok()); + + // try to lock additional 500 tokens before the time is reached to increase the cap + info = get_message_info( + &deps.api, + "addr0002", + &[Coin::new(500u64, IBC_DENOM_1.to_string())], + ); + let res = execute(deps.as_mut(), env.clone(), info.clone(), lock_msg.clone()); + assert!(res.is_err()); + assert!(res + .unwrap_err() + .to_string() + .contains("The limit for locking tokens has been reached. 
No more tokens can be locked.")); + + // advance the chain by 1h 0m 1s and verify user can lock additional 500 tokens + env.block.time = env.block.time.plus_seconds(3601); + + // now a user can lock up to additional 500 tokens + let res = execute(deps.as_mut(), env.clone(), info.clone(), lock_msg.clone()); + assert!(res.is_ok()); +} + +#[test] +fn delete_configs_test() { + let first_round_start_time = Timestamp::from_nanos(1737540000000000000); + let initial_block_height = 19_185_000; + + let (mut deps, mut env) = (mock_dependencies(no_op_grpc_query_mock()), mock_env()); + let info = get_message_info(&deps.api, "addr0000", &[]); + + env.block.time = first_round_start_time; + env.block.height = initial_block_height; + + let mut msg = get_default_instantiate_msg(&deps.api); + msg.whitelist_admins = vec![get_address_as_str(&deps.api, "addr0000")]; + msg.first_round_start = first_round_start_time; + + let res = instantiate(deps.as_mut(), env.clone(), info.clone(), msg.clone()); + assert!(res.is_ok()); + + let mut configs_timestamps = vec![]; + for i in 1..=5 { + let timestamp = env.block.time.plus_days(i); + configs_timestamps.push(timestamp); + + let update_max_locked_tokens_msg = ExecuteMsg::UpdateConfig { + activate_at: timestamp, + max_locked_tokens: Some((i * 1000) as u128), + current_users_extra_cap: None, + max_deployment_duration: None, + }; + let res = execute( + deps.as_mut(), + env.clone(), + info.clone(), + update_max_locked_tokens_msg.clone(), + ); + assert!(res.is_ok()); + } + + env.block.time = env.block.time.plus_days(2); + + let msg = ExecuteMsg::DeleteConfigs { + timestamps: configs_timestamps.clone(), + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok()); + + for timestamp in configs_timestamps { + assert!(!CONSTANTS.has(&deps.storage, timestamp.nanos())); + } } #[test] @@ -2647,7 +2765,7 @@ fn contract_pausing_test() { let res = execute(deps.as_mut(), env.clone(), info.clone(), msg.clone()); 
assert!(res.is_ok()); - let constants = query_constants(deps.as_ref()); + let constants = query_constants(deps.as_ref(), env.clone()); assert!(constants.is_ok()); assert!(constants.unwrap().constants.paused); @@ -2681,9 +2799,12 @@ fn contract_pausing_test() { address: whitelist_admin.to_string(), }, ExecuteMsg::UpdateConfig { + activate_at: env.block.time, max_locked_tokens: None, + current_users_extra_cap: None, max_deployment_duration: None, }, + ExecuteMsg::DeleteConfigs { timestamps: vec![] }, ExecuteMsg::Pause {}, ExecuteMsg::AddTranche { tranche: TrancheInfo { diff --git a/contracts/hydro/src/testing_compounder_cap.rs b/contracts/hydro/src/testing_compounder_cap.rs new file mode 100644 index 0000000..e933600 --- /dev/null +++ b/contracts/hydro/src/testing_compounder_cap.rs @@ -0,0 +1,551 @@ +use cosmos_sdk_proto::prost::Message; +use std::collections::HashMap; + +use cosmwasm_std::{ + testing::{mock_env, MockApi, MockStorage}, + Addr, Binary, Coin, Env, OwnedDeps, Storage, Timestamp, Uint128, +}; +use neutron_sdk::{ + bindings::{query::NeutronQuery, types::StorageValue}, + interchain_queries::{types::QueryType, v047::types::STAKING_STORE_KEY}, + sudo::msg::SudoMsg, +}; + +use crate::{ + contract::{execute, instantiate, sudo}, + lsm_integration::get_total_power_for_round, + msg::ExecuteMsg, + state::{EXTRA_LOCKED_TOKENS_CURRENT_USERS, EXTRA_LOCKED_TOKENS_ROUND_TOTAL, LOCKED_TOKENS}, + testing::{ + get_address_as_str, get_default_instantiate_msg, get_message_info, IBC_DENOM_1, + IBC_DENOM_2, IBC_DENOM_3, ONE_DAY_IN_NANO_SECONDS, VALIDATOR_1, VALIDATOR_1_LST_DENOM_1, + VALIDATOR_2, VALIDATOR_2_LST_DENOM_1, VALIDATOR_3, VALIDATOR_3_LST_DENOM_1, + }, + testing_lsm_integration::set_validator_infos_for_round, + testing_mocks::{ + custom_interchain_query_mock, denom_trace_grpc_query_mock, mock_dependencies, ICQMockData, + MockQuerier, + }, + testing_validators_icqs::get_mock_validator, + validators_icqs::TOKENS_TO_SHARES_MULTIPLIER, +}; + +const ROUND_LENGTH: u64 = 
30 * ONE_DAY_IN_NANO_SECONDS; +const LOCK_EPOCH_LENGTH: u64 = ROUND_LENGTH; +const TEN_DAYS_IN_NANOS: u64 = 10 * ONE_DAY_IN_NANO_SECONDS; +const FIRST_ROUND_START: Timestamp = Timestamp::from_nanos(1737540000000000000); // Wednesday, January 22, 2025 10:00:00 AM +const INITIAL_BLOCK_HEIGHT: u64 = 19_185_000; +const BLOCKS_PER_DAY: u64 = 35_000; + +// 1. Round 0: Have 3 users fill the total cap by locking 3 different tokens for different duration (1, 6, 12 rounds). +// 2. Round 0: Update config to increase total_cap and set extra_cap starting from round 1. +// 3. Round 0: Update config to close the extra_cap after some time in round 1. +// 4. Round 0: Update all validator power ratios to verify that the total voting power changes, and users +// voting power also gets updated proportionally. +// 5. Round 1: Have the first known user unlock the expired lockup, to test voting power computation for previous round. +// 6. Round 1: Have the first known user lock some tokens in public_cap, then a completely new user lock tokens +// in public cap (try more than allowed, then lock below public_cap). +// 7. Round 1: Have the known user from previous step lock more to fill the public_cap and some more into extra_cap. +// 8. Round 1: Have the same known user try to lock in extra_cap more than it should be allowed. +// 9. Round 1: Have the same known user lock the most it should be allowed in the extra_cap. +// 10. Round 1: Have other two known users lock as much as they should be allowed in the extra_cap. +// 11. Round 1: Update config to increase total_cap and set extra_cap starting from round 2. +// 12. Round 1: Update config to close the extra_cap after some time in round 2. +// 13. Round 2: Have a completely new user lock tokens to fill up the public_cap, then try to lock more. +// 14. Round 2: Have a known user lock maximum allowed in extra cap. +// 15.
Round 2: Advance the chain to end the extra_cap duration and have a user from step #13 lock +// additional amount that matches the entire amount previously reserved for extra_cap. +#[test] +fn test_compounder_cap() { + let whitelist_admin = "addr0000"; + let user1 = "addr0001"; + let user2 = "addr0002"; + let user3 = "addr0003"; + let user4 = "addr0004"; + let user5 = "addr0005"; + + let grpc_query = denom_trace_grpc_query_mock( + "transfer/channel-0".to_string(), + HashMap::from([ + (IBC_DENOM_1.to_string(), VALIDATOR_1_LST_DENOM_1.to_string()), + (IBC_DENOM_2.to_string(), VALIDATOR_2_LST_DENOM_1.to_string()), + (IBC_DENOM_3.to_string(), VALIDATOR_3_LST_DENOM_1.to_string()), + ]), + ); + let (mut deps, mut env) = (mock_dependencies(grpc_query), mock_env()); + + env.block.time = FIRST_ROUND_START; + env.block.height = INITIAL_BLOCK_HEIGHT; + + let user1_addr = deps.api.addr_make(user1); + let user2_addr = deps.api.addr_make(user2); + let user3_addr = deps.api.addr_make(user3); + let user4_addr = deps.api.addr_make(user4); + let user5_addr = deps.api.addr_make(user5); + + let mut msg = get_default_instantiate_msg(&deps.api); + + msg.lock_epoch_length = LOCK_EPOCH_LENGTH; + msg.round_length = ROUND_LENGTH; + msg.first_round_start = env.block.time; + msg.max_locked_tokens = Uint128::new(30000); + msg.whitelist_admins = vec![get_address_as_str(&deps.api, whitelist_admin)]; + + let admin_msg_info = get_message_info(&deps.api, whitelist_admin, &[]); + let res = instantiate( + deps.as_mut(), + env.clone(), + admin_msg_info.clone(), + msg.clone(), + ); + assert!(res.is_ok()); + + // Set all 3 validators power ratio in round 0 to 1 + let res = set_validator_infos_for_round( + &mut deps.storage, + 0, + vec![ + VALIDATOR_1.to_string(), + VALIDATOR_2.to_string(), + VALIDATOR_3.to_string(), + ], + ); + assert!(res.is_ok()); + + // Advance the chain 1 day into round 0 + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + // 1. 
Round 0: Have 3 users fill the total cap by locking 3 different tokens for different duration (1, 6, 12 rounds). + let locking_infos: Vec<(&str, u64, Coin, Option<&str>)> = vec![ + ( + user1, + LOCK_EPOCH_LENGTH, + Coin::new(10000u64, IBC_DENOM_1.to_string()), + None, + ), + ( + user2, + 6 * LOCK_EPOCH_LENGTH, + Coin::new(10000u64, IBC_DENOM_2.to_string()), + None, + ), + ( + user3, + 12 * LOCK_EPOCH_LENGTH, + Coin::new(10000u64, IBC_DENOM_3.to_string()), + None, + ), + ]; + + execute_locking_and_verify(&mut deps, &env, locking_infos); + + // Verify total voting power is as expected + let expected_round_powers: Vec<(u64, u128)> = vec![ + (0, 70000), + (1, 60000), + (2, 60000), + (3, 55000), + (4, 52500), + (5, 50000), + (6, 20000), + (7, 20000), + (8, 20000), + (9, 15000), + (10, 12500), + (11, 10000), + (12, 0), + ]; + for expected_round_power in expected_round_powers { + let res = get_total_power_for_round(deps.as_ref(), expected_round_power.0); + assert!(res.is_ok()); + assert_eq!(res.unwrap().to_uint_ceil().u128(), expected_round_power.1); + } + + // 2. Round 0: Update config to increase total_cap and set extra_cap starting from round 1. + + // Advance the chain by 1 day + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + let msg = ExecuteMsg::UpdateConfig { + activate_at: FIRST_ROUND_START.plus_nanos(ROUND_LENGTH + 1), + max_locked_tokens: Some(40000), + current_users_extra_cap: Some(2000), + max_deployment_duration: None, + }; + + let res = execute(deps.as_mut(), env.clone(), admin_msg_info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + // 3. Round 0: Update config to close the extra_cap after some time in round 1. 
+ + // Advance the chain by 1 day + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + let msg = ExecuteMsg::UpdateConfig { + activate_at: FIRST_ROUND_START.plus_nanos(ROUND_LENGTH + TEN_DAYS_IN_NANOS + 1), + max_locked_tokens: None, + current_users_extra_cap: Some(0), + max_deployment_duration: None, + }; + + let res = execute(deps.as_mut(), env.clone(), admin_msg_info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + // 4. Round 0: Update all validator power ratios to verify that the total voting power changes, and users + // voting power also gets updated proportinally. + + // Advance the chain by 1 day + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + let mock_shares = Uint128::new(1000) * TOKENS_TO_SHARES_MULTIPLIER; + let mock_validator1 = get_mock_validator(VALIDATOR_1, Uint128::new(900), mock_shares); + let mock_validator2 = get_mock_validator(VALIDATOR_2, Uint128::new(900), mock_shares); + let mock_validator3 = get_mock_validator(VALIDATOR_3, Uint128::new(900), mock_shares); + + let mock_data = HashMap::from([ + ( + 1, + ICQMockData { + query_type: QueryType::KV, + should_query_return_error: false, + should_query_result_return_error: false, + kv_results: vec![StorageValue { + storage_prefix: STAKING_STORE_KEY.to_string(), + key: Binary::default(), + value: Binary::from(mock_validator1.encode_to_vec()), + }], + }, + ), + ( + 2, + ICQMockData { + query_type: QueryType::KV, + should_query_return_error: false, + should_query_result_return_error: false, + kv_results: vec![StorageValue { + storage_prefix: STAKING_STORE_KEY.to_string(), + key: Binary::default(), + value: Binary::from(mock_validator2.encode_to_vec()), + }], + }, + ), + ( + 3, + ICQMockData { + query_type: QueryType::KV, + should_query_return_error: false, + should_query_result_return_error: false, + kv_results: vec![StorageValue { + storage_prefix: STAKING_STORE_KEY.to_string(), + key: Binary::default(), + 
value: Binary::from(mock_validator3.encode_to_vec()), + }], + }, + ), + ]); + + deps.querier = deps + .querier + .with_custom_handler(custom_interchain_query_mock(mock_data)); + + for query_id in 1..=3 { + let res = sudo( + deps.as_mut(), + env.clone(), + SudoMsg::KVQueryResult { query_id }, + ); + assert!(res.is_ok()); + } + + // Verify total voting power is updated as expected + let expected_round_powers: Vec<(u64, u128)> = vec![ + (0, 63000), + (1, 54000), + (2, 54000), + (3, 49500), + (4, 47250), + (5, 45000), + (6, 18000), + (7, 18000), + (8, 18000), + (9, 13500), + (10, 11250), + (11, 9000), + (12, 0), + ]; + + for expected_round_power in expected_round_powers { + let res = get_total_power_for_round(deps.as_ref(), expected_round_power.0); + assert!(res.is_ok()); + assert_eq!(res.unwrap().to_uint_ceil().u128(), expected_round_power.1); + } + + // Advance the chain into the round 1 plus 1 day, so that user1 can unlock tokens + env.block.time = FIRST_ROUND_START.plus_nanos(ROUND_LENGTH + 1 + ONE_DAY_IN_NANO_SECONDS); + env.block.height = INITIAL_BLOCK_HEIGHT + BLOCKS_PER_DAY * 31; + + // 5. Round 1: Have the first known user unlock the expired lockup. + let info = get_message_info(&deps.api, user1, &[]); + let msg = ExecuteMsg::UnlockTokens { + lock_ids: Some(vec![0]), + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + assert_eq!(res.unwrap().messages.len(), 1); + + // 6. Round 1: Have a known user lock some tokens in public_cap, then a completely new user lock tokens + // in public cap (try more than allowed, then lock below public_cap). 
+ + // Advance the chain by 1 day + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + let locking_infos = vec![ + // After this action total locked tokens will be 30_000 again + ( + user1, + LOCK_EPOCH_LENGTH, + Coin::new(10000u64, IBC_DENOM_1.to_string()), + None, + ), + // Completely new user tries to lock more than it is available in public_cap + ( + user4, + LOCK_EPOCH_LENGTH, + Coin::new(8001u64, IBC_DENOM_1.to_string()), + Some("The limit for locking tokens has been reached. No more tokens can be locked."), + ), + // Completely new user locks 5_000 tokens in public_cap; the total locked tokens will be 35_000 + ( + user4, + LOCK_EPOCH_LENGTH, + Coin::new(5000u64, IBC_DENOM_1.to_string()), + None, + ), + ]; + + execute_locking_and_verify(&mut deps, &env, locking_infos); + + verify_locked_tokens_info( + &deps.storage, + 1, + 35000, + 0, + vec![(user1_addr.clone(), 0), (user4_addr.clone(), 0)], + ); + + // 7. Round 1: Have the known user from previous step lock more to fill the public_cap and some more into extra_cap. + // 8. Round 1: Have the same user try to lock in extra_cap more than it should be allowed. + // 9. Round 1: Have the same user lock the most it should be allowed in the extra_cap. + // 10. Round 1: Have other two known users lock as much as they should be allowed in the extra_cap. + + // Advance the chain by 1 day + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + let locking_infos = vec![ + // User 1 locks 3100 tokens, 3000 in public_cap, 100 in extra_cap + // After this action the total locked tokens will be 38_100 + // 38_000 locked in public_cap and 100 locked in extra_cap + ( + user1, + LOCK_EPOCH_LENGTH, + Coin::new(3100u64, IBC_DENOM_1.to_string()), + None, + ), + // User 1 tries to lock in extra_cap more than it should be allowed. + // By the voting power in previous round it should be allowed to + // lock 285 tokens, and in previous step it already locked 100. 
+ ( + user1, + LOCK_EPOCH_LENGTH, + Coin::new(186u64, IBC_DENOM_1.to_string()), + Some("The limit for locking tokens has been reached. No more tokens can be locked."), + ), + // User 1 locks in extra_cap the maximum it should be allowed (285) + // After this action the total locked tokens will be 38_285 + ( + user1, + LOCK_EPOCH_LENGTH, + Coin::new(185u64, IBC_DENOM_1.to_string()), + None, + ), + // User 2 locks in extra_cap the maximum it should be allowed (571) + // After this action the total locked tokens will be 38_856 + ( + user2, + LOCK_EPOCH_LENGTH, + Coin::new(571u64, IBC_DENOM_1.to_string()), + None, + ), + // User 3 locks in extra_cap the maximum it should be allowed (1142) + // After this action the total locked tokens will be 39_998 + ( + user3, + LOCK_EPOCH_LENGTH, + Coin::new(1142u64, IBC_DENOM_1.to_string()), + None, + ), + ]; + + execute_locking_and_verify(&mut deps, &env, locking_infos); + + verify_locked_tokens_info( + &deps.storage, + 1, + 39998, + 1998, + vec![ + (user1_addr.clone(), 285), + (user2_addr.clone(), 571), + (user3_addr.clone(), 1142), + ], + ); + + // 11. Round 1: Update config to increase total_cap and set extra_cap starting from round 2. + + // Advance the chain by 1 day + env.block.time = env.block.time.plus_days(1); + env.block.height += BLOCKS_PER_DAY; + + let msg = ExecuteMsg::UpdateConfig { + activate_at: FIRST_ROUND_START.plus_nanos(2 * ROUND_LENGTH + 1), + max_locked_tokens: Some(50000), + current_users_extra_cap: Some(5000), + max_deployment_duration: None, + }; + + let res = execute(deps.as_mut(), env.clone(), admin_msg_info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + // 12. Round 1: Update config to close the extra_cap after some time in round 2. 
+ let msg = ExecuteMsg::UpdateConfig { + activate_at: FIRST_ROUND_START.plus_nanos(2 * ROUND_LENGTH + TEN_DAYS_IN_NANOS + 1), + max_locked_tokens: None, + current_users_extra_cap: Some(0), + max_deployment_duration: None, + }; + + let res = execute(deps.as_mut(), env.clone(), admin_msg_info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + // Advance the chain into the round 2 plus 1 day + env.block.time = FIRST_ROUND_START.plus_nanos(2 * ROUND_LENGTH + 1 + ONE_DAY_IN_NANO_SECONDS); + env.block.height = INITIAL_BLOCK_HEIGHT + BLOCKS_PER_DAY * 61; + + // 13. Round 2: Have a completely new user lock tokens to fill up the public_cap, then try to lock more. + // 14. Round 2: Have a known user lock maximum allowed in extra cap. + + let locking_infos = vec![ + // Completely new user locks up to the public_cap + // After this action total locked tokens will be 45_000 + ( + user5, + LOCK_EPOCH_LENGTH, + Coin::new(5002u64, IBC_DENOM_1.to_string()), + None, + ), + // Then the same user tries to lock more than allowed in public_cap, while extra_cap is still active + ( + user5, + LOCK_EPOCH_LENGTH, + Coin::new(1u64, IBC_DENOM_1.to_string()), + Some("The limit for locking tokens has been reached. No more tokens can be locked."), + ), + // User 4 had voting power 4_500 out of 71_996 total voting power in round 1 + // With the extra_cap of 5_000, it is allowed to lock 312 tokens in it + // After this action total locked tokens will be 45_312 + ( + user4, + LOCK_EPOCH_LENGTH, + Coin::new(312u64, IBC_DENOM_1.to_string()), + None, + ), + ]; + + execute_locking_and_verify(&mut deps, &env, locking_infos); + + // 15. Round 2: Advance the chain to end the extra_cap duration and have a user from step #13 lock + // additional amount that matches the entire amount previously reserved for extra_cap. 
+ env.block.time = FIRST_ROUND_START.plus_nanos(2 * ROUND_LENGTH + TEN_DAYS_IN_NANOS + 1); + env.block.height = INITIAL_BLOCK_HEIGHT + BLOCKS_PER_DAY * 70; + + let info = get_message_info( + &deps.api, + user5, + &[Coin::new(4688u64, IBC_DENOM_1.to_string())], + ); + let msg = ExecuteMsg::LockTokens { + lock_duration: LOCK_EPOCH_LENGTH, + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok()); + + verify_locked_tokens_info( + &deps.storage, + 2, + 50000, + 312, + vec![ + (user1_addr.clone(), 0), + (user2_addr.clone(), 0), + (user3_addr.clone(), 0), + (user4_addr.clone(), 312), + (user5_addr.clone(), 0), + ], + ); +} + +fn execute_locking_and_verify( + deps: &mut OwnedDeps, + env: &Env, + locking_infos: Vec<(&str, u64, Coin, Option<&str>)>, +) { + for locking_info in locking_infos { + let info = get_message_info(&deps.api, locking_info.0, &[locking_info.2]); + let msg = ExecuteMsg::LockTokens { + lock_duration: locking_info.1, + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + match locking_info.3 { + None => { + assert!(res.is_ok(), "error: {:?}", res); + } + Some(error_message) => { + assert!(res.unwrap_err().to_string().contains(error_message)); + } + } + } +} + +fn verify_locked_tokens_info( + storage: &impl Storage, + round_id: u64, + expected_total_locked_tokens: u128, + expected_extra_locked_tokens_round: u128, + expected_extra_locked_tokens_round_users: Vec<(Addr, u128)>, +) { + assert_eq!( + LOCKED_TOKENS.load(storage).unwrap_or_default(), + expected_total_locked_tokens + ); + assert_eq!( + EXTRA_LOCKED_TOKENS_ROUND_TOTAL + .load(storage, round_id) + .unwrap_or_default(), + expected_extra_locked_tokens_round + ); + + for expected_user_extra_locked in expected_extra_locked_tokens_round_users { + assert_eq!( + EXTRA_LOCKED_TOKENS_CURRENT_USERS + .load(storage, (round_id, expected_user_extra_locked.0)) + .unwrap_or_default(), + expected_user_extra_locked.1, + ); + } +} diff --git 
a/contracts/hydro/src/testing_lsm_integration.rs b/contracts/hydro/src/testing_lsm_integration.rs index cbf9c26..79908d9 100644 --- a/contracts/hydro/src/testing_lsm_integration.rs +++ b/contracts/hydro/src/testing_lsm_integration.rs @@ -13,7 +13,10 @@ use neutron_sdk::{ use neutron_std::types::ibc::applications::transfer::v1::QueryDenomTraceResponse; use crate::{ - contract::{execute, instantiate, query_round_tranche_proposals, query_top_n_proposals, sudo}, + contract::{ + compute_current_round_id, execute, instantiate, query_round_tranche_proposals, + query_top_n_proposals, sudo, + }, lsm_integration::{ get_total_power_for_round, get_validator_power_ratio_for_round, update_scores_due_to_power_ratio_change, validate_denom, @@ -40,6 +43,7 @@ fn get_default_constants() -> crate::state::Constants { lock_epoch_length: 1, first_round_start: Timestamp::from_seconds(0), max_locked_tokens: 1, + current_users_extra_cap: 0, paused: false, max_validator_shares_participating: 2, hub_connection_id: "connection-0".to_string(), @@ -290,9 +294,9 @@ fn test_validate_denom() { description: "happy path".to_string(), denom: IBC_DENOM_1.to_string(), expected_result: Ok(VALIDATOR_1.to_string()), - setup: Box::new(|storage, _env| { + setup: Box::new(|storage, env| { let constants = get_default_constants(); - crate::state::CONSTANTS.save(storage, &constants).unwrap(); + crate::state::CONSTANTS.save(storage, env.block.time.nanos(), &constants).unwrap(); let round_id = 0; let res = set_validator_infos_for_round( storage, @@ -319,7 +323,7 @@ fn test_validate_denom() { let constants = get_default_constants(); crate::state::CONSTANTS - .save(&mut deps.storage, &constants) + .save(&mut deps.storage, env.block.time.nanos(), &constants) .unwrap(); env.block.time = Timestamp::from_seconds(0); @@ -328,7 +332,7 @@ fn test_validate_denom() { let result = validate_denom( deps.as_ref(), - env.clone(), + compute_current_round_id(&env, &constants).unwrap(), &constants, test_case.denom.clone(), ); @@ 
-736,7 +740,30 @@ fn lock_tokens_multiple_validators_and_vote() { } // update the power ratio for validator 1 to become 0.5 - set_validator_power_ratio(deps.as_mut().storage, 0, VALIDATOR_1, Decimal::percent(50)); + let mock_tokens = Uint128::new(500); + let mock_shares = Uint128::new(1000) * TOKENS_TO_SHARES_MULTIPLIER; + let mock_validator = get_mock_validator(VALIDATOR_1, mock_tokens, mock_shares); + + let mock_data = HashMap::from([( + 1, + ICQMockData { + query_type: QueryType::KV, + should_query_return_error: false, + should_query_result_return_error: false, + kv_results: vec![StorageValue { + storage_prefix: STAKING_STORE_KEY.to_string(), + key: Binary::default(), + value: Binary::from(mock_validator.encode_to_vec()), + }], + }, + )]); + + deps.querier = deps + .querier + .with_custom_handler(custom_interchain_query_mock(mock_data)); + + let res = sudo(deps.as_mut(), env, SudoMsg::KVQueryResult { query_id: 1 }); + assert!(res.is_ok()); // Check the proposal scores { diff --git a/contracts/hydro/src/testing_queries.rs b/contracts/hydro/src/testing_queries.rs index 064eb2d..eeeb301 100644 --- a/contracts/hydro/src/testing_queries.rs +++ b/contracts/hydro/src/testing_queries.rs @@ -7,9 +7,7 @@ use crate::contract::{ scale_lockup_power, }; use crate::msg::ProposalToLockups; -use crate::state::{ - RoundLockPowerSchedule, ValidatorInfo, Vote, CONSTANTS, VALIDATORS_INFO, VOTE_MAP, -}; +use crate::state::{RoundLockPowerSchedule, ValidatorInfo, Vote, VALIDATORS_INFO, VOTE_MAP}; use crate::testing::{ get_default_instantiate_msg, get_message_info, set_default_validator_for_rounds, IBC_DENOM_1, ONE_MONTH_IN_NANO_SECONDS, VALIDATOR_1, VALIDATOR_1_LST_DENOM_1, VALIDATOR_2, VALIDATOR_3, @@ -18,6 +16,7 @@ use crate::testing_lsm_integration::set_validator_power_ratio; use crate::testing_mocks::{ denom_trace_grpc_query_mock, mock_dependencies, no_op_grpc_query_mock, MockQuerier, }; +use crate::utils::load_current_constants; use crate::{ contract::{execute, instantiate, 
query_expired_user_lockups, query_user_voting_power}, msg::ExecuteMsg, @@ -265,7 +264,7 @@ fn query_user_lockups_test() { assert_eq!(first_lockup_amount, expired_lockups[0].funds.amount.u128()); // adjust the validator power ratios to check that they are reflected properly in the result - let constants = CONSTANTS.load(deps.as_ref().storage).unwrap(); + let constants = load_current_constants(&deps.as_ref(), &env).unwrap(); let current_round_id = compute_current_round_id(&env, &constants).unwrap(); set_validator_power_ratio( deps.as_mut().storage, diff --git a/contracts/hydro/src/testing_snapshoting.rs b/contracts/hydro/src/testing_snapshoting.rs new file mode 100644 index 0000000..c0eaa79 --- /dev/null +++ b/contracts/hydro/src/testing_snapshoting.rs @@ -0,0 +1,198 @@ +use std::collections::HashMap; + +use cosmwasm_std::{testing::mock_env, Coin, Storage, Timestamp}; + +use crate::{ + contract::{execute, instantiate}, + msg::{ExecuteMsg, InstantiateMsg}, + state::{HEIGHT_TO_ROUND, ROUND_TO_HEIGHT_RANGE, USER_LOCKS}, + testing::{ + get_default_instantiate_msg, get_message_info, IBC_DENOM_1, ONE_DAY_IN_NANO_SECONDS, + VALIDATOR_1, VALIDATOR_1_LST_DENOM_1, + }, + testing_lsm_integration::set_validator_infos_for_round, + testing_mocks::{denom_trace_grpc_query_mock, mock_dependencies}, +}; + +#[test] +fn test_user_locks_snapshoting() { + let grpc_query = denom_trace_grpc_query_mock( + "transfer/channel-0".to_string(), + HashMap::from([(IBC_DENOM_1.to_string(), VALIDATOR_1_LST_DENOM_1.to_string())]), + ); + + let user = "addr0000"; + let initial_block_time = Timestamp::from_nanos(1737540000000000000); + let initial_block_height = 19_185_000; + let (mut deps, mut env) = (mock_dependencies(grpc_query), mock_env()); + let user_addr = deps.api.addr_make(user); + + env.block.time = initial_block_time; + env.block.height = initial_block_height; + + let info = get_message_info(&deps.api, user, &[]); + let instantiate_msg = InstantiateMsg { + first_round_start: env.block.time, + 
round_length: 30 * ONE_DAY_IN_NANO_SECONDS, + lock_epoch_length: 30 * ONE_DAY_IN_NANO_SECONDS, + ..get_default_instantiate_msg(&deps.api) + }; + + let res = instantiate( + deps.as_mut(), + env.clone(), + info.clone(), + instantiate_msg.clone(), + ); + assert!(res.is_ok()); + + let res = set_validator_infos_for_round(&mut deps.storage, 0, vec![VALIDATOR_1.to_string()]); + assert!(res.is_ok()); + + env.block.time = env.block.time.plus_days(1); + env.block.height += 35000; + + let info = get_message_info( + &deps.api, + user, + &[Coin::new(1000u64, IBC_DENOM_1.to_string())], + ); + let msg = ExecuteMsg::LockTokens { + lock_duration: instantiate_msg.lock_epoch_length, + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + let current_round = 0; + let current_round_expected_initial_height = env.block.height; + verify_round_height_mappings( + &deps.storage, + current_round, + (current_round_expected_initial_height, env.block.height), + env.block.height, + ); + + env.block.time = env.block.time.plus_days(1); + env.block.height += 35000; + + let msg = ExecuteMsg::LockTokens { + lock_duration: instantiate_msg.lock_epoch_length, + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + verify_round_height_mappings( + &deps.storage, + current_round, + (current_round_expected_initial_height, env.block.height), + env.block.height, + ); + + let mut expected_user_locks = vec![(env.block.height + 1, vec![0, 1])]; + + env.block.time = env.block.time.plus_days(1); + env.block.height += 35000; + + let msg = ExecuteMsg::RefreshLockDuration { + lock_ids: vec![0], + lock_duration: 3 * instantiate_msg.lock_epoch_length, + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + verify_round_height_mappings( + &deps.storage, + current_round, + (current_round_expected_initial_height, env.block.height), 
+ env.block.height, + ); + + env.block.time = env.block.time.plus_days(1); + env.block.height += 35000; + + let msg = ExecuteMsg::LockTokens { + lock_duration: instantiate_msg.lock_epoch_length, + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + expected_user_locks.push((env.block.height + 1, vec![0, 1, 2])); + + // Advance the chain by 35 days from initial time so that the user can unlock locks 1 and 2 + env.block.time = initial_block_time.plus_nanos(35 * ONE_DAY_IN_NANO_SECONDS + 1); + env.block.height = initial_block_height + 35 * 35000; + + let current_round = 1; + let current_round_expected_initial_height = env.block.height; + + let msg = ExecuteMsg::UnlockTokens { + lock_ids: Some(vec![1, 2]), + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + expected_user_locks.push((env.block.height + 1, vec![0])); + + verify_round_height_mappings( + &deps.storage, + current_round, + (current_round_expected_initial_height, env.block.height), + env.block.height, + ); + + // Advance the chain by 95 days from initial time so that the user can unlock lock 0 + env.block.time = initial_block_time.plus_nanos(95 * ONE_DAY_IN_NANO_SECONDS + 1); + env.block.height = initial_block_height + 95 * 35000; + + let current_round = 3; + let current_round_expected_initial_height = env.block.height; + + let msg = ExecuteMsg::UnlockTokens { + lock_ids: Some(vec![0]), + }; + let res = execute(deps.as_mut(), env.clone(), info.clone(), msg); + assert!(res.is_ok(), "error: {:?}", res); + + expected_user_locks.push((env.block.height + 1, vec![])); + + verify_round_height_mappings( + &deps.storage, + current_round, + (current_round_expected_initial_height, env.block.height), + env.block.height, + ); + + // Verify that USER_LOCKS return expected values at a given heights + for expected_locks in expected_user_locks { + // unwrap() on purpose- it should never fail 
+ let user_locks = USER_LOCKS + .may_load_at_height(&deps.storage, user_addr.clone(), expected_locks.0) + .unwrap() + .unwrap(); + assert_eq!(expected_locks.1, user_locks); + } +} + +fn verify_round_height_mappings( + storage: &impl Storage, + round_id: u64, + expected_round_height_range: (u64, u64), + height_to_check: u64, +) { + let height_range = ROUND_TO_HEIGHT_RANGE + .load(storage, round_id) + .unwrap_or_default(); + assert_eq!( + height_range.lowest_known_height, + expected_round_height_range.0 + ); + assert_eq!( + height_range.highest_known_height, + expected_round_height_range.1 + ); + + let height_round = HEIGHT_TO_ROUND + .load(storage, height_to_check) + .unwrap_or_default(); + assert_eq!(height_round, round_id); +} diff --git a/contracts/hydro/src/testing_utils.rs b/contracts/hydro/src/testing_utils.rs new file mode 100644 index 0000000..7469192 --- /dev/null +++ b/contracts/hydro/src/testing_utils.rs @@ -0,0 +1,94 @@ +use cosmwasm_std::{testing::mock_env, Timestamp}; + +use crate::{ + state::{Constants, CONSTANTS}, + testing::{get_default_power_schedule, ONE_DAY_IN_NANO_SECONDS}, + testing_mocks::{mock_dependencies, no_op_grpc_query_mock}, + utils::load_current_constants, +}; + +#[test] +fn load_current_constants_test() { + let (mut deps, mut env) = (mock_dependencies(no_op_grpc_query_mock()), mock_env()); + + struct TestCase { + activate_at_timestamp: u64, + constants_to_insert: Constants, + } + + let constants_template = Constants { + round_length: ONE_DAY_IN_NANO_SECONDS, + lock_epoch_length: ONE_DAY_IN_NANO_SECONDS, + first_round_start: Timestamp::from_seconds(0), + max_locked_tokens: 0, + current_users_extra_cap: 0, + paused: false, + max_validator_shares_participating: 500, + hub_connection_id: "connection-0".to_string(), + hub_transfer_channel_id: "channel-1".to_string(), + icq_update_period: 100000, + max_deployment_duration: 12, + round_lock_power_schedule: get_default_power_schedule(), + }; + + // Change max_locked_tokens each time we insert 
new Constants so that we can differentiate them + let clone_with_locked_tokens = + |constants_template: &Constants, max_locked_tokens| -> Constants { + let mut constants = constants_template.clone(); + constants.max_locked_tokens = max_locked_tokens; + + constants + }; + + let test_cases = vec![ + TestCase { + activate_at_timestamp: 1730840400000000000, // Tuesday, November 05, 2024 09:00:00 PM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 1u128), + }, + TestCase { + activate_at_timestamp: 1731924672000000000, // Monday, November 18, 2024 10:11:12 AM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 2u128), + }, + TestCase { + activate_at_timestamp: 1732421792000000000, // Sunday, November 24, 2024 04:16:32 AM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 3u128), + }, + TestCase { + activate_at_timestamp: 1734264033000000000, // Sunday, December 15, 2024 12:00:33 PM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 4u128), + }, + TestCase { + activate_at_timestamp: 1734955199000000000, // Monday, December 23, 2024 11:59:59 AM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 5u128), + }, + TestCase { + activate_at_timestamp: 1735689599000000000, // Tuesday, December 31, 2024 11:59:59 PM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 6u128), + }, + TestCase { + activate_at_timestamp: 1736208000000000000, // Tuesday, January 07, 2025 12:00:00 AM GMT + constants_to_insert: clone_with_locked_tokens(&constants_template, 7u128), + }, + ]; + + // first insert constants for all timestamps + for test_case in test_cases.iter() { + let res = CONSTANTS.save( + &mut deps.storage, + test_case.activate_at_timestamp, + &test_case.constants_to_insert, + ); + assert!(res.is_ok()); + } + + // Verify that we receive expected constants by setting the block time to activate_at_timestamp + 1 hour + for test_case in test_cases.iter() { + 
env.block.time = Timestamp::from_nanos(test_case.activate_at_timestamp).plus_seconds(3600); + + let res = load_current_constants(&deps.as_ref(), &env); + assert!(res.is_ok()); + let constants = res.unwrap(); + + assert_eq!(constants, test_case.constants_to_insert); + } +} diff --git a/contracts/hydro/src/utils.rs b/contracts/hydro/src/utils.rs new file mode 100644 index 0000000..fd8549d --- /dev/null +++ b/contracts/hydro/src/utils.rs @@ -0,0 +1,290 @@ +use cosmwasm_std::{ + Addr, Decimal, Deps, DepsMut, Env, Order, StdError, StdResult, Storage, Timestamp, Uint128, +}; +use cw_storage_plus::Bound; +use neutron_sdk::bindings::query::NeutronQuery; + +use crate::{ + contract::get_user_voting_power_for_past_round, + error::ContractError, + lsm_integration::{get_total_power_for_round, initialize_validator_store}, + state::{ + Constants, HeightRange, CONSTANTS, EXTRA_LOCKED_TOKENS_CURRENT_USERS, + EXTRA_LOCKED_TOKENS_ROUND_TOTAL, HEIGHT_TO_ROUND, LOCKED_TOKENS, ROUND_TO_HEIGHT_RANGE, + }, +}; + +/// Loads the constants that are active for the current block according to the block timestamp. +pub fn load_current_constants(deps: &Deps, env: &Env) -> StdResult { + Ok(load_constants_active_at_timestamp(deps, env.block.time)?.1) +} + +/// Loads the constants that were active at the given timestamp. Returns both the Constants and +/// their activation timestamp. 
+pub fn load_constants_active_at_timestamp( + deps: &Deps, + timestamp: Timestamp, +) -> StdResult<(u64, Constants)> { + let current_constants: Vec<(u64, Constants)> = CONSTANTS + .range( + deps.storage, + None, + Some(Bound::inclusive(timestamp.nanos())), + Order::Descending, + ) + .take(1) + .filter_map(|constants| match constants { + Ok(constants) => Some(constants), + Err(_) => None, + }) + .collect(); + + Ok(match current_constants.len() { + 1 => current_constants[0].clone(), + _ => { + return Err(StdError::generic_err( + "Failed to load constants active at the given timestamp.", + )); + } + }) +} + +// This function validates if user should be allowed to lock more tokens, depending on the total amount of +// currently locked tokens, existence of the extra cap and the number of tokens user already locked in that cap. +// Caps that we consider: +// 1) Total cap: the maximum number of tokens that can be locked in the contract, regardless of the +// extra cap existence. +// 2) Extra cap: the number of tokens reserved for users that had some voting power in previous round. +// It will be available to those users to lock additional tokens even if the public cap is filled. +// The extra cap will be available only for certain time frame at the beginning of the round. Duration +// will be determined through governance. After the extra cap duration has expired, any tokens left up +// to the total cap are allowed to be locked by any Hydro user. +// 3) Public cap: during the existence of extra cap, new Hydro users will be allowed to lock tokens only +// in public cap, where public_cap = total_cap - extra_cap. +// After the extra cap duration expires, public cap becomes equal to the total cap. 
+pub fn validate_locked_tokens_caps( + deps: &DepsMut, + constants: &Constants, + current_round: u64, + sender: &Addr, + total_locked_tokens: u128, + amount_to_lock: u128, +) -> Result { + let lock_limit_reached_error = Err(ContractError::Std(StdError::generic_err( + "The limit for locking tokens has been reached. No more tokens can be locked.", + ))); + + // Regardless of public_cap and extra_cap, we must make sure that nobody can lock more than total_cap + let total_locked_after = total_locked_tokens + amount_to_lock; + if total_locked_after > constants.max_locked_tokens { + return lock_limit_reached_error; + } + + let public_cap = constants.max_locked_tokens - constants.current_users_extra_cap; + + // This branch will be executed in one of the following cases: + // 1) constants.current_users_extra_cap != 0 and there is SOME room in the public cap + // 2) constants.current_users_extra_cap == 0 and there is ENOUGH room in the public_cap. + // Since in this case public_cap == total_cap, there must be enough room in the public_cap, + // otherwise we would error out at the start of this function. + if public_cap > total_locked_tokens { + // Check if entire amount_to_lock can fit into a public cap + if public_cap >= total_locked_after { + return Ok(LockingInfo { + lock_in_public_cap: Some(amount_to_lock), + lock_in_known_users_cap: None, + }); + } + + // Lock as much as possible in the public_cap, and the rest in the extra_cap + let lock_in_public_cap = public_cap - total_locked_tokens; + let lock_in_known_users_cap = amount_to_lock - lock_in_public_cap; + + // If there is still room in extra_cap, then check if this + // is a user that should be allowed to use the extra_cap. + if !can_user_lock_in_extra_cap( + deps, + constants, + current_round, + sender, + lock_in_known_users_cap, + )? 
{ + return lock_limit_reached_error; + } + + return Ok(LockingInfo { + lock_in_public_cap: Some(lock_in_public_cap), + lock_in_known_users_cap: Some(lock_in_known_users_cap), + }); + } + + // If we got through here, it means that constants.current_users_extra_cap > 0. + // If constants.current_users_extra_cap was 0 and public_cap (equal to total_cap) + // is equal to total_locked_tokens, then we would error out at the start of this + // function, because any amount_to_lock would exceed the total_cap. This is just + // a safety check to make the code resilient to future changes. + if constants.current_users_extra_cap == 0 { + return lock_limit_reached_error; + } + + // If there is still room in extra_cap, then check if this + // is a user that should be allowed to use the extra_cap. + if !can_user_lock_in_extra_cap(deps, constants, current_round, sender, amount_to_lock)? { + return lock_limit_reached_error; + } + + Ok(LockingInfo { + lock_in_public_cap: None, + lock_in_known_users_cap: Some(amount_to_lock), + }) +} + +fn can_user_lock_in_extra_cap( + deps: &DepsMut, + constants: &Constants, + current_round: u64, + sender: &Addr, + amount_to_lock: u128, +) -> Result { + let extra_locked_tokens_round_total = EXTRA_LOCKED_TOKENS_ROUND_TOTAL + .may_load(deps.storage, current_round)? + .unwrap_or(0); + + if extra_locked_tokens_round_total + amount_to_lock > constants.current_users_extra_cap { + return Ok(false); + } + + // Determine if user has the right to lock in extra_cap by looking at its voting power in previous round. + // If we are in round 0 then check the current round. 
+ let round_to_check = match current_round { + 0 => current_round, + _ => current_round - 1, + }; + + // Calculate user's voting power share in the total voting power + let users_voting_power = Decimal::from_ratio( + get_user_voting_power_for_past_round( + &deps.as_ref(), + constants, + sender.clone(), + round_to_check, + )?, + Uint128::one(), + ); + let total_voting_power = get_total_power_for_round(deps.as_ref(), round_to_check)?; + + // Prevent division by zero or break early in case user had no voting power in previous round. + if total_voting_power == Decimal::zero() || users_voting_power == Decimal::zero() { + return Ok(false); + } + + let users_voting_power_share = users_voting_power.checked_div(total_voting_power)?; + + // Calculate what would be users share of extra locked tokens in the maximum allowed extra locked tokens + let users_current_extra_lock = EXTRA_LOCKED_TOKENS_CURRENT_USERS + .may_load(deps.storage, (current_round, sender.clone()))? + .unwrap_or(0); + + let users_extra_lock = + Decimal::from_ratio(users_current_extra_lock + amount_to_lock, Uint128::one()); + let maximum_extra_lock = Decimal::from_ratio(constants.current_users_extra_cap, Uint128::one()); + let users_extra_lock_share = users_extra_lock.checked_div(maximum_extra_lock)?; + + // If users share in maximum allowed extra cap would be greater than its share in + // total voting power, then don't allow this user to lock the given amount of tokens. + if users_extra_lock_share > users_voting_power_share { + return Ok(false); + } + + Ok(true) +} + +// Whenever a users locks more tokens this function will update the necessary stores, +// depending on the amounts that user locked in public_cap and extra_cap. 
+// Stores that will (potentially) be updated: +// LOCKED_TOKENS, EXTRA_LOCKED_TOKENS_ROUND_TOTAL, EXTRA_LOCKED_TOKENS_CURRENT_USERS +pub fn update_locked_tokens_info( + deps: &mut DepsMut, + current_round: u64, + sender: &Addr, + mut total_locked_tokens: u128, + locking_info: LockingInfo, +) -> Result<(), ContractError> { + if let Some(lock_in_public_cap) = locking_info.lock_in_public_cap { + total_locked_tokens += lock_in_public_cap; + LOCKED_TOKENS.save(deps.storage, &total_locked_tokens)?; + } + + if let Some(lock_in_known_users_cap) = locking_info.lock_in_known_users_cap { + LOCKED_TOKENS.save( + deps.storage, + &(total_locked_tokens + lock_in_known_users_cap), + )?; + + EXTRA_LOCKED_TOKENS_ROUND_TOTAL.update( + deps.storage, + current_round, + |current_value| -> StdResult { + match current_value { + None => Ok(lock_in_known_users_cap), + Some(current_value) => Ok(current_value + lock_in_known_users_cap), + } + }, + )?; + + EXTRA_LOCKED_TOKENS_CURRENT_USERS.update( + deps.storage, + (current_round, sender.clone()), + |current_value| -> StdResult { + match current_value { + None => Ok(lock_in_known_users_cap), + Some(current_value) => Ok(current_value + lock_in_known_users_cap), + } + }, + )?; + } + + Ok(()) +} + +// Calls other functions that will update various stores whenever a transaction is executed against the contract. +pub fn run_on_each_transaction( + storage: &mut dyn Storage, + env: &Env, + round_id: u64, +) -> StdResult<()> { + initialize_validator_store(storage, round_id)?; + update_round_to_height_maps(storage, env, round_id) +} + +// Updates round_id -> height_range and block_height -> round_id maps, for later use. 
+pub fn update_round_to_height_maps( + storage: &mut dyn Storage, + env: &Env, + round_id: u64, +) -> StdResult<()> { + ROUND_TO_HEIGHT_RANGE.update( + storage, + round_id, + |height_range| -> Result { + match height_range { + None => Ok(HeightRange { + lowest_known_height: env.block.height, + highest_known_height: env.block.height, + }), + Some(mut height_range) => { + height_range.highest_known_height = env.block.height; + + Ok(height_range) + } + } + }, + )?; + + HEIGHT_TO_ROUND.save(storage, env.block.height, &round_id) +} + +pub struct LockingInfo { + pub lock_in_public_cap: Option, + pub lock_in_known_users_cap: Option, +} diff --git a/contracts/hydro/src/validators_icqs.rs b/contracts/hydro/src/validators_icqs.rs index 05766bc..258e3c9 100644 --- a/contracts/hydro/src/validators_icqs.rs +++ b/contracts/hydro/src/validators_icqs.rs @@ -20,11 +20,12 @@ use serde::{Deserialize, Serialize}; use crate::{ contract::{compute_current_round_id, NATIVE_TOKEN_DENOM}, error::ContractError, - lsm_integration::{initialize_validator_store, update_scores_due_to_power_ratio_change}, + lsm_integration::update_stores_due_to_power_ratio_change, state::{ - Constants, ValidatorInfo, CONSTANTS, QUERY_ID_TO_VALIDATOR, VALIDATORS_INFO, - VALIDATORS_PER_ROUND, VALIDATOR_TO_QUERY_ID, + Constants, ValidatorInfo, QUERY_ID_TO_VALIDATOR, VALIDATORS_INFO, VALIDATORS_PER_ROUND, + VALIDATOR_TO_QUERY_ID, }, + utils::{load_current_constants, run_on_each_transaction}, }; // A multiplier to normalize shares, such that when a validator has just been created @@ -121,9 +122,9 @@ pub fn handle_delivered_interchain_query_result( ); } }; - let constants = CONSTANTS.load(deps.storage)?; + let constants = load_current_constants(&deps.as_ref(), &env)?; let current_round = compute_current_round_id(&env, &constants)?; - initialize_validator_store(deps.storage, current_round)?; + run_on_each_transaction(deps.storage, &env, current_round)?; let validator_address = validator.operator_address.clone(); let 
new_tokens = Uint128::from_str(&validator.tokens)?; @@ -140,6 +141,7 @@ pub fn handle_delivered_interchain_query_result( Some(validator_info) => { top_n_validator_update( &mut deps, + &env, current_round, validator_info, new_tokens, @@ -154,7 +156,7 @@ pub fn handle_delivered_interchain_query_result( match get_last_validator(&mut deps, current_round, &constants) { None => { // if there are currently less than top N validators, add this one to the top N - top_n_validator_add(&mut deps, current_round, validator_info)?; + top_n_validator_add(&mut deps, &env, current_round, validator_info)?; } Some(last_validator) => { // there are top N validators already, so check if the new one has more @@ -164,8 +166,13 @@ pub fn handle_delivered_interchain_query_result( let other_validator_info = VALIDATORS_INFO .load(deps.storage, (current_round, last_validator.1.clone()))?; - top_n_validator_remove(&mut deps, current_round, other_validator_info)?; - top_n_validator_add(&mut deps, current_round, validator_info)?; + top_n_validator_remove( + &mut deps, + &env, + current_round, + other_validator_info, + )?; + top_n_validator_add(&mut deps, &env, current_round, validator_info)?; // remove ICQ of the validator that was dropped from the top N let last_validator_query_id = @@ -187,15 +194,15 @@ pub fn handle_delivered_interchain_query_result( fn top_n_validator_add( deps: &mut DepsMut, + env: &Env, current_round: u64, validator_info: ValidatorInfo, ) -> Result<(), NeutronError> { // this call only makes difference if some validator was in the top N, // then was droped out, and then got back in the top N again - - // update the power ratio for the validator in the scores of proposals - update_scores_due_to_power_ratio_change( + update_stores_due_to_power_ratio_change( deps.storage, + env.block.height, &validator_info.address.clone(), current_round, Decimal::zero(), @@ -220,6 +227,7 @@ fn top_n_validator_add( fn top_n_validator_update( deps: &mut DepsMut, + env: &Env, current_round: u64, 
mut validator_info: ValidatorInfo, new_tokens: Uint128, @@ -250,8 +258,9 @@ fn top_n_validator_update( } if validator_info.power_ratio != new_power_ratio { - update_scores_due_to_power_ratio_change( + update_stores_due_to_power_ratio_change( deps.storage, + env.block.height, &validator_info.address.clone(), current_round, validator_info.power_ratio, @@ -275,11 +284,13 @@ fn top_n_validator_update( fn top_n_validator_remove( deps: &mut DepsMut, + env: &Env, current_round: u64, validator_info: ValidatorInfo, ) -> Result<(), NeutronError> { - update_scores_due_to_power_ratio_change( + update_stores_due_to_power_ratio_change( deps.storage, + env.block.height, &validator_info.address.clone(), current_round, validator_info.power_ratio,