hexsha
stringlengths
40
40
size
int64
2
1.05M
content
stringlengths
2
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
6a47424a0f386c502f24ea473918d5fc4db01cf3
17,653
use std::collections::{HashMap, HashSet}; use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::str::FromStr; use config::{Config, File}; use serde::{Deserialize, Serialize}; use serde_with::rust::string_empty_as_none; use crate::commands::{validate_worker_name, DEFAULT_CONFIG_PATH}; use crate::settings::toml::deploy_config::{DeployConfig, RouteConfig}; use crate::settings::toml::dev::Dev; use crate::settings::toml::environment::Environment; use crate::settings::toml::kv_namespace::{ConfigKvNamespace, KvNamespace}; use crate::settings::toml::site::Site; use crate::settings::toml::target_type::TargetType; use crate::settings::toml::Target; use crate::terminal::{emoji, message, styles}; #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] pub struct Manifest { #[serde(default)] pub name: String, #[serde(rename = "type")] pub target_type: TargetType, #[serde(default)] pub account_id: String, pub workers_dev: Option<bool>, #[serde(default, with = "string_empty_as_none")] pub route: Option<String>, pub routes: Option<Vec<String>>, #[serde(default, with = "string_empty_as_none")] pub zone_id: Option<String>, pub webpack_config: Option<String>, pub private: Option<bool>, // TODO: maybe one day, serde toml support will allow us to serialize sites // as a TOML inline table (this would prevent confusion with environments too!) 
pub site: Option<Site>, pub dev: Option<Dev>, #[serde(alias = "kv-namespaces")] pub kv_namespaces: Option<Vec<ConfigKvNamespace>>, pub env: Option<HashMap<String, Environment>>, pub vars: Option<HashMap<String, String>>, } impl Manifest { pub fn new(config_path: &Path) -> Result<Self, failure::Error> { let file_name = config_path.file_name().unwrap().to_str().unwrap(); let mut message = format!("{} not found", file_name); if config_path.to_str().unwrap() == DEFAULT_CONFIG_PATH { message.push_str("; run `wrangler init` to create one."); } failure::ensure!(config_path.exists(), message); let config = read_config(config_path)?; let manifest: Manifest = match config.try_into() { Ok(m) => m, Err(e) => { if e.to_string().contains("unknown field `kv-namespaces`") { failure::bail!("kv-namespaces should not live under the [site] table in your configuration file; please move it above [site].") } else { failure::bail!(e) } } }; check_for_duplicate_names(&manifest)?; Ok(manifest) } pub fn generate( name: String, target_type: Option<TargetType>, config_path: &PathBuf, site: Option<Site>, ) -> Result<Manifest, failure::Error> { let config_file = config_path.join("wrangler.toml"); let template_config_content = fs::read_to_string(&config_file); let template_config = match &template_config_content { Ok(content) => { let config: Manifest = toml::from_str(content)?; config.warn_on_account_info(); if let Some(target_type) = &target_type { if config.target_type != *target_type { message::warn(&format!("The template recommends the \"{}\" type. 
Using type \"{}\" may cause errors, we recommend changing the type field in wrangler.toml to \"{}\"", config.target_type, target_type, config.target_type)); } } Ok(config) } Err(err) => Err(err), }; let mut template_config = match template_config { Ok(config) => config, Err(err) => { log::info!("Error parsing template {}", err); log::debug!("template content {:?}", template_config_content); Manifest::default() } }; let default_workers_dev = match &template_config.route { Some(route) => { if route.is_empty() { Some(true) } else { None } } None => Some(true), }; template_config.name = name; template_config.workers_dev = default_workers_dev; if let Some(target_type) = &target_type { template_config.target_type = target_type.clone(); } if let Some(arg_site) = site { if template_config.site.is_none() { template_config.site = Some(arg_site); } } // TODO: https://github.com/cloudflare/wrangler/issues/773 let toml = toml::to_string(&template_config)?; log::info!("Writing a wrangler.toml file at {}", config_file.display()); fs::write(&config_file, &toml)?; Ok(template_config) } pub fn worker_name(&self, env_arg: Option<&str>) -> String { if let Some(environment) = self.get_environment(env_arg).unwrap_or_default() { if let Some(name) = &environment.name { return name.clone(); } if let Some(env) = env_arg { return format!("{}-{}", self.name, env); } } self.name.clone() } fn route_config(&self) -> RouteConfig { RouteConfig { account_id: Some(self.account_id.clone()), workers_dev: self.workers_dev, route: self.route.clone(), routes: self.routes.clone(), zone_id: self.zone_id.clone(), } } pub fn deploy_config(&self, env: Option<&str>) -> Result<DeployConfig, failure::Error> { let script = self.worker_name(env); validate_worker_name(&script)?; if let Some(environment) = self.get_environment(env)? 
{ // if there is an environment level deploy target, try to return that if let Some(env_route_config) = environment.route_config(self.account_id.clone(), self.zone_id.clone()) { DeployConfig::build(&script, &env_route_config) } else { // If the top level config is Zoned, the user needs to specify new route config let top_level_config = DeployConfig::build(&script, &self.route_config())?; match top_level_config { DeployConfig::Zoned(_) => failure::bail!( "you must specify route(s) per environment for zoned deploys." ), DeployConfig::Zoneless(_) => Ok(top_level_config), } } } else { DeployConfig::build(&script, &self.route_config()) } } pub fn get_account_id(&self, environment_name: Option<&str>) -> Result<String, failure::Error> { let environment = self.get_environment(environment_name)?; let mut result = self.account_id.to_string(); if let Some(environment) = environment { if let Some(account_id) = &environment.account_id { result = account_id.to_string(); } } if result.is_empty() { let mut msg = "Your configuration file is missing an account_id field".to_string(); if let Some(environment_name) = environment_name { msg.push_str(&format!(" in [env.{}]", environment_name)); } failure::bail!("{}", &msg) } else { Ok(result) } } pub fn get_target( &self, environment_name: Option<&str>, preview: bool, ) -> Result<Target, failure::Error> { // Site projects are always webpack for now; don't let toml override this. 
let target_type = match self.site { Some(_) => TargetType::Webpack, None => self.target_type.clone(), }; let mut target = Target { target_type, // MUST inherit account_id: self.account_id.clone(), // MAY inherit webpack_config: self.webpack_config.clone(), // MAY inherit // importantly, the top level name will be modified // to include the name of the environment name: self.name.clone(), // MAY inherit kv_namespaces: get_namespaces(self.kv_namespaces.clone(), preview)?, // MUST NOT inherit site: self.site.clone(), // MUST NOT inherit vars: self.vars.clone(), // MAY inherit, }; let environment = self.get_environment(environment_name)?; if let Some(environment) = environment { target.name = self.worker_name(environment_name); if let Some(account_id) = &environment.account_id { target.account_id = account_id.clone(); } if let Some(webpack_config) = &environment.webpack_config { target.webpack_config = Some(webpack_config.clone()); } // don't inherit kv namespaces because it is an anti-pattern to use the same namespaces across multiple environments target.kv_namespaces = get_namespaces(environment.kv_namespaces.clone(), preview)?; // don't inherit vars target.vars = environment.vars.clone(); } Ok(target) } pub fn get_environment( &self, environment_name: Option<&str>, ) -> Result<Option<&Environment>, failure::Error> { // check for user-specified environment name if let Some(environment_name) = environment_name { if let Some(environment_table) = &self.env { if let Some(environment) = environment_table.get(environment_name) { Ok(Some(environment)) } else { failure::bail!(format!( "{} Could not find environment with name \"{}\"", emoji::WARN, environment_name )) } } else { failure::bail!(format!( "{} There are no environments specified in your configuration file", emoji::WARN )) } } else { Ok(None) } } fn warn_on_account_info(&self) { let account_id_env = env::var("CF_ACCOUNT_ID").is_ok(); let zone_id_env = env::var("CF_ZONE_ID").is_ok(); let mut top_level_fields: 
Vec<String> = Vec::new(); if !account_id_env { top_level_fields.push("account_id".to_string()); } if let Some(kv_namespaces) = &self.kv_namespaces { for kv_namespace in kv_namespaces { top_level_fields.push(format!( "kv-namespace {} needs a namespace_id", kv_namespace.binding )); } } if let Some(route) = &self.route { if !route.is_empty() { top_level_fields.push("route".to_string()); } } if let Some(zone_id) = &self.zone_id { if !zone_id.is_empty() && !zone_id_env { top_level_fields.push("zone_id".to_string()); } } let mut env_fields: HashMap<String, Vec<String>> = HashMap::new(); if let Some(env) = &self.env { for (env_name, env) in env { let mut current_env_fields: Vec<String> = Vec::new(); if env.account_id.is_some() && !account_id_env { current_env_fields.push("account_id".to_string()); } if let Some(kv_namespaces) = &env.kv_namespaces { for kv_namespace in kv_namespaces { current_env_fields.push(format!( "kv-namespace {} needs a namespace_id", kv_namespace.binding )); } } if let Some(route) = &env.route { if !route.is_empty() { current_env_fields.push("route".to_string()); } } if let Some(zone_id) = &env.zone_id { if !zone_id.is_empty() && !zone_id_env { current_env_fields.push("zone_id".to_string()); } } if !current_env_fields.is_empty() { env_fields.insert(env_name.to_string(), current_env_fields); } } } let has_top_level_fields = !top_level_fields.is_empty(); let has_env_fields = !env_fields.is_empty(); let mut needs_new_line = false; if has_top_level_fields || has_env_fields { let toml_msg = styles::highlight("wrangler.toml"); let account_id_msg = styles::highlight("account_id"); let zone_id_msg = styles::highlight("zone_id"); let dash_url = styles::url("https://dash.cloudflare.com"); message::help( &format!("You will need to update the following fields in the created {} file before continuing:", toml_msg) ); message::help(&format!( "You can find your {} in the right sidebar of your account's Workers page, and {} in the right sidebar of a zone's overview 
tab at {}", account_id_msg, zone_id_msg, dash_url )); if has_top_level_fields { needs_new_line = true; for top_level_field in top_level_fields { println!("- {}", top_level_field); } } if has_env_fields { for (env_name, env_fields) in env_fields { if needs_new_line { println!(); } println!("[env.{}]", env_name); needs_new_line = true; for env_field in env_fields { println!(" - {}", env_field); } } } } } } impl FromStr for Manifest { type Err = toml::de::Error; fn from_str(serialized_toml: &str) -> Result<Self, Self::Err> { toml::from_str(serialized_toml) } } fn read_config(config_path: &Path) -> Result<Config, failure::Error> { let mut config = Config::new(); let config_str = config_path .to_str() .expect("project config path should be a string"); config.merge(File::with_name(config_str))?; // Eg.. `CF_ACCOUNT_AUTH_KEY=farts` would set the `account_auth_key` key config.merge(config::Environment::with_prefix("CF"))?; Ok(config) } fn check_for_duplicate_names(manifest: &Manifest) -> Result<(), failure::Error> { let mut names: HashSet<String> = HashSet::new(); let mut duplicate_names: HashSet<String> = HashSet::new(); names.insert(manifest.name.to_string()); if let Some(environments) = &manifest.env { for (_, environment) in environments.iter() { if let Some(name) = &environment.name { if names.contains(name) && !duplicate_names.contains(name) { duplicate_names.insert(name.to_string()); } else { names.insert(name.to_string()); } } } } let duplicate_name_string = duplicate_names .clone() .into_iter() .collect::<Vec<String>>() .join(", "); let duplicate_message = match duplicate_names.len() { 1 => Some("this name is duplicated".to_string()), n if n >= 2 => Some("these names are duplicated".to_string()), _ => None, }; if let Some(message) = duplicate_message { failure::bail!(format!( "{} Each name in your configuration file must be unique, {}: {}", emoji::WARN, message, duplicate_name_string )) } Ok(()) } fn get_namespaces( kv_namespaces: Option<Vec<ConfigKvNamespace>>, 
preview: bool, ) -> Result<Vec<KvNamespace>, failure::Error> { if let Some(namespaces) = kv_namespaces { namespaces.into_iter().map(|ns| { if preview { if let Some(preview_id) = &ns.preview_id { if let Some(id) = &ns.id { if preview_id == id { message::warn("Specifying the same KV namespace ID for both preview and production sessions may cause bugs in your production worker! Proceed with caution."); } } Ok(KvNamespace { id: preview_id.to_string(), binding: ns.binding.to_string(), }) } else { failure::bail!("In order to preview a worker with KV namespaces, you must designate a preview_id in your configuration file for each KV namespace you'd like to preview.") } } else if let Some(id) = &ns.id { Ok(KvNamespace { id: id.to_string(), binding: ns.binding, }) } else { failure::bail!("You must specify the namespace ID in the id field for the namespace with binding \"{}\"", &ns.binding) } }).collect() } else { Ok(Vec::new()) } }
38.459695
245
0.542174
ac8ada025f3dbc423d8a5b3f4a16a6afe4f270a3
2,949
#[doc = "Register `DATA` reader"] pub struct R(crate::R<DATA_SPEC>); impl core::ops::Deref for R { type Target = crate::R<DATA_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<DATA_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<DATA_SPEC>) -> Self { R(reader) } } #[doc = "Register `DATA` writer"] pub struct W(crate::W<DATA_SPEC>); impl core::ops::Deref for W { type Target = crate::W<DATA_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<DATA_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<DATA_SPEC>) -> Self { W(writer) } } #[doc = "Field `DATA` reader - Data Value"] pub struct DATA_R(crate::FieldReader<u32, u32>); impl DATA_R { pub(crate) fn new(bits: u32) -> Self { DATA_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DATA_R { type Target = crate::FieldReader<u32, u32>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DATA` writer - Data Value"] pub struct DATA_W<'a> { w: &'a mut W, } impl<'a> DATA_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff_ffff) | (value as u32 & 0xffff_ffff); self.w } } impl R { #[doc = "Bits 0:31 - Data Value"] #[inline(always)] pub fn data(&self) -> DATA_R { DATA_R::new((self.bits & 0xffff_ffff) as u32) } } impl W { #[doc = "Bits 0:31 - Data Value"] #[inline(always)] pub fn data(&mut self) -> DATA_W { DATA_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "SPIM Data\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), 
[`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [data](index.html) module"] pub struct DATA_SPEC; impl crate::RegisterSpec for DATA_SPEC { type Ux = u32; } #[doc = "`read()` method returns [data::R](R) reader structure"] impl crate::Readable for DATA_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [data::W](W) writer structure"] impl crate::Writable for DATA_SPEC { type Writer = W; } #[doc = "`reset()` method sets DATA to value 0"] impl crate::Resettable for DATA_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
28.631068
394
0.589352
d9d8c35841c1c6615e0e0f10a2565ab746b274d4
107,821
use crate::matching::{OrderType, Side}; use crate::state::{AssetType, INFO_LEN}; use crate::state::{TriggerCondition, MAX_PAIRS}; use arrayref::{array_ref, array_refs}; use fixed::types::I80F48; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; use solana_program::instruction::{AccountMeta, Instruction}; use solana_program::program_error::ProgramError; use solana_program::pubkey::Pubkey; use std::convert::{TryFrom, TryInto}; use std::num::NonZeroU64; #[repr(C)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum MangoInstruction { /// Initialize a group of lending pools that can be cross margined /// /// Accounts expected by this instruction (12): /// /// 0. `[writable]` mango_group_ai /// 1. `[]` signer_ai /// 2. `[]` admin_ai /// 3. `[]` quote_mint_ai /// 4. `[]` quote_vault_ai /// 5. `[writable]` quote_node_bank_ai /// 6. `[writable]` quote_root_bank_ai /// 7. `[]` dao_vault_ai - aka insurance fund /// 8. `[]` msrm_vault_ai - msrm deposits for fee discounts; can be Pubkey::default() /// 9. `[]` fees_vault_ai - vault owned by Mango DAO token governance to receive fees /// 10. `[writable]` mango_cache_ai - Account to cache prices, root banks, and perp markets /// 11. `[]` dex_prog_ai InitMangoGroup { signer_nonce: u64, valid_interval: u64, quote_optimal_util: I80F48, quote_optimal_rate: I80F48, quote_max_rate: I80F48, }, /// DEPRECATED Initialize a mango account for a user /// Accounts created with this function cannot be closed without upgrading with UpgradeMangoAccountV0V1 /// /// Accounts expected by this instruction (3): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - the mango account data /// 2. `[signer]` owner_ai - Solana account of owner of the mango account InitMangoAccount, /// Deposit funds into mango account /// /// Accounts expected by this instruction (9): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. 
`[writable]` mango_account_ai - the mango account for this user /// 2. `[signer]` owner_ai - Solana account of owner of the mango account /// 3. `[]` mango_cache_ai - MangoCache /// 4. `[]` root_bank_ai - RootBank owned by MangoGroup /// 5. `[writable]` node_bank_ai - NodeBank owned by RootBank /// 6. `[writable]` vault_ai - TokenAccount owned by MangoGroup /// 7. `[]` token_prog_ai - acc pointed to by SPL token program id /// 8. `[writable]` owner_token_account_ai - TokenAccount owned by user which will be sending the funds Deposit { quantity: u64, }, /// Withdraw funds that were deposited earlier. /// /// Accounts expected by this instruction (10): /// /// 0. `[read]` mango_group_ai, - /// 1. `[write]` mango_account_ai, - /// 2. `[read]` owner_ai, - /// 3. `[read]` mango_cache_ai, - /// 4. `[read]` root_bank_ai, - /// 5. `[write]` node_bank_ai, - /// 6. `[write]` vault_ai, - /// 7. `[write]` token_account_ai, - /// 8. `[read]` signer_ai, - /// 9. `[read]` token_prog_ai, - /// 10..+ `[]` open_orders_accs - open orders for each of the spot market Withdraw { quantity: u64, allow_borrow: bool, }, /// Add a token to a mango group /// /// Accounts expected by this instruction (8): /// /// 0. `[writable]` mango_group_ai /// 1 `[]` oracle_ai /// 2. `[]` spot_market_ai /// 3. `[]` dex_program_ai /// 4. `[]` mint_ai /// 5. `[writable]` node_bank_ai /// 6. `[]` vault_ai /// 7. `[writable]` root_bank_ai /// 8. `[signer]` admin_ai AddSpotMarket { maint_leverage: I80F48, init_leverage: I80F48, liquidation_fee: I80F48, optimal_util: I80F48, optimal_rate: I80F48, max_rate: I80F48, }, /// DEPRECATED AddToBasket { market_index: usize, }, /// DEPRECATED - use Withdraw with allow_borrow = true Borrow { quantity: u64, }, /// Cache prices /// /// Accounts expected: 3 + Oracles /// 0. `[]` mango_group_ai - /// 1. `[writable]` mango_cache_ai - /// 2+... 
`[]` oracle_ais - flux aggregator feed accounts CachePrices, /// DEPRECATED - caching of root banks now happens in update index /// Cache root banks /// /// Accounts expected: 2 + Root Banks /// 0. `[]` mango_group_ai /// 1. `[writable]` mango_cache_ai CacheRootBanks, /// Place an order on the Serum Dex using Mango account /// /// Accounts expected by this instruction (23 + MAX_PAIRS): /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - the MangoAccount of owner /// 2. `[signer]` owner_ai - owner of MangoAccount /// 3. `[]` mango_cache_ai - MangoCache for this MangoGroup /// 4. `[]` dex_prog_ai - serum dex program id /// 5. `[writable]` spot_market_ai - serum dex MarketState account /// 6. `[writable]` bids_ai - bids account for serum dex market /// 7. `[writable]` asks_ai - asks account for serum dex market /// 8. `[writable]` dex_request_queue_ai - request queue for serum dex market /// 9. `[writable]` dex_event_queue_ai - event queue for serum dex market /// 10. `[writable]` dex_base_ai - base currency serum dex market vault /// 11. `[writable]` dex_quote_ai - quote currency serum dex market vault /// 12. `[]` base_root_bank_ai - root bank of base currency /// 13. `[writable]` base_node_bank_ai - node bank of base currency /// 14. `[writable]` base_vault_ai - vault of the basenode bank /// 15. `[]` quote_root_bank_ai - root bank of quote currency /// 16. `[writable]` quote_node_bank_ai - node bank of quote currency /// 17. `[writable]` quote_vault_ai - vault of the quote node bank /// 18. `[]` token_prog_ai - SPL token program id /// 19. `[]` signer_ai - signer key for this MangoGroup /// 20. `[]` rent_ai - rent sysvar var /// 21. `[]` dex_signer_key - signer for serum dex /// 22. `[]` msrm_or_srm_vault_ai - the msrm or srm vault in this MangoGroup. Can be zero key /// 23+ `[writable]` open_orders_ais - An array of MAX_PAIRS. Only OpenOrders of current market /// index needs to be writable. 
Only OpenOrders in_margin_basket needs to be correct; /// remaining open orders can just be Pubkey::default() (the zero key) PlaceSpotOrder { order: serum_dex::instruction::NewOrderInstructionV3, }, /// Add oracle /// /// Accounts expected: 3 /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[writable]` oracle_ai - oracle /// 2. `[signer]` admin_ai - admin AddOracle, // = 10 /// Add a perp market to a mango group /// /// Accounts expected by this instruction (7): /// /// 0. `[writable]` mango_group_ai /// 1. `[]` oracle_ai /// 2. `[writable]` perp_market_ai /// 3. `[writable]` event_queue_ai /// 4. `[writable]` bids_ai /// 5. `[writable]` asks_ai /// 6. `[]` mngo_vault_ai - the vault from which liquidity incentives will be paid out for this market /// 7. `[signer]` admin_ai AddPerpMarket { maint_leverage: I80F48, init_leverage: I80F48, liquidation_fee: I80F48, maker_fee: I80F48, taker_fee: I80F48, base_lot_size: i64, quote_lot_size: i64, /// Starting rate for liquidity mining rate: I80F48, /// depth liquidity mining works for max_depth_bps: I80F48, /// target length in seconds of one period target_period_length: u64, /// amount MNGO rewarded per period mngo_per_period: u64, /// Optional: Exponent in the liquidity mining formula; default 2 exp: u8, }, /// Place an order on a perp market /// /// In case this order is matched, the corresponding order structs on both /// PerpAccounts (taker & maker) will be adjusted, and the position size /// will be adjusted w/o accounting for fees. /// In addition a FillEvent will be placed on the event queue. /// Through a subsequent invocation of ConsumeEvents the FillEvent can be /// executed and the perp account balances (base/quote) and fees will be /// paid from the quote position. Only at this point the position balance /// is 100% refelecting the trade. /// /// Accounts expected by this instruction (8 + `MAX_PAIRS` + (optional 1)): /// 0. `[]` mango_group_ai - MangoGroup /// 1. 
`[writable]` mango_account_ai - the MangoAccount of owner /// 2. `[signer]` owner_ai - owner of MangoAccount /// 3. `[]` mango_cache_ai - MangoCache for this MangoGroup /// 4. `[writable]` perp_market_ai /// 5. `[writable]` bids_ai - bids account for this PerpMarket /// 6. `[writable]` asks_ai - asks account for this PerpMarket /// 7. `[writable]` event_queue_ai - EventQueue for this PerpMarket /// 8..23 `[]` open_orders_ais - array of open orders accounts on this MangoAccount /// 23. `[writable]` referrer_mango_account_ai - optional, mango account of referrer PlacePerpOrder { price: i64, quantity: i64, client_order_id: u64, side: Side, /// Can be 0 -> LIMIT, 1 -> IOC, 2 -> PostOnly, 3 -> Market, 4 -> PostOnlySlide order_type: OrderType, /// Optional to be backward compatible; default false reduce_only: bool, }, CancelPerpOrderByClientId { client_order_id: u64, invalid_id_ok: bool, }, CancelPerpOrder { order_id: i128, invalid_id_ok: bool, }, ConsumeEvents { limit: usize, }, /// Cache perp markets /// /// Accounts expected: 2 + Perp Markets /// 0. `[]` mango_group_ai /// 1. `[writable]` mango_cache_ai CachePerpMarkets, /// Update funding related variables UpdateFunding, /// Can only be used on a stub oracle in devnet SetOracle { price: I80F48, }, /// Settle all funds from serum dex open orders /// /// Accounts expected by this instruction (18): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[]` mango_cache_ai - MangoCache for this MangoGroup /// 2. `[signer]` owner_ai - MangoAccount owner /// 3. `[writable]` mango_account_ai - MangoAccount /// 4. `[]` dex_prog_ai - program id of serum dex /// 5. `[writable]` spot_market_ai - dex MarketState account /// 6. `[writable]` open_orders_ai - open orders for this market for this MangoAccount /// 7. `[]` signer_ai - MangoGroup signer key /// 8. `[writable]` dex_base_ai - base vault for dex MarketState /// 9. `[writable]` dex_quote_ai - quote vault for dex MarketState /// 10. 
`[]` base_root_bank_ai - MangoGroup base vault acc /// 11. `[writable]` base_node_bank_ai - MangoGroup quote vault acc /// 12. `[]` quote_root_bank_ai - MangoGroup quote vault acc /// 13. `[writable]` quote_node_bank_ai - MangoGroup quote vault acc /// 14. `[writable]` base_vault_ai - MangoGroup base vault acc /// 15. `[writable]` quote_vault_ai - MangoGroup quote vault acc /// 16. `[]` dex_signer_ai - dex Market signer account /// 17. `[]` spl token program SettleFunds, /// Cancel an order using dex instruction /// /// Accounts expected by this instruction (): /// CancelSpotOrder { // 20 order: serum_dex::instruction::CancelOrderInstructionV2, }, /// Update a root bank's indexes by providing all it's node banks /// /// Accounts expected: 2 + Node Banks /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` root_bank_ai - RootBank /// 2+... `[]` node_bank_ais - NodeBanks UpdateRootBank, /// Take two MangoAccounts and settle profits and losses between them for a perp market /// /// Accounts expected (6): SettlePnl { market_index: usize, }, /// DEPRECATED - no longer makes sense /// Use this token's position and deposit to reduce borrows /// /// Accounts expected by this instruction (5): SettleBorrow { token_index: usize, quantity: u64, }, /// Force cancellation of open orders for a user being liquidated /// /// Accounts expected: 19 + Liqee open orders accounts (MAX_PAIRS) /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[writable]` liqee_mango_account_ai - MangoAccount /// 3. `[]` base_root_bank_ai - RootBank /// 4. `[writable]` base_node_bank_ai - NodeBank /// 5. `[writable]` base_vault_ai - MangoGroup base vault acc /// 6. `[]` quote_root_bank_ai - RootBank /// 7. `[writable]` quote_node_bank_ai - NodeBank /// 8. `[writable]` quote_vault_ai - MangoGroup quote vault acc /// 9. `[writable]` spot_market_ai - SpotMarket /// 10. `[writable]` bids_ai - SpotMarket bids acc /// 11. `[writable]` asks_ai - SpotMarket asks acc /// 12. 
`[signer]` signer_ai - Signer /// 13. `[writable]` dex_event_queue_ai - Market event queue acc /// 14. `[writable]` dex_base_ai - /// 15. `[writable]` dex_quote_ai - /// 16. `[]` dex_signer_ai - /// 17. `[]` dex_prog_ai - Dex Program acc /// 18. `[]` token_prog_ai - Token Program acc /// 19+... `[]` liqee_open_orders_ais - Liqee open orders accs ForceCancelSpotOrders { limit: u8, }, /// Force cancellation of open orders for a user being liquidated /// /// Accounts expected: 6 + Liqee open orders accounts (MAX_PAIRS) /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[]` perp_market_ai - PerpMarket /// 3. `[writable]` bids_ai - Bids acc /// 4. `[writable]` asks_ai - Asks acc /// 5. `[writable]` liqee_mango_account_ai - Liqee MangoAccount /// 6+... `[]` liqor_open_orders_ais - Liqee open orders accs ForceCancelPerpOrders { limit: u8, }, /// Liquidator takes some of borrows at token at `liab_index` and receives some deposits from /// the token at `asset_index` /// /// Accounts expected: 9 + Liqee open orders accounts (MAX_PAIRS) + Liqor open orders accounts (MAX_PAIRS) /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[writable]` liqee_mango_account_ai - MangoAccount /// 3. `[writable]` liqor_mango_account_ai - MangoAccount /// 4. `[signer]` liqor_ai - Liqor Account /// 5. `[]` asset_root_bank_ai - RootBank /// 6. `[writable]` asset_node_bank_ai - NodeBank /// 7. `[]` liab_root_bank_ai - RootBank /// 8. `[writable]` liab_node_bank_ai - NodeBank /// 9+... `[]` liqee_open_orders_ais - Liqee open orders accs /// 9+MAX_PAIRS... `[]` liqor_open_orders_ais - Liqor open orders accs LiquidateTokenAndToken { max_liab_transfer: I80F48, }, /// Swap tokens for perp quote position if only and only if the base position in that market is 0 /// /// Accounts expected: 7 + Liqee open orders accounts (MAX_PAIRS) + Liqor open orders accounts (MAX_PAIRS) /// 0. `[]` mango_group_ai - MangoGroup /// 1. 
`[]` mango_cache_ai - MangoCache /// 2. `[writable]` liqee_mango_account_ai - MangoAccount /// 3. `[writable]` liqor_mango_account_ai - MangoAccount /// 4. `[signer]` liqor_ai - Liqor Account /// 5. `[]` root_bank_ai - RootBank /// 6. `[writable]` node_bank_ai - NodeBank /// 7+... `[]` liqee_open_orders_ais - Liqee open orders accs /// 7+MAX_PAIRS... `[]` liqor_open_orders_ais - Liqor open orders accs LiquidateTokenAndPerp { asset_type: AssetType, asset_index: usize, liab_type: AssetType, liab_index: usize, max_liab_transfer: I80F48, }, /// Reduce some of the base position in exchange for quote position in this market /// /// Accounts expected: 7 + Liqee open orders accounts (MAX_PAIRS) + Liqor open orders accounts (MAX_PAIRS) /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[writable]` perp_market_ai - PerpMarket /// 3. `[writable]` event_queue_ai - EventQueue /// 4. `[writable]` liqee_mango_account_ai - MangoAccount /// 5. `[writable]` liqor_mango_account_ai - MangoAccount /// 6. `[signer]` liqor_ai - Liqor Account /// 7+... `[]` liqee_open_orders_ais - Liqee open orders accs /// 7+MAX_PAIRS... `[]` liqor_open_orders_ais - Liqor open orders accs LiquidatePerpMarket { base_transfer_request: i64, }, /// Take an account that has losses in the selected perp market to account for fees_accrued /// /// Accounts expected: 10 /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[writable]` perp_market_ai - PerpMarket /// 3. `[writable]` mango_account_ai - MangoAccount /// 4. `[]` root_bank_ai - RootBank /// 5. `[writable]` node_bank_ai - NodeBank /// 6. `[writable]` bank_vault_ai - ? /// 7. `[writable]` fees_vault_ai - fee vault owned by mango DAO token governance /// 8. `[]` signer_ai - Group Signer Account /// 9. `[]` token_prog_ai - Token Program Account SettleFees, /// Claim insurance fund and then socialize loss /// /// Accounts expected: 12 + Liqor open orders accounts (MAX_PAIRS) /// 0. 
`[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_cache_ai - MangoCache /// 2. `[writable]` liqee_mango_account_ai - Liqee MangoAccount /// 3. `[writable]` liqor_mango_account_ai - Liqor MangoAccount /// 4. `[signer]` liqor_ai - Liqor Account /// 5. `[]` root_bank_ai - RootBank /// 6. `[writable]` node_bank_ai - NodeBank /// 7. `[writable]` vault_ai - ? /// 8. `[writable]` dao_vault_ai - DAO Vault /// 9. `[]` signer_ai - Group Signer Account /// 10. `[]` perp_market_ai - PerpMarket /// 11. `[]` token_prog_ai - Token Program Account /// 12+... `[]` liqor_open_orders_ais - Liqor open orders accs ResolvePerpBankruptcy { // 30 liab_index: usize, max_liab_transfer: I80F48, }, /// Claim insurance fund and then socialize loss /// /// Accounts expected: 13 + Liqor open orders accounts (MAX_PAIRS) + Liab node banks (MAX_NODE_BANKS) /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_cache_ai - MangoCache /// 2. `[writable]` liqee_mango_account_ai - Liqee MangoAccount /// 3. `[writable]` liqor_mango_account_ai - Liqor MangoAccount /// 4. `[signer]` liqor_ai - Liqor Account /// 5. `[]` quote_root_bank_ai - RootBank /// 6. `[writable]` quote_node_bank_ai - NodeBank /// 7. `[writable]` quote_vault_ai - ? /// 8. `[writable]` dao_vault_ai - DAO Vault /// 9. `[]` signer_ai - Group Signer Account /// 10. `[]` liab_root_bank_ai - RootBank /// 11. `[writable]` liab_node_bank_ai - NodeBank /// 12. `[]` token_prog_ai - Token Program Account /// 13+... `[]` liqor_open_orders_ais - Liqor open orders accs /// 14+MAX_PAIRS... `[]` liab_node_bank_ais - Lib token node banks ResolveTokenBankruptcy { max_liab_transfer: I80F48, }, /// Initialize open orders /// /// Accounts expected by this instruction (8): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - MangoAccount owner /// 3. `[]` dex_prog_ai - program id of serum dex /// 4. 
`[writable]` open_orders_ai - open orders for this market for this MangoAccount /// 5. `[]` spot_market_ai - dex MarketState account /// 6. `[]` signer_ai - Group Signer Account /// 7. `[]` rent_ai - Rent sysvar account InitSpotOpenOrders, /// Redeem the mngo_accrued in a PerpAccount for MNGO in MangoAccount deposits /// /// Accounts expected by this instruction (11): /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[writable]` mango_account_ai - MangoAccount /// 3. `[signer]` owner_ai - MangoAccount owner /// 4. `[]` perp_market_ai - PerpMarket /// 5. `[writable]` mngo_perp_vault_ai /// 6. `[]` mngo_root_bank_ai /// 7. `[writable]` mngo_node_bank_ai /// 8. `[writable]` mngo_bank_vault_ai /// 9. `[]` signer_ai - Group Signer Account /// 10. `[]` token_prog_ai - SPL Token program id RedeemMngo, /// Add account info; useful for naming accounts /// /// Accounts expected by this instruction (3): /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - MangoAccount owner AddMangoAccountInfo { info: [u8; INFO_LEN], }, /// Deposit MSRM to reduce fees. This MSRM is not at risk and is not used for any health calculations /// /// Accounts expected by this instruction (6): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - MangoAccount owner /// 3. `[writable]` msrm_account_ai - MSRM token account /// 4. `[writable]` msrm_vault_ai - MSRM vault owned by mango program /// 5. `[]` token_prog_ai - SPL Token program id DepositMsrm { quantity: u64, }, /// Withdraw the MSRM deposited /// /// Accounts expected by this instruction (7): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - MangoAccount owner /// 3. 
`[writable]` msrm_account_ai - MSRM token account /// 4. `[writable]` msrm_vault_ai - MSRM vault owned by mango program /// 5. `[]` signer_ai - signer key of the MangoGroup /// 6. `[]` token_prog_ai - SPL Token program id WithdrawMsrm { quantity: u64, }, /// Change the params for perp market. /// /// Accounts expected by this instruction (3): /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[writable]` perp_market_ai - PerpMarket /// 2. `[signer]` admin_ai - MangoGroup admin ChangePerpMarketParams { #[serde(serialize_with = "serialize_option_fixed_width")] maint_leverage: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] init_leverage: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] liquidation_fee: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] maker_fee: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] taker_fee: Option<I80F48>, /// Starting rate for liquidity mining #[serde(serialize_with = "serialize_option_fixed_width")] rate: Option<I80F48>, /// depth liquidity mining works for #[serde(serialize_with = "serialize_option_fixed_width")] max_depth_bps: Option<I80F48>, /// target length in seconds of one period #[serde(serialize_with = "serialize_option_fixed_width")] target_period_length: Option<u64>, /// amount MNGO rewarded per period #[serde(serialize_with = "serialize_option_fixed_width")] mngo_per_period: Option<u64>, /// Optional: Exponent in the liquidity mining formula #[serde(serialize_with = "serialize_option_fixed_width")] exp: Option<u8>, }, /// Transfer admin permissions over group to another account /// /// Accounts expected by this instruction (3): /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[]` new_admin_ai - New MangoGroup admin /// 2. `[signer]` admin_ai - MangoGroup admin SetGroupAdmin, /// Cancel all perp open orders (batch cancel) /// /// Accounts expected: 6 /// 0. `[]` mango_group_ai - MangoGroup /// 1. 
`[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - Owner of Mango Account /// 3. `[writable]` perp_market_ai - PerpMarket /// 4. `[writable]` bids_ai - Bids acc /// 5. `[writable]` asks_ai - Asks acc CancelAllPerpOrders { limit: u8, }, /// DEPRECATED - No longer valid instruction as of release 3.0.5 /// Liqor takes on all the quote positions where base_position == 0 /// Equivalent amount of quote currency is credited/debited in deposits/borrows. /// This is very similar to the settle_pnl function, but is forced for Sick accounts /// /// Accounts expected: 7 + MAX_PAIRS /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_cache_ai - MangoCache /// 2. `[writable]` liqee_mango_account_ai - MangoAccount /// 3. `[writable]` liqor_mango_account_ai - MangoAccount /// 4. `[signer]` liqor_ai - Liqor Account /// 5. `[]` root_bank_ai - RootBank /// 6. `[writable]` node_bank_ai - NodeBank /// 7+... `[]` liqee_open_orders_ais - Liqee open orders accs ForceSettleQuotePositions, // instruction 40 /// Place an order on the Serum Dex using Mango account. Improved over PlaceSpotOrder /// by reducing the tx size PlaceSpotOrder2 { order: serum_dex::instruction::NewOrderInstructionV3, }, /// Initialize the advanced open orders account for a MangoAccount and set InitAdvancedOrders, /// Add a trigger order which executes if the trigger condition is met. /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` mango_account_ai - the MangoAccount of owner /// 2. `[writable, signer]` owner_ai - owner of MangoAccount /// 3 `[writable]` advanced_orders_ai - the AdvanceOrdersAccount of owner /// 4. `[]` mango_cache_ai - MangoCache for this MangoGroup /// 5. `[]` perp_market_ai /// 6. `[]` system_prog_ai /// 7.. 
`[]` open_orders_ais - OpenOrders account for each serum dex market in margin basket AddPerpTriggerOrder { order_type: OrderType, side: Side, trigger_condition: TriggerCondition, reduce_only: bool, // only valid on perp order client_order_id: u64, price: i64, quantity: i64, trigger_price: I80F48, }, /// Remove the order at the order_index RemoveAdvancedOrder { order_index: u8, }, /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - the MangoAccount of owner /// 2 `[writable]` advanced_orders_ai - the AdvanceOrdersAccount of owner /// 3. `[writable,signer]` agent_ai - operator of the execution service (receives lamports) /// 4. `[]` mango_cache_ai - MangoCache for this MangoGroup /// 5. `[writable]` perp_market_ai /// 6. `[writable]` bids_ai - bids account for this PerpMarket /// 7. `[writable]` asks_ai - asks account for this PerpMarket /// 8. `[writable]` event_queue_ai - EventQueue for this PerpMarket /// 9. `[] system_prog_ai ExecutePerpTriggerOrder { order_index: u8, }, /// Create the necessary PDAs for the perp market and initialize them and add to MangoGroup /// /// Accounts expected by this instruction (13): /// /// 0. `[writable]` mango_group_ai /// 1. `[]` oracle_ai /// 2. `[writable]` perp_market_ai /// 3. `[writable]` event_queue_ai /// 4. `[writable]` bids_ai /// 5. `[writable]` asks_ai /// 6. `[]` mngo_mint_ai - mngo token mint /// 7. `[writable]` mngo_vault_ai - the vault from which liquidity incentives will be paid out for this market /// 8. `[signer, writable]` admin_ai - writable if admin_ai is also funder /// 9. `[writable]` signer_ai - optionally writable if funder is signer_ai /// 10. `[]` system_prog_ai - system program /// 11. `[]` token_prog_ai - SPL token program /// 12. 
`[]` rent_ai - rent sysvar because SPL token program requires it CreatePerpMarket { maint_leverage: I80F48, init_leverage: I80F48, liquidation_fee: I80F48, maker_fee: I80F48, taker_fee: I80F48, base_lot_size: i64, quote_lot_size: i64, /// Starting rate for liquidity mining rate: I80F48, /// v0: depth in bps for liquidity mining; v1: depth in contract size max_depth_bps: I80F48, /// target length in seconds of one period target_period_length: u64, /// amount MNGO rewarded per period mngo_per_period: u64, exp: u8, version: u8, /// Helps with integer overflow lm_size_shift: u8, /// define base decimals in case spot market has not yet been listed base_decimals: u8, }, /// Change the params for perp market. /// /// Accounts expected by this instruction (3): /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[writable]` perp_market_ai - PerpMarket /// 2. `[signer]` admin_ai - MangoGroup admin ChangePerpMarketParams2 { #[serde(serialize_with = "serialize_option_fixed_width")] maint_leverage: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] init_leverage: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] liquidation_fee: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] maker_fee: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] taker_fee: Option<I80F48>, /// Starting rate for liquidity mining #[serde(serialize_with = "serialize_option_fixed_width")] rate: Option<I80F48>, /// depth liquidity mining works for #[serde(serialize_with = "serialize_option_fixed_width")] max_depth_bps: Option<I80F48>, /// target length in seconds of one period #[serde(serialize_with = "serialize_option_fixed_width")] target_period_length: Option<u64>, /// amount MNGO rewarded per period #[serde(serialize_with = "serialize_option_fixed_width")] mngo_per_period: Option<u64>, #[serde(serialize_with = "serialize_option_fixed_width")] exp: Option<u8>, #[serde(serialize_with = 
"serialize_option_fixed_width")] version: Option<u8>, #[serde(serialize_with = "serialize_option_fixed_width")] lm_size_shift: Option<u8>, }, /// Change the params for perp market. /// /// Accounts expected by this instruction (2 + MAX_PAIRS): /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - MangoAccount /// 2+ `[]` open_orders_ais - An array of MAX_PAIRS. Only OpenOrders of current market /// index needs to be writable. Only OpenOrders in_margin_basket needs to be correct; /// remaining open orders can just be Pubkey::default() (the zero key) UpdateMarginBasket, /// Change the maximum number of closeable MangoAccounts.v1 allowed /// /// Accounts expected by this instruction (2): /// /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[signer]` admin_ai - Admin ChangeMaxMangoAccounts { max_mango_accounts: u32, }, /// Delete a mango account and return lamports /// /// Accounts expected by this instruction (3): /// /// 0. `[writable]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - the mango account data /// 2. `[signer]` owner_ai - Solana account of owner of the mango account CloseMangoAccount, // instruction 50 /// Delete a spot open orders account and return lamports /// /// Accounts expected by this instruction (7): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - the mango account data /// 2. `[signer, writable]` owner_ai - Solana account of owner of the mango account /// 3. `[]` dex_prog_ai - The serum dex program id /// 4. `[writable]` open_orders_ai - The open orders account to close /// 5. `[]` spot_market_ai - The spot market for the account /// 6. `[]` signer_ai - Mango group signer key CloseSpotOpenOrders, /// Delete an advanced orders account and return lamports /// /// Accounts expected by this instruction (4): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. 
`[writable]` mango_account_ai - the mango account data /// 2. `[signer, writable]` owner_ai - Solana account of owner of the mango account /// 3. `[writable]` advanced_orders_ai - the advanced orders account CloseAdvancedOrders, /// Create a PDA Mango Account for collecting dust owned by a group /// /// Accounts expected by this instruction (4) /// 0. `[]` mango_group_ai - MangoGroup to create the dust account for /// 1. `[writable]` mango_account_ai - the mango account data /// 2. `[signer, writable]` signer_ai - Signer and fee payer account /// 3. `[writable]` system_prog_ai - System program CreateDustAccount, /// Transfer dust (< 1 native SPL token) assets and liabilities for a single token to the group's dust account /// /// Accounts expected by this instruction (7) /// /// 0. `[]` mango_group_ai - MangoGroup of the mango account /// 1. `[writable]` mango_account_ai - the mango account data /// 2. `[signer, writable]` owner_ai - Solana account of owner of the mango account /// 3. `[writable]` dust_account_ai - Dust Account for the group /// 4. `[]` root_bank_ai - The root bank for the token /// 5. `[writable]` node_bank_ai - A node bank for the token /// 6. `[]` mango_cache_ai - The cache for the group ResolveDust, /// Create a PDA mango account for a user /// /// Accounts expected by this instruction (5): /// /// 0. `[writable]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - the mango account data /// 2. `[signer]` owner_ai - Solana account of owner of the mango account /// 3. `[]` system_prog_ai - System program /// 4. `[signer, writable]` payer_ai - pays for the PDA creation CreateMangoAccount { account_num: u64, }, /// Upgrade a V0 Mango Account to V1 allowing it to be closed /// /// Accounts expected by this instruction (3): /// /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. 
`[signer]` owner_ai - Solana account of owner of the mango account UpgradeMangoAccountV0V1, /// Cancel all perp open orders for one side of the book /// /// Accounts expected: 6 /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - Owner of Mango Account /// 3. `[writable]` perp_market_ai - PerpMarket /// 4. `[writable]` bids_ai - Bids acc /// 5. `[writable]` asks_ai - Asks acc CancelPerpOrdersSide { side: Side, limit: u8, }, /// https://github.com/blockworks-foundation/mango-v3/pull/97/ /// Set delegate authority to mango account which can do everything regular account can do /// except Withdraw and CloseMangoAccount. Set to Pubkey::default() to revoke delegate /// /// Accounts expected: 4 /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - Owner of Mango Account /// 3. `[]` delegate_ai - delegate SetDelegate, /// Change the params for a spot market. /// /// Accounts expected by this instruction (4): /// 0. `[writable]` mango_group_ai - MangoGroup /// 1. `[writable]` spot_market_ai - Market /// 2. `[writable]` root_bank_ai - RootBank /// 3. 
`[signer]` admin_ai - MangoGroup admin ChangeSpotMarketParams { #[serde(serialize_with = "serialize_option_fixed_width")] maint_leverage: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] init_leverage: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] liquidation_fee: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] optimal_util: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] optimal_rate: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] max_rate: Option<I80F48>, #[serde(serialize_with = "serialize_option_fixed_width")] version: Option<u8>, }, /// Create an OpenOrders PDA and initialize it with InitOpenOrders call to serum dex /// /// Accounts expected by this instruction (9): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[writable]` mango_account_ai - MangoAccount /// 2. `[signer]` owner_ai - MangoAccount owner /// 3. `[]` dex_prog_ai - program id of serum dex /// 4. `[writable]` open_orders_ai - open orders PDA /// 5. `[]` spot_market_ai - dex MarketState account /// 6. `[]` signer_ai - Group Signer Account /// 7. `[]` system_prog_ai - System program /// 8. `[signer, writable]` payer_ai - pays for the PDA creation CreateSpotOpenOrders, // instruction 60 /// Set the `ref_surcharge_centibps`, `ref_share_centibps` and `ref_mngo_required` on `MangoGroup` /// /// Accounts expected by this instruction (2): /// 0. `[writable]` mango_group_ai - MangoGroup that this mango account is for /// 1. 
`[signer]` admin_ai - mango_group.admin ChangeReferralFeeParams { ref_surcharge_centibps: u32, ref_share_centibps: u32, ref_mngo_required: u64, }, /// Store the referrer's MangoAccount pubkey on the Referrer account /// It will create the Referrer account as a PDA of user's MangoAccount if it doesn't exist /// This is primarily useful for the UI; the referrer address stored here is not necessarily /// who earns the ref fees. /// /// Accounts expected by this instruction (7): /// /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for /// 1. `[]` mango_account_ai - MangoAccount of the referred /// 2. `[signer]` owner_ai - MangoAccount owner or delegate /// 3. `[writable]` referrer_memory_ai - ReferrerMemory struct; will be initialized if required /// 4. `[]` referrer_mango_account_ai - referrer's MangoAccount /// 5. `[signer, writable]` payer_ai - payer for PDA; can be same as owner /// 6. `[]` system_prog_ai - System program SetReferrerMemory, /// Associate the referrer's MangoAccount with a human readable `referrer_id` which can be used /// in a ref link. This is primarily useful for the UI. /// Create the `ReferrerIdRecord` PDA; if it already exists throw error /// /// Accounts expected by this instruction (5): /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[]` referrer_mango_account_ai - MangoAccount /// 2. `[writable]` referrer_id_record_ai - The PDA to store the record on /// 3. `[signer, writable]` payer_ai - payer for PDA; can be same as owner /// 4. `[]` system_prog_ai - System program RegisterReferrerId { referrer_id: [u8; INFO_LEN], }, /// Place an order on a perp market /// /// In case this order is matched, the corresponding order structs on both /// PerpAccounts (taker & maker) will be adjusted, and the position size /// will be adjusted w/o accounting for fees. /// In addition a FillEvent will be placed on the event queue. 
/// Through a subsequent invocation of ConsumeEvents the FillEvent can be /// executed and the perp account balances (base/quote) and fees will be /// paid from the quote position. Only at this point the position balance /// is 100% reflecting the trade. /// /// Accounts expected by this instruction (9 + `NUM_IN_MARGIN_BASKET`): /// 0. `[]` mango_group_ai - MangoGroup /// 1. `[writable]` mango_account_ai - the MangoAccount of owner /// 2. `[signer]` owner_ai - owner of MangoAccount /// 3. `[]` mango_cache_ai - MangoCache for this MangoGroup /// 4. `[writable]` perp_market_ai /// 5. `[writable]` bids_ai - bids account for this PerpMarket /// 6. `[writable]` asks_ai - asks account for this PerpMarket /// 7. `[writable]` event_queue_ai - EventQueue for this PerpMarket /// 8. `[writable]` referrer_mango_account_ai - referrer's mango account; /// pass in mango_account_ai as duplicate if you don't have a referrer /// 9..9 + NUM_IN_MARGIN_BASKET `[]` open_orders_ais - pass in open orders in margin basket PlacePerpOrder2 { /// Price in quote lots per base lots. /// /// Effect is based on order type, it's usually /// - fill orders on the book up to this price or /// - place an order on the book at this price. /// /// Ignored for Market orders and potentially adjusted for PostOnlySlide orders. price: i64, /// Max base lots to buy/sell. max_base_quantity: i64, /// Max quote lots to pay/receive (not taking fees into account). max_quote_quantity: i64, /// Arbitrary user-controlled order id. client_order_id: u64, /// Timestamp of when order expires /// /// Send 0 if you want the order to never expire. /// Timestamps in the past mean the instruction is skipped. /// Timestamps in the future are reduced to now + 255s. expiry_timestamp: u64, side: Side, /// Can be 0 -> LIMIT, 1 -> IOC, 2 -> PostOnly, 3 -> Market, 4 -> PostOnlySlide order_type: OrderType, reduce_only: bool, /// Maximum number of orders from the book to fill. 
/// /// Use this to limit compute used during order matching. /// When the limit is reached, processing stops and the instruction succeeds. limit: u8, }, } impl MangoInstruction { pub fn unpack(input: &[u8]) -> Option<Self> { let (&discrim, data) = array_refs![input, 4; ..;]; let discrim = u32::from_le_bytes(discrim); Some(match discrim { 0 => { let data = array_ref![data, 0, 64]; let ( signer_nonce, valid_interval, quote_optimal_util, quote_optimal_rate, quote_max_rate, ) = array_refs![data, 8, 8, 16, 16, 16]; MangoInstruction::InitMangoGroup { signer_nonce: u64::from_le_bytes(*signer_nonce), valid_interval: u64::from_le_bytes(*valid_interval), quote_optimal_util: I80F48::from_le_bytes(*quote_optimal_util), quote_optimal_rate: I80F48::from_le_bytes(*quote_optimal_rate), quote_max_rate: I80F48::from_le_bytes(*quote_max_rate), } } 1 => MangoInstruction::InitMangoAccount, 2 => { let quantity = array_ref![data, 0, 8]; MangoInstruction::Deposit { quantity: u64::from_le_bytes(*quantity) } } 3 => { let data = array_ref![data, 0, 9]; let (quantity, allow_borrow) = array_refs![data, 8, 1]; let allow_borrow = match allow_borrow { [0] => false, [1] => true, _ => return None, }; MangoInstruction::Withdraw { quantity: u64::from_le_bytes(*quantity), allow_borrow } } 4 => { let data = array_ref![data, 0, 96]; let ( maint_leverage, init_leverage, liquidation_fee, optimal_util, optimal_rate, max_rate, ) = array_refs![data, 16, 16, 16, 16, 16, 16]; MangoInstruction::AddSpotMarket { maint_leverage: I80F48::from_le_bytes(*maint_leverage), init_leverage: I80F48::from_le_bytes(*init_leverage), liquidation_fee: I80F48::from_le_bytes(*liquidation_fee), optimal_util: I80F48::from_le_bytes(*optimal_util), optimal_rate: I80F48::from_le_bytes(*optimal_rate), max_rate: I80F48::from_le_bytes(*max_rate), } } 5 => { let market_index = array_ref![data, 0, 8]; MangoInstruction::AddToBasket { market_index: usize::from_le_bytes(*market_index) } } 6 => { let quantity = array_ref![data, 0, 8]; 
MangoInstruction::Borrow { quantity: u64::from_le_bytes(*quantity) } } 7 => MangoInstruction::CachePrices, 8 => MangoInstruction::CacheRootBanks, 9 => { let data_arr = array_ref![data, 0, 46]; let order = unpack_dex_new_order_v3(data_arr)?; MangoInstruction::PlaceSpotOrder { order } } 10 => MangoInstruction::AddOracle, 11 => { let exp = if data.len() > 144 { data[144] } else { 2 }; let data_arr = array_ref![data, 0, 144]; let ( maint_leverage, init_leverage, liquidation_fee, maker_fee, taker_fee, base_lot_size, quote_lot_size, rate, max_depth_bps, target_period_length, mngo_per_period, ) = array_refs![data_arr, 16, 16, 16, 16, 16, 8, 8, 16, 16, 8, 8]; MangoInstruction::AddPerpMarket { maint_leverage: I80F48::from_le_bytes(*maint_leverage), init_leverage: I80F48::from_le_bytes(*init_leverage), liquidation_fee: I80F48::from_le_bytes(*liquidation_fee), maker_fee: I80F48::from_le_bytes(*maker_fee), taker_fee: I80F48::from_le_bytes(*taker_fee), base_lot_size: i64::from_le_bytes(*base_lot_size), quote_lot_size: i64::from_le_bytes(*quote_lot_size), rate: I80F48::from_le_bytes(*rate), max_depth_bps: I80F48::from_le_bytes(*max_depth_bps), target_period_length: u64::from_le_bytes(*target_period_length), mngo_per_period: u64::from_le_bytes(*mngo_per_period), exp, } } 12 => { let reduce_only = if data.len() > 26 { data[26] != 0 } else { false }; let data_arr = array_ref![data, 0, 26]; let (price, quantity, client_order_id, side, order_type) = array_refs![data_arr, 8, 8, 8, 1, 1]; MangoInstruction::PlacePerpOrder { price: i64::from_le_bytes(*price), quantity: i64::from_le_bytes(*quantity), client_order_id: u64::from_le_bytes(*client_order_id), side: Side::try_from_primitive(side[0]).ok()?, order_type: OrderType::try_from_primitive(order_type[0]).ok()?, reduce_only, } } 13 => { let data_arr = array_ref![data, 0, 9]; let (client_order_id, invalid_id_ok) = array_refs![data_arr, 8, 1]; MangoInstruction::CancelPerpOrderByClientId { client_order_id: 
u64::from_le_bytes(*client_order_id), invalid_id_ok: invalid_id_ok[0] != 0, } } 14 => { let data_arr = array_ref![data, 0, 17]; let (order_id, invalid_id_ok) = array_refs![data_arr, 16, 1]; MangoInstruction::CancelPerpOrder { order_id: i128::from_le_bytes(*order_id), invalid_id_ok: invalid_id_ok[0] != 0, } } 15 => { let data_arr = array_ref![data, 0, 8]; MangoInstruction::ConsumeEvents { limit: usize::from_le_bytes(*data_arr) } } 16 => MangoInstruction::CachePerpMarkets, 17 => MangoInstruction::UpdateFunding, 18 => { let data_arr = array_ref![data, 0, 16]; MangoInstruction::SetOracle { price: I80F48::from_le_bytes(*data_arr) } } 19 => MangoInstruction::SettleFunds, 20 => { let data_array = array_ref![data, 0, 20]; let fields = array_refs![data_array, 4, 16]; let side = match u32::from_le_bytes(*fields.0) { 0 => serum_dex::matching::Side::Bid, 1 => serum_dex::matching::Side::Ask, _ => return None, }; let order_id = u128::from_le_bytes(*fields.1); let order = serum_dex::instruction::CancelOrderInstructionV2 { side, order_id }; MangoInstruction::CancelSpotOrder { order } } 21 => MangoInstruction::UpdateRootBank, 22 => { let data_arr = array_ref![data, 0, 8]; MangoInstruction::SettlePnl { market_index: usize::from_le_bytes(*data_arr) } } 23 => { let data = array_ref![data, 0, 16]; let (token_index, quantity) = array_refs![data, 8, 8]; MangoInstruction::SettleBorrow { token_index: usize::from_le_bytes(*token_index), quantity: u64::from_le_bytes(*quantity), } } 24 => { let data_arr = array_ref![data, 0, 1]; MangoInstruction::ForceCancelSpotOrders { limit: u8::from_le_bytes(*data_arr) } } 25 => { let data_arr = array_ref![data, 0, 1]; MangoInstruction::ForceCancelPerpOrders { limit: u8::from_le_bytes(*data_arr) } } 26 => { let data_arr = array_ref![data, 0, 16]; MangoInstruction::LiquidateTokenAndToken { max_liab_transfer: I80F48::from_le_bytes(*data_arr), } } 27 => { let data = array_ref![data, 0, 34]; let (asset_type, asset_index, liab_type, liab_index, 
max_liab_transfer) = array_refs![data, 1, 8, 1, 8, 16]; MangoInstruction::LiquidateTokenAndPerp { asset_type: AssetType::try_from(u8::from_le_bytes(*asset_type)).unwrap(), asset_index: usize::from_le_bytes(*asset_index), liab_type: AssetType::try_from(u8::from_le_bytes(*liab_type)).unwrap(), liab_index: usize::from_le_bytes(*liab_index), max_liab_transfer: I80F48::from_le_bytes(*max_liab_transfer), } } 28 => { let data_arr = array_ref![data, 0, 8]; MangoInstruction::LiquidatePerpMarket { base_transfer_request: i64::from_le_bytes(*data_arr), } } 29 => MangoInstruction::SettleFees, 30 => { let data = array_ref![data, 0, 24]; let (liab_index, max_liab_transfer) = array_refs![data, 8, 16]; MangoInstruction::ResolvePerpBankruptcy { liab_index: usize::from_le_bytes(*liab_index), max_liab_transfer: I80F48::from_le_bytes(*max_liab_transfer), } } 31 => { let data_arr = array_ref![data, 0, 16]; MangoInstruction::ResolveTokenBankruptcy { max_liab_transfer: I80F48::from_le_bytes(*data_arr), } } 32 => MangoInstruction::InitSpotOpenOrders, 33 => MangoInstruction::RedeemMngo, 34 => { let info = array_ref![data, 0, INFO_LEN]; MangoInstruction::AddMangoAccountInfo { info: *info } } 35 => { let quantity = array_ref![data, 0, 8]; MangoInstruction::DepositMsrm { quantity: u64::from_le_bytes(*quantity) } } 36 => { let quantity = array_ref![data, 0, 8]; MangoInstruction::WithdrawMsrm { quantity: u64::from_le_bytes(*quantity) } } 37 => { let exp = if data.len() > 137 { unpack_u8_opt(&[data[137], data[138]]) } else { None }; let data_arr = array_ref![data, 0, 137]; let ( maint_leverage, init_leverage, liquidation_fee, maker_fee, taker_fee, rate, max_depth_bps, target_period_length, mngo_per_period, ) = array_refs![data_arr, 17, 17, 17, 17, 17, 17, 17, 9, 9]; MangoInstruction::ChangePerpMarketParams { maint_leverage: unpack_i80f48_opt(maint_leverage), init_leverage: unpack_i80f48_opt(init_leverage), liquidation_fee: unpack_i80f48_opt(liquidation_fee), maker_fee: 
unpack_i80f48_opt(maker_fee), taker_fee: unpack_i80f48_opt(taker_fee), rate: unpack_i80f48_opt(rate), max_depth_bps: unpack_i80f48_opt(max_depth_bps), target_period_length: unpack_u64_opt(target_period_length), mngo_per_period: unpack_u64_opt(mngo_per_period), exp, } } 38 => MangoInstruction::SetGroupAdmin, 39 => { let data_arr = array_ref![data, 0, 1]; MangoInstruction::CancelAllPerpOrders { limit: u8::from_le_bytes(*data_arr) } } 40 => MangoInstruction::ForceSettleQuotePositions, 41 => { let data_arr = array_ref![data, 0, 46]; let order = unpack_dex_new_order_v3(data_arr)?; MangoInstruction::PlaceSpotOrder2 { order } } 42 => MangoInstruction::InitAdvancedOrders, 43 => { let data_arr = array_ref![data, 0, 44]; let ( order_type, side, trigger_condition, reduce_only, client_order_id, price, quantity, trigger_price, ) = array_refs![data_arr, 1, 1, 1, 1, 8, 8, 8, 16]; MangoInstruction::AddPerpTriggerOrder { order_type: OrderType::try_from_primitive(order_type[0]).ok()?, side: Side::try_from_primitive(side[0]).ok()?, trigger_condition: TriggerCondition::try_from(u8::from_le_bytes( *trigger_condition, )) .unwrap(), reduce_only: reduce_only[0] != 0, client_order_id: u64::from_le_bytes(*client_order_id), price: i64::from_le_bytes(*price), quantity: i64::from_le_bytes(*quantity), trigger_price: I80F48::from_le_bytes(*trigger_price), } } 44 => { let order_index = array_ref![data, 0, 1][0]; MangoInstruction::RemoveAdvancedOrder { order_index } } 45 => { let order_index = array_ref![data, 0, 1][0]; MangoInstruction::ExecutePerpTriggerOrder { order_index } } 46 => { let data_arr = array_ref![data, 0, 148]; let ( maint_leverage, init_leverage, liquidation_fee, maker_fee, taker_fee, base_lot_size, quote_lot_size, rate, max_depth_bps, target_period_length, mngo_per_period, exp, version, lm_size_shift, base_decimals, ) = array_refs![data_arr, 16, 16, 16, 16, 16, 8, 8, 16, 16, 8, 8, 1, 1, 1, 1]; MangoInstruction::CreatePerpMarket { maint_leverage: 
I80F48::from_le_bytes(*maint_leverage), init_leverage: I80F48::from_le_bytes(*init_leverage), liquidation_fee: I80F48::from_le_bytes(*liquidation_fee), maker_fee: I80F48::from_le_bytes(*maker_fee), taker_fee: I80F48::from_le_bytes(*taker_fee), base_lot_size: i64::from_le_bytes(*base_lot_size), quote_lot_size: i64::from_le_bytes(*quote_lot_size), rate: I80F48::from_le_bytes(*rate), max_depth_bps: I80F48::from_le_bytes(*max_depth_bps), target_period_length: u64::from_le_bytes(*target_period_length), mngo_per_period: u64::from_le_bytes(*mngo_per_period), exp: exp[0], version: version[0], lm_size_shift: lm_size_shift[0], base_decimals: base_decimals[0], } } 47 => { let data_arr = array_ref![data, 0, 143]; let ( maint_leverage, init_leverage, liquidation_fee, maker_fee, taker_fee, rate, max_depth_bps, target_period_length, mngo_per_period, exp, version, lm_size_shift, ) = array_refs![data_arr, 17, 17, 17, 17, 17, 17, 17, 9, 9, 2, 2, 2]; MangoInstruction::ChangePerpMarketParams2 { maint_leverage: unpack_i80f48_opt(maint_leverage), init_leverage: unpack_i80f48_opt(init_leverage), liquidation_fee: unpack_i80f48_opt(liquidation_fee), maker_fee: unpack_i80f48_opt(maker_fee), taker_fee: unpack_i80f48_opt(taker_fee), rate: unpack_i80f48_opt(rate), max_depth_bps: unpack_i80f48_opt(max_depth_bps), target_period_length: unpack_u64_opt(target_period_length), mngo_per_period: unpack_u64_opt(mngo_per_period), exp: unpack_u8_opt(exp), version: unpack_u8_opt(version), lm_size_shift: unpack_u8_opt(lm_size_shift), } } 48 => MangoInstruction::UpdateMarginBasket, 49 => { let data_arr = array_ref![data, 0, 4]; MangoInstruction::ChangeMaxMangoAccounts { max_mango_accounts: u32::from_le_bytes(*data_arr), } } 50 => MangoInstruction::CloseMangoAccount, 51 => MangoInstruction::CloseSpotOpenOrders, 52 => MangoInstruction::CloseAdvancedOrders, 53 => MangoInstruction::CreateDustAccount, 54 => MangoInstruction::ResolveDust, 55 => { let account_num = array_ref![data, 0, 8]; 
MangoInstruction::CreateMangoAccount { account_num: u64::from_le_bytes(*account_num), } } 56 => MangoInstruction::UpgradeMangoAccountV0V1, 57 => { let data_arr = array_ref![data, 0, 2]; let (side, limit) = array_refs![data_arr, 1, 1]; MangoInstruction::CancelPerpOrdersSide { side: Side::try_from_primitive(side[0]).ok()?, limit: u8::from_le_bytes(*limit), } } 58 => MangoInstruction::SetDelegate, 59 => { let data_arr = array_ref![data, 0, 104]; let ( maint_leverage, init_leverage, liquidation_fee, optimal_util, optimal_rate, max_rate, version, ) = array_refs![data_arr, 17, 17, 17, 17, 17, 17, 2]; MangoInstruction::ChangeSpotMarketParams { maint_leverage: unpack_i80f48_opt(maint_leverage), init_leverage: unpack_i80f48_opt(init_leverage), liquidation_fee: unpack_i80f48_opt(liquidation_fee), optimal_util: unpack_i80f48_opt(optimal_util), optimal_rate: unpack_i80f48_opt(optimal_rate), max_rate: unpack_i80f48_opt(max_rate), version: unpack_u8_opt(version), } } 60 => MangoInstruction::CreateSpotOpenOrders, 61 => { let data = array_ref![data, 0, 16]; let (ref_surcharge_centibps, ref_share_centibps, ref_mngo_required) = array_refs![data, 4, 4, 8]; MangoInstruction::ChangeReferralFeeParams { ref_surcharge_centibps: u32::from_le_bytes(*ref_surcharge_centibps), ref_share_centibps: u32::from_le_bytes(*ref_share_centibps), ref_mngo_required: u64::from_le_bytes(*ref_mngo_required), } } 62 => MangoInstruction::SetReferrerMemory, 63 => { let referrer_id = array_ref![data, 0, INFO_LEN]; MangoInstruction::RegisterReferrerId { referrer_id: *referrer_id } } 64 => { let data_arr = array_ref![data, 0, 44]; let ( price, max_base_quantity, max_quote_quantity, client_order_id, expiry_timestamp, side, order_type, reduce_only, limit, ) = array_refs![data_arr, 8, 8, 8, 8, 8, 1, 1, 1, 1]; MangoInstruction::PlacePerpOrder2 { price: i64::from_le_bytes(*price), max_base_quantity: i64::from_le_bytes(*max_base_quantity), max_quote_quantity: i64::from_le_bytes(*max_quote_quantity), client_order_id: 
u64::from_le_bytes(*client_order_id), expiry_timestamp: u64::from_le_bytes(*expiry_timestamp), side: Side::try_from_primitive(side[0]).ok()?, order_type: OrderType::try_from_primitive(order_type[0]).ok()?, reduce_only: reduce_only[0] != 0, limit: u8::from_le_bytes(*limit), } } _ => { return None; } }) } pub fn pack(&self) -> Vec<u8> { bincode::serialize(self).unwrap() } } fn unpack_u8_opt(data: &[u8; 2]) -> Option<u8> { if data[0] == 0 { None } else { Some(data[1]) } } fn unpack_i80f48_opt(data: &[u8; 17]) -> Option<I80F48> { let (opt, val) = array_refs![data, 1, 16]; if opt[0] == 0 { None } else { Some(I80F48::from_le_bytes(*val)) } } fn unpack_u64_opt(data: &[u8; 9]) -> Option<u64> { let (opt, val) = array_refs![data, 1, 8]; if opt[0] == 0 { None } else { Some(u64::from_le_bytes(*val)) } } fn unpack_dex_new_order_v3( data: &[u8; 46], ) -> Option<serum_dex::instruction::NewOrderInstructionV3> { let ( &side_arr, &price_arr, &max_coin_qty_arr, &max_native_pc_qty_arr, &self_trade_behavior_arr, &otype_arr, &client_order_id_bytes, &limit_arr, ) = array_refs![data, 4, 8, 8, 8, 4, 4, 8, 2]; let side = serum_dex::matching::Side::try_from_primitive( u32::from_le_bytes(side_arr).try_into().ok()?, ) .ok()?; let limit_price = NonZeroU64::new(u64::from_le_bytes(price_arr))?; let max_coin_qty = NonZeroU64::new(u64::from_le_bytes(max_coin_qty_arr))?; let max_native_pc_qty_including_fees = NonZeroU64::new(u64::from_le_bytes(max_native_pc_qty_arr))?; let self_trade_behavior = serum_dex::instruction::SelfTradeBehavior::try_from_primitive( u32::from_le_bytes(self_trade_behavior_arr).try_into().ok()?, ) .ok()?; let order_type = serum_dex::matching::OrderType::try_from_primitive( u32::from_le_bytes(otype_arr).try_into().ok()?, ) .ok()?; let client_order_id = u64::from_le_bytes(client_order_id_bytes); let limit = u16::from_le_bytes(limit_arr); Some(serum_dex::instruction::NewOrderInstructionV3 { side, limit_price, max_coin_qty, max_native_pc_qty_including_fees, self_trade_behavior, 
            order_type,
            client_order_id,
            limit,
    })
}

/// Builds the `InitMangoGroup` instruction.
///
/// Configures a brand-new Mango group account together with its quote-token
/// root/node bank, quote vault, insurance/fees vaults and cache account.
/// `admin_pk` is the only signer; `mango_group_pk`, the quote banks and the
/// cache are passed writable because the program initializes them.
pub fn init_mango_group(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    signer_pk: &Pubkey,
    admin_pk: &Pubkey,
    quote_mint_pk: &Pubkey,
    quote_vault_pk: &Pubkey,
    quote_node_bank_pk: &Pubkey,
    quote_root_bank_pk: &Pubkey,
    insurance_vault_pk: &Pubkey,
    msrm_vault_pk: &Pubkey, // send in Pubkey:default() if not using this feature
    fees_vault_pk: &Pubkey,
    mango_cache_ai: &Pubkey,
    dex_program_pk: &Pubkey,
    signer_nonce: u64,
    valid_interval: u64,
    quote_optimal_util: I80F48,
    quote_optimal_rate: I80F48,
    quote_max_rate: I80F48,
) -> Result<Instruction, ProgramError> {
    // Account order is part of the program ABI — do not reorder.
    let accounts = vec![
        AccountMeta::new(*mango_group_pk, false),
        AccountMeta::new_readonly(*signer_pk, false),
        AccountMeta::new_readonly(*admin_pk, true),
        AccountMeta::new_readonly(*quote_mint_pk, false),
        AccountMeta::new_readonly(*quote_vault_pk, false),
        AccountMeta::new(*quote_node_bank_pk, false),
        AccountMeta::new(*quote_root_bank_pk, false),
        AccountMeta::new_readonly(*insurance_vault_pk, false),
        AccountMeta::new_readonly(*msrm_vault_pk, false),
        AccountMeta::new_readonly(*fees_vault_pk, false),
        AccountMeta::new(*mango_cache_ai, false),
        AccountMeta::new_readonly(*dex_program_pk, false),
    ];
    let instr = MangoInstruction::InitMangoGroup {
        signer_nonce,
        valid_interval,
        quote_optimal_util,
        quote_optimal_rate,
        quote_max_rate,
    };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `InitMangoAccount` instruction for an already-allocated
/// mango account belonging to `owner_pk` (owner must sign).
pub fn init_mango_account(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    owner_pk: &Pubkey,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
    ];
    let instr = MangoInstruction::InitMangoAccount;
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

// NOTE(review): `close_mango_account`'s signature continues on the next
// source line; only the parameter list is visible here.
pub fn close_mango_account(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    owner_pk: &Pubkey,
)
-> Result<Instruction, ProgramError> {
    // NOTE(review): the group is passed writable here (unlike most read-only
    // builders) — presumably because closing mutates group state; confirm
    // against the on-chain processor.
    let accounts = vec![
        AccountMeta::new(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
    ];
    let instr = MangoInstruction::CloseMangoAccount;
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `CreateMangoAccount` instruction.
///
/// Unlike `init_mango_account`, the system program and a writable, signing
/// `payer_pk` are passed — presumably so the program can allocate the account
/// as a PDA derived from `account_num`; confirm against the processor.
pub fn create_mango_account(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    owner_pk: &Pubkey,
    system_prog_pk: &Pubkey,
    payer_pk: &Pubkey,
    account_num: u64,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
        AccountMeta::new_readonly(*system_prog_pk, false),
        AccountMeta::new(*payer_pk, true),
    ];
    let instr = MangoInstruction::CreateMangoAccount { account_num };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `SetDelegate` instruction: the signing owner designates
/// `delegate_pk` (read-only, non-signer) for the mango account.
pub fn set_delegate(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    owner_pk: &Pubkey,
    delegate_pk: &Pubkey,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
        AccountMeta::new_readonly(*delegate_pk, false),
    ];
    let instr = MangoInstruction::SetDelegate {};
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `UpgradeMangoAccountV0V1` instruction (owner must sign;
/// both group and account are writable).
pub fn upgrade_mango_account_v0_v1(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    owner_pk: &Pubkey,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
    ];
    let instr = MangoInstruction::UpgradeMangoAccountV0V1;
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

// NOTE(review): `deposit`'s signature continues on the next source line.
pub fn deposit(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
mango_account_pk: &Pubkey, owner_pk: &Pubkey, mango_cache_pk: &Pubkey, root_bank_pk: &Pubkey, node_bank_pk: &Pubkey, vault_pk: &Pubkey, owner_token_account_pk: &Pubkey, quantity: u64, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new_readonly(*root_bank_pk, false), AccountMeta::new(*node_bank_pk, false), AccountMeta::new(*vault_pk, false), AccountMeta::new_readonly(spl_token::ID, false), AccountMeta::new(*owner_token_account_pk, false), ]; let instr = MangoInstruction::Deposit { quantity }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn add_spot_market( program_id: &Pubkey, mango_group_pk: &Pubkey, oracle_pk: &Pubkey, spot_market_pk: &Pubkey, dex_program_pk: &Pubkey, token_mint_pk: &Pubkey, node_bank_pk: &Pubkey, vault_pk: &Pubkey, root_bank_pk: &Pubkey, admin_pk: &Pubkey, maint_leverage: I80F48, init_leverage: I80F48, liquidation_fee: I80F48, optimal_util: I80F48, optimal_rate: I80F48, max_rate: I80F48, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new(*mango_group_pk, false), AccountMeta::new_readonly(*oracle_pk, false), AccountMeta::new_readonly(*spot_market_pk, false), AccountMeta::new_readonly(*dex_program_pk, false), AccountMeta::new_readonly(*token_mint_pk, false), AccountMeta::new(*node_bank_pk, false), AccountMeta::new_readonly(*vault_pk, false), AccountMeta::new(*root_bank_pk, false), AccountMeta::new_readonly(*admin_pk, true), ]; let instr = MangoInstruction::AddSpotMarket { maint_leverage, init_leverage, liquidation_fee, optimal_util, optimal_rate, max_rate, }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn add_perp_market( program_id: &Pubkey, mango_group_pk: &Pubkey, oracle_pk: &Pubkey, perp_market_pk: &Pubkey, 
event_queue_pk: &Pubkey, bids_pk: &Pubkey, asks_pk: &Pubkey, mngo_vault_pk: &Pubkey, admin_pk: &Pubkey, maint_leverage: I80F48, init_leverage: I80F48, liquidation_fee: I80F48, maker_fee: I80F48, taker_fee: I80F48, base_lot_size: i64, quote_lot_size: i64, rate: I80F48, max_depth_bps: I80F48, target_period_length: u64, mngo_per_period: u64, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new(*mango_group_pk, false), AccountMeta::new(*oracle_pk, false), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*event_queue_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), AccountMeta::new_readonly(*mngo_vault_pk, false), AccountMeta::new_readonly(*admin_pk, true), ]; let instr = MangoInstruction::AddPerpMarket { maint_leverage, init_leverage, liquidation_fee, maker_fee, taker_fee, base_lot_size, quote_lot_size, rate, max_depth_bps, target_period_length, mngo_per_period, exp: 2, // TODO add this to function signature }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn place_perp_order( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, owner_pk: &Pubkey, mango_cache_pk: &Pubkey, perp_market_pk: &Pubkey, bids_pk: &Pubkey, asks_pk: &Pubkey, event_queue_pk: &Pubkey, referrer_mango_account_pk: Option<&Pubkey>, open_orders_pks: &[Pubkey; MAX_PAIRS], side: Side, price: i64, quantity: i64, client_order_id: u64, order_type: OrderType, reduce_only: bool, ) -> Result<Instruction, ProgramError> { let mut accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), AccountMeta::new(*event_queue_pk, false), ]; accounts.extend(open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false))); if let 
Some(referrer_mango_account_pk) = referrer_mango_account_pk { accounts.push(AccountMeta::new(*referrer_mango_account_pk, false)); } let instr = MangoInstruction::PlacePerpOrder { side, price, quantity, client_order_id, order_type, reduce_only, }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn place_perp_order2( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, owner_pk: &Pubkey, mango_cache_pk: &Pubkey, perp_market_pk: &Pubkey, bids_pk: &Pubkey, asks_pk: &Pubkey, event_queue_pk: &Pubkey, referrer_mango_account_pk: Option<&Pubkey>, open_orders_pks: &[Pubkey], side: Side, price: i64, max_base_quantity: i64, max_quote_quantity: i64, client_order_id: u64, order_type: OrderType, reduce_only: bool, expiry_timestamp: Option<u64>, // Send 0 if you want to ignore time in force limit: u8, // maximum number of FillEvents before terminating ) -> Result<Instruction, ProgramError> { let mut accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), AccountMeta::new(*event_queue_pk, false), AccountMeta::new(*referrer_mango_account_pk.unwrap_or(mango_account_pk), false), ]; accounts.extend(open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false))); let instr = MangoInstruction::PlacePerpOrder2 { side, price, max_base_quantity, max_quote_quantity, client_order_id, order_type, reduce_only, expiry_timestamp: expiry_timestamp.unwrap_or(0), limit, }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn cancel_perp_order_by_client_id( program_id: &Pubkey, mango_group_pk: &Pubkey, // read mango_account_pk: &Pubkey, // write owner_pk: &Pubkey, // read, signer perp_market_pk: &Pubkey, // write bids_pk: &Pubkey, 
// write asks_pk: &Pubkey, // write client_order_id: u64, invalid_id_ok: bool, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), ]; let instr = MangoInstruction::CancelPerpOrderByClientId { client_order_id, invalid_id_ok }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn cancel_perp_order( program_id: &Pubkey, mango_group_pk: &Pubkey, // read mango_account_pk: &Pubkey, // write owner_pk: &Pubkey, // read, signer perp_market_pk: &Pubkey, // write bids_pk: &Pubkey, // write asks_pk: &Pubkey, // write order_id: i128, invalid_id_ok: bool, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), ]; let instr = MangoInstruction::CancelPerpOrder { order_id, invalid_id_ok }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn cancel_all_perp_orders( program_id: &Pubkey, mango_group_pk: &Pubkey, // read mango_account_pk: &Pubkey, // write owner_pk: &Pubkey, // read, signer perp_market_pk: &Pubkey, // write bids_pk: &Pubkey, // write asks_pk: &Pubkey, // write limit: u8, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), ]; let instr = MangoInstruction::CancelAllPerpOrders { limit }; let data = instr.pack(); Ok(Instruction 
{ program_id: *program_id, accounts, data })
}

/// Builds the `CancelPerpOrdersSide` instruction: cancels up to `limit` of
/// the owner's resting perp orders on one book side (bids or asks).
pub fn cancel_perp_orders_side(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,   // read
    mango_account_pk: &Pubkey, // write
    owner_pk: &Pubkey,         // read, signer
    perp_market_pk: &Pubkey,   // write
    bids_pk: &Pubkey,          // write
    asks_pk: &Pubkey,          // write
    side: Side,
    limit: u8,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
        AccountMeta::new(*perp_market_pk, false),
        AccountMeta::new(*bids_pk, false),
        AccountMeta::new(*asks_pk, false),
    ];
    let instr = MangoInstruction::CancelPerpOrdersSide { side, limit };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `ForceCancelPerpOrders` instruction against a liqee account.
/// No signer is required; the liqee's open-orders accounts are appended
/// read-only after the fixed account list.
pub fn force_cancel_perp_orders(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,         // read
    mango_cache_pk: &Pubkey,         // read
    perp_market_pk: &Pubkey,         // read
    bids_pk: &Pubkey,                // write
    asks_pk: &Pubkey,                // write
    liqee_mango_account_pk: &Pubkey, // write
    open_orders_pks: &[Pubkey],      // read
    limit: u8,
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new_readonly(*perp_market_pk, false),
        AccountMeta::new(*bids_pk, false),
        AccountMeta::new(*asks_pk, false),
        AccountMeta::new(*liqee_mango_account_pk, false),
    ];
    accounts.extend(open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));
    let instr = MangoInstruction::ForceCancelPerpOrders { limit };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

// NOTE(review): `init_advanced_orders`'s body continues on the next source line.
pub fn init_advanced_orders(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,     // read
    mango_account_pk: &Pubkey,   // write
    owner_pk: &Pubkey,           // write & signer
    advanced_orders_pk: &Pubkey, // write
    system_prog_pk: &Pubkey,     // read
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk,
false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new(*owner_pk, true), AccountMeta::new(*advanced_orders_pk, false), AccountMeta::new_readonly(*system_prog_pk, false), ]; let instr = MangoInstruction::InitAdvancedOrders {}; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn close_advanced_orders( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, advanced_orders_pk: &Pubkey, owner_pk: &Pubkey, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new(*owner_pk, true), AccountMeta::new(*advanced_orders_pk, false), ]; let instr = MangoInstruction::CloseAdvancedOrders; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn add_perp_trigger_order( program_id: &Pubkey, mango_group_pk: &Pubkey, // read mango_account_pk: &Pubkey, // read owner_pk: &Pubkey, // write & signer advanced_orders_pk: &Pubkey, // write mango_cache_pk: &Pubkey, // read perp_market_pk: &Pubkey, // read system_prog_pk: &Pubkey, // read order_type: OrderType, side: Side, trigger_condition: TriggerCondition, reduce_only: bool, client_order_id: u64, price: i64, quantity: i64, trigger_price: I80F48, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new_readonly(*mango_account_pk, false), AccountMeta::new(*owner_pk, true), AccountMeta::new(*advanced_orders_pk, false), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new_readonly(*perp_market_pk, false), AccountMeta::new_readonly(*system_prog_pk, false), ]; let instr = MangoInstruction::AddPerpTriggerOrder { order_type, side, trigger_condition, reduce_only, client_order_id, price, quantity, trigger_price, }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn remove_advanced_order( program_id: 
&Pubkey, mango_group_pk: &Pubkey, // read mango_account_pk: &Pubkey, // read owner_pk: &Pubkey, // write & signer advanced_orders_pk: &Pubkey, // write system_prog_pk: &Pubkey, // read order_index: u8, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new_readonly(*mango_account_pk, false), AccountMeta::new(*owner_pk, true), AccountMeta::new(*advanced_orders_pk, false), AccountMeta::new_readonly(*system_prog_pk, false), ]; let instr = MangoInstruction::RemoveAdvancedOrder { order_index }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn execute_perp_trigger_order( program_id: &Pubkey, mango_group_pk: &Pubkey, // read mango_account_pk: &Pubkey, // write advanced_orders_pk: &Pubkey, // write agent_pk: &Pubkey, // write & signer mango_cache_pk: &Pubkey, // read perp_market_pk: &Pubkey, // write bids_pk: &Pubkey, // write asks_pk: &Pubkey, // write event_queue_pk: &Pubkey, // write order_index: u8, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new(*advanced_orders_pk, false), AccountMeta::new(*agent_pk, true), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new(*perp_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), AccountMeta::new(*event_queue_pk, false), ]; let instr = MangoInstruction::ExecutePerpTriggerOrder { order_index }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn consume_events( program_id: &Pubkey, mango_group_pk: &Pubkey, // read mango_cache_pk: &Pubkey, // read perp_market_pk: &Pubkey, // read event_queue_pk: &Pubkey, // write mango_acc_pks: &mut [Pubkey], // write limit: usize, ) -> Result<Instruction, ProgramError> { let fixed_accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), 
AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new(*perp_market_pk, false),
        AccountMeta::new(*event_queue_pk, false),
    ];
    // Accounts are sorted in place so the processor can binary-search /
    // dedupe them deterministically — TODO confirm against the processor.
    mango_acc_pks.sort();
    let mango_accounts = mango_acc_pks.into_iter().map(|pk| AccountMeta::new(*pk, false));
    let accounts = fixed_accounts.into_iter().chain(mango_accounts).collect();
    let instr = MangoInstruction::ConsumeEvents { limit };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `SettlePnl` instruction: settles unrealized PnL between two
/// mango accounts for the perp market at `market_index`, moving balances
/// through the given quote node bank. Permissionless (no signer).
pub fn settle_pnl(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,     // read
    mango_account_a_pk: &Pubkey, // write
    mango_account_b_pk: &Pubkey, // write
    mango_cache_pk: &Pubkey,     // read
    root_bank_pk: &Pubkey,       // read
    node_bank_pk: &Pubkey,       // write
    market_index: usize,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_account_a_pk, false),
        AccountMeta::new(*mango_account_b_pk, false),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new_readonly(*root_bank_pk, false),
        AccountMeta::new(*node_bank_pk, false),
    ];
    let instr = MangoInstruction::SettlePnl { market_index };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `UpdateFunding` instruction for a perp market; the cache and
/// market are writable, the books are read-only. Permissionless crank.
pub fn update_funding(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey, // read
    mango_cache_pk: &Pubkey, // write
    perp_market_pk: &Pubkey, // write
    bids_pk: &Pubkey,        // read
    asks_pk: &Pubkey,        // read
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_cache_pk, false),
        AccountMeta::new(*perp_market_pk, false),
        AccountMeta::new_readonly(*bids_pk, false),
        AccountMeta::new_readonly(*asks_pk, false),
    ];
    let instr = MangoInstruction::UpdateFunding {};
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

// NOTE(review): `withdraw`'s signature continues on the next source line.
pub fn withdraw(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    owner_pk: &Pubkey,
    mango_cache_pk: &Pubkey,
    root_bank_pk: &Pubkey,
node_bank_pk: &Pubkey,
    vault_pk: &Pubkey,
    token_account_pk: &Pubkey,
    signer_pk: &Pubkey,
    open_orders_pks: &[Pubkey],
    quantity: u64,
    allow_borrow: bool,
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new_readonly(*root_bank_pk, false),
        AccountMeta::new(*node_bank_pk, false),
        AccountMeta::new(*vault_pk, false),
        AccountMeta::new(*token_account_pk, false),
        AccountMeta::new_readonly(*signer_pk, false),
        AccountMeta::new_readonly(spl_token::ID, false),
    ];
    // Open-orders accounts are appended read-only after the fixed list.
    accounts.extend(open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));
    let instr = MangoInstruction::Withdraw { quantity, allow_borrow };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Builds the `Borrow` instruction: borrows `quantity` native units against
/// the account's collateral (owner must sign).
///
/// NOTE(review): unlike `withdraw`, this builder marks the group writable and
/// appends the open-orders accounts writable (`AccountMeta::new`) — verify
/// against the on-chain processor whether that asymmetry is intentional.
pub fn borrow(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_account_pk: &Pubkey,
    mango_cache_pk: &Pubkey,
    owner_pk: &Pubkey,
    root_bank_pk: &Pubkey,
    node_bank_pk: &Pubkey,
    open_orders_pks: &[Pubkey],
    quantity: u64,
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new(*mango_group_pk, false),
        AccountMeta::new(*mango_account_pk, false),
        AccountMeta::new_readonly(*owner_pk, true),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new_readonly(*root_bank_pk, false),
        AccountMeta::new(*node_bank_pk, false),
    ];
    accounts.extend(open_orders_pks.iter().map(|pk| AccountMeta::new(*pk, false)));
    let instr = MangoInstruction::Borrow { quantity };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

// NOTE(review): `cache_prices`'s body continues on the next source line.
pub fn cache_prices(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_cache_pk: &Pubkey,
    oracle_pks: &[Pubkey],
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*mango_cache_pk, false),
    ];
accounts.extend(oracle_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false))); let instr = MangoInstruction::CachePrices; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn cache_root_banks( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_cache_pk: &Pubkey, root_bank_pks: &[Pubkey], ) -> Result<Instruction, ProgramError> { let mut accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_cache_pk, false), ]; accounts.extend(root_bank_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false))); let instr = MangoInstruction::CacheRootBanks; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn cache_perp_markets( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_cache_pk: &Pubkey, perp_market_pks: &[Pubkey], ) -> Result<Instruction, ProgramError> { let mut accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_cache_pk, false), ]; accounts.extend(perp_market_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false))); let instr = MangoInstruction::CachePerpMarkets; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn init_spot_open_orders( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, owner_pk: &Pubkey, dex_prog_pk: &Pubkey, open_orders_pk: &Pubkey, spot_market_pk: &Pubkey, signer_pk: &Pubkey, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new_readonly(*dex_prog_pk, false), AccountMeta::new(*open_orders_pk, false), AccountMeta::new_readonly(*spot_market_pk, false), AccountMeta::new_readonly(*signer_pk, false), AccountMeta::new_readonly(solana_program::sysvar::rent::ID, false), ]; let instr = MangoInstruction::InitSpotOpenOrders; let data = instr.pack(); Ok(Instruction { 
program_id: *program_id, accounts, data }) } pub fn create_spot_open_orders( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, owner_pk: &Pubkey, dex_prog_pk: &Pubkey, open_orders_pk: &Pubkey, spot_market_pk: &Pubkey, signer_pk: &Pubkey, payer_pk: &Pubkey, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new_readonly(*dex_prog_pk, false), AccountMeta::new(*open_orders_pk, false), AccountMeta::new_readonly(*spot_market_pk, false), AccountMeta::new_readonly(*signer_pk, false), AccountMeta::new_readonly(solana_program::system_program::ID, false), AccountMeta::new(*payer_pk, true), ]; let instr = MangoInstruction::CreateSpotOpenOrders; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn close_spot_open_orders( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, owner_pk: &Pubkey, dex_prog_pk: &Pubkey, open_orders_pk: &Pubkey, spot_market_pk: &Pubkey, signer_pk: &Pubkey, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new(*owner_pk, true), AccountMeta::new_readonly(*dex_prog_pk, false), AccountMeta::new(*open_orders_pk, false), AccountMeta::new_readonly(*spot_market_pk, false), AccountMeta::new_readonly(*signer_pk, false), ]; let instr = MangoInstruction::CloseSpotOpenOrders; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn place_spot_order( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_account_pk: &Pubkey, owner_pk: &Pubkey, mango_cache_pk: &Pubkey, dex_prog_pk: &Pubkey, spot_market_pk: &Pubkey, bids_pk: &Pubkey, asks_pk: &Pubkey, dex_request_queue_pk: &Pubkey, dex_event_queue_pk: &Pubkey, dex_base_pk: &Pubkey, dex_quote_pk: &Pubkey, base_root_bank_pk: 
&Pubkey, base_node_bank_pk: &Pubkey, base_vault_pk: &Pubkey, quote_root_bank_pk: &Pubkey, quote_node_bank_pk: &Pubkey, quote_vault_pk: &Pubkey, signer_pk: &Pubkey, dex_signer_pk: &Pubkey, msrm_or_srm_vault_pk: &Pubkey, open_orders_pks: &[Pubkey], market_index: usize, // used to determine which of the open orders accounts should be passed in write order: serum_dex::instruction::NewOrderInstructionV3, ) -> Result<Instruction, ProgramError> { let mut accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new_readonly(*dex_prog_pk, false), AccountMeta::new(*spot_market_pk, false), AccountMeta::new(*bids_pk, false), AccountMeta::new(*asks_pk, false), AccountMeta::new(*dex_request_queue_pk, false), AccountMeta::new(*dex_event_queue_pk, false), AccountMeta::new(*dex_base_pk, false), AccountMeta::new(*dex_quote_pk, false), AccountMeta::new_readonly(*base_root_bank_pk, false), AccountMeta::new(*base_node_bank_pk, false), AccountMeta::new(*base_vault_pk, false), AccountMeta::new_readonly(*quote_root_bank_pk, false), AccountMeta::new(*quote_node_bank_pk, false), AccountMeta::new(*quote_vault_pk, false), AccountMeta::new_readonly(spl_token::ID, false), AccountMeta::new_readonly(*signer_pk, false), AccountMeta::new_readonly(solana_program::sysvar::rent::ID, false), AccountMeta::new_readonly(*dex_signer_pk, false), AccountMeta::new_readonly(*msrm_or_srm_vault_pk, false), ]; accounts.extend(open_orders_pks.iter().enumerate().map(|(i, pk)| { if i == market_index { AccountMeta::new(*pk, false) } else { AccountMeta::new_readonly(*pk, false) } })); let instr = MangoInstruction::PlaceSpotOrder { order }; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn settle_funds( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_cache_pk: &Pubkey, owner_pk: &Pubkey, mango_account_pk: 
&Pubkey, dex_prog_pk: &Pubkey, spot_market_pk: &Pubkey, open_orders_pk: &Pubkey, signer_pk: &Pubkey, dex_base_pk: &Pubkey, dex_quote_pk: &Pubkey, base_root_bank_pk: &Pubkey, base_node_bank_pk: &Pubkey, quote_root_bank_pk: &Pubkey, quote_node_bank_pk: &Pubkey, base_vault_pk: &Pubkey, quote_vault_pk: &Pubkey, dex_signer_pk: &Pubkey, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new_readonly(*mango_cache_pk, false), AccountMeta::new_readonly(*owner_pk, true), AccountMeta::new(*mango_account_pk, false), AccountMeta::new_readonly(*dex_prog_pk, false), AccountMeta::new(*spot_market_pk, false), AccountMeta::new(*open_orders_pk, false), AccountMeta::new_readonly(*signer_pk, false), AccountMeta::new(*dex_base_pk, false), AccountMeta::new(*dex_quote_pk, false), AccountMeta::new_readonly(*base_root_bank_pk, false), AccountMeta::new(*base_node_bank_pk, false), AccountMeta::new_readonly(*quote_root_bank_pk, false), AccountMeta::new(*quote_node_bank_pk, false), AccountMeta::new(*base_vault_pk, false), AccountMeta::new(*quote_vault_pk, false), AccountMeta::new_readonly(*dex_signer_pk, false), AccountMeta::new_readonly(spl_token::ID, false), ]; let instr = MangoInstruction::SettleFunds; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn add_oracle( program_id: &Pubkey, mango_group_pk: &Pubkey, oracle_pk: &Pubkey, admin_pk: &Pubkey, ) -> Result<Instruction, ProgramError> { let accounts = vec![ AccountMeta::new(*mango_group_pk, false), AccountMeta::new(*oracle_pk, false), AccountMeta::new_readonly(*admin_pk, true), ]; let instr = MangoInstruction::AddOracle; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) } pub fn update_root_bank( program_id: &Pubkey, mango_group_pk: &Pubkey, mango_cache_pk: &Pubkey, root_bank_pk: &Pubkey, node_bank_pks: &[Pubkey], ) -> Result<Instruction, ProgramError> { let mut accounts = vec![ 
AccountMeta::new_readonly(*mango_group_pk, false), AccountMeta::new(*mango_cache_pk, false), AccountMeta::new(*root_bank_pk, false), ]; accounts.extend(node_bank_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false))); let instr = MangoInstruction::UpdateRootBank; let data = instr.pack(); Ok(Instruction { program_id: *program_id, accounts, data }) }

/// Build a `SetOracle` instruction that writes `price` into the given oracle
/// account. The admin key is the only signer; the group is passed read-only.
pub fn set_oracle(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    oracle_pk: &Pubkey,
    admin_pk: &Pubkey,
    price: I80F48,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new(*oracle_pk, false),
        AccountMeta::new_readonly(*admin_pk, true),
    ];

    let instr = MangoInstruction::SetOracle { price };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Build a `LiquidateTokenAndToken` instruction.
///
/// Fixed accounts come first (group, cache, liqee/liqor accounts, liqor
/// signer, asset/liab root and node banks), followed by the liqee's and then
/// the liqor's open-orders accounts, all read-only. Only the liqor signs.
pub fn liquidate_token_and_token(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_cache_pk: &Pubkey,
    liqee_mango_account_pk: &Pubkey,
    liqor_mango_account_pk: &Pubkey,
    liqor_pk: &Pubkey,
    asset_root_bank_pk: &Pubkey,
    asset_node_bank_pk: &Pubkey,
    liab_root_bank_pk: &Pubkey,
    liab_node_bank_pk: &Pubkey,
    liqee_open_orders_pks: &[Pubkey],
    liqor_open_orders_pks: &[Pubkey],
    max_liab_transfer: I80F48,
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new(*liqee_mango_account_pk, false),
        AccountMeta::new(*liqor_mango_account_pk, false),
        AccountMeta::new_readonly(*liqor_pk, true),
        AccountMeta::new_readonly(*asset_root_bank_pk, false),
        AccountMeta::new(*asset_node_bank_pk, false),
        AccountMeta::new_readonly(*liab_root_bank_pk, false),
        AccountMeta::new(*liab_node_bank_pk, false),
    ];

    // Liqee open orders first, then liqor open orders (order matters on-chain).
    accounts.extend(liqee_open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));
    accounts.extend(liqor_open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));

    let instr = MangoInstruction::LiquidateTokenAndToken { max_liab_transfer };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Build a `LiquidateTokenAndPerp` instruction.
///
/// `asset_type`/`asset_index` and `liab_type`/`liab_index` select which side
/// of the liqee's balance sheet is the asset vs. the liability; only a single
/// root/node bank pair is passed (the token side of the transfer).
pub fn liquidate_token_and_perp(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_cache_pk: &Pubkey,
    liqee_mango_account_pk: &Pubkey,
    liqor_mango_account_pk: &Pubkey,
    liqor_pk: &Pubkey,
    root_bank_pk: &Pubkey,
    node_bank_pk: &Pubkey,
    liqee_open_orders_pks: &[Pubkey],
    liqor_open_orders_pks: &[Pubkey],
    asset_type: AssetType,
    asset_index: usize,
    liab_type: AssetType,
    liab_index: usize,
    max_liab_transfer: I80F48,
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new(*liqee_mango_account_pk, false),
        AccountMeta::new(*liqor_mango_account_pk, false),
        AccountMeta::new_readonly(*liqor_pk, true),
        AccountMeta::new_readonly(*root_bank_pk, false),
        AccountMeta::new(*node_bank_pk, false),
    ];

    accounts.extend(liqee_open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));
    accounts.extend(liqor_open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));

    let instr = MangoInstruction::LiquidateTokenAndPerp {
        asset_type,
        asset_index,
        liab_type,
        liab_index,
        max_liab_transfer,
    };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Build a `LiquidatePerpMarket` instruction.
///
/// The perp market and its event queue are writable; `base_transfer_request`
/// is the signed base-position size the liqor requests to take over.
pub fn liquidate_perp_market(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    mango_cache_pk: &Pubkey,
    perp_market_pk: &Pubkey,
    event_queue_pk: &Pubkey,
    liqee_mango_account_pk: &Pubkey,
    liqor_mango_account_pk: &Pubkey,
    liqor_pk: &Pubkey,
    liqee_open_orders_pks: &[Pubkey],
    liqor_open_orders_pks: &[Pubkey],
    base_transfer_request: i64,
) -> Result<Instruction, ProgramError> {
    let mut accounts = vec![
        AccountMeta::new_readonly(*mango_group_pk, false),
        AccountMeta::new_readonly(*mango_cache_pk, false),
        AccountMeta::new(*perp_market_pk, false),
        AccountMeta::new(*event_queue_pk, false),
        AccountMeta::new(*liqee_mango_account_pk, false),
        AccountMeta::new(*liqor_mango_account_pk, false),
        AccountMeta::new_readonly(*liqor_pk, true),
    ];

    accounts.extend(liqee_open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));
    accounts.extend(liqor_open_orders_pks.iter().map(|pk| AccountMeta::new_readonly(*pk, false)));

    let instr = MangoInstruction::LiquidatePerpMarket { base_transfer_request };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Build a `ChangeSpotMarketParams` instruction.
///
/// Every parameter is optional; `None` leaves the on-chain value unchanged.
/// NOTE(review): the group account is passed writable here
/// (`AccountMeta::new`), unlike the read-only group in the liquidate builders
/// above — presumably intentional since params live in group state; confirm
/// against the on-chain processor.
pub fn change_spot_market_params(
    program_id: &Pubkey,
    mango_group_pk: &Pubkey,
    spot_market_pk: &Pubkey,
    root_bank_pk: &Pubkey,
    admin_pk: &Pubkey,
    maint_leverage: Option<I80F48>,
    init_leverage: Option<I80F48>,
    liquidation_fee: Option<I80F48>,
    optimal_util: Option<I80F48>,
    optimal_rate: Option<I80F48>,
    max_rate: Option<I80F48>,
    version: Option<u8>,
) -> Result<Instruction, ProgramError> {
    let accounts = vec![
        AccountMeta::new(*mango_group_pk, false),
        AccountMeta::new(*spot_market_pk, false),
        AccountMeta::new(*root_bank_pk, false),
        AccountMeta::new_readonly(*admin_pk, true),
    ];

    let instr = MangoInstruction::ChangeSpotMarketParams {
        maint_leverage,
        init_leverage,
        liquidation_fee,
        optimal_util,
        optimal_rate,
        max_rate,
        version,
    };
    let data = instr.pack();
    Ok(Instruction { program_id: *program_id, accounts, data })
}

/// Serialize Option<T> as (bool, T). This gives the binary representation
/// a fixed width, instead of it becoming one byte for None.
fn serialize_option_fixed_width<S: serde::Serializer, T: Sized + Default + Serialize>(
    opt: &Option<T>,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    use serde::ser::SerializeTuple;
    let mut tup = serializer.serialize_tuple(2)?;
    match opt {
        Some(value) => {
            tup.serialize_element(&true)?;
            tup.serialize_element(&value)?;
        }
        None => {
            tup.serialize_element(&false)?;
            // None still writes a default-valued T so the width stays fixed.
            tup.serialize_element(&T::default())?;
        }
    };
    tup.end()
}
38.438859
114
0.614676
fb9693146058a23eb9c66297ebd194f1ae82403e
7,181
// WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!!
// NOTE(review): do not hand-edit method bodies below — regenerate with jni-bindgen instead.

#[cfg(any(feature = "all", feature = "android-view-textclassifier-ConversationActions"))]
__jni_bindgen! {
    /// public final class [ConversationActions](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html)
    ///
    /// Required feature: android-view-textclassifier-ConversationActions
    public final class ConversationActions ("android/view/textclassifier/ConversationActions") extends crate::java::lang::Object, implements crate::android::os::Parcelable {

        /// [ConversationActions](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html#ConversationActions(java.util.List,%20java.lang.String))
        ///
        /// Required features: "java-lang-String", "java-util-List"
        #[cfg(any(feature = "all", all(feature = "java-lang-String", feature = "java-util-List")))]
        pub fn new<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::util::List>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::String>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::android::view::textclassifier::ConversationActions>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
            // class.path == "android/view/textclassifier/ConversationActions", java.flags == PUBLIC, .name == "<init>", .descriptor == "(Ljava/util/List;Ljava/lang/String;)V"
            unsafe {
                let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())];
                let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/textclassifier/ConversationActions\0", "<init>\0", "(Ljava/util/List;Ljava/lang/String;)V\0");
                __jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
            }
        }

        /// [describeContents](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html#describeContents())
        pub fn describeContents<'env>(&'env self) -> __jni_bindgen::std::result::Result<i32, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
            // class.path == "android/view/textclassifier/ConversationActions", java.flags == PUBLIC, .name == "describeContents", .descriptor == "()I"
            unsafe {
                let __jni_args = [];
                let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
                let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/textclassifier/ConversationActions\0", "describeContents\0", "()I\0");
                __jni_env.call_int_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
            }
        }

        /// [writeToParcel](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html#writeToParcel(android.os.Parcel,%20int))
        ///
        /// Required features: "android-os-Parcel"
        #[cfg(any(feature = "all", all(feature = "android-os-Parcel")))]
        pub fn writeToParcel<'env>(&'env self, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::os::Parcel>>, arg1: i32) -> __jni_bindgen::std::result::Result<(), __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
            // class.path == "android/view/textclassifier/ConversationActions", java.flags == PUBLIC, .name == "writeToParcel", .descriptor == "(Landroid/os/Parcel;I)V"
            unsafe {
                let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1)];
                let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
                let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/textclassifier/ConversationActions\0", "writeToParcel\0", "(Landroid/os/Parcel;I)V\0");
                __jni_env.call_void_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
            }
        }

        /// [getConversationActions](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html#getConversationActions())
        ///
        /// Required features: "java-util-List"
        #[cfg(any(feature = "all", all(feature = "java-util-List")))]
        pub fn getConversationActions<'env>(&'env self) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::java::util::List>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
            // class.path == "android/view/textclassifier/ConversationActions", java.flags == PUBLIC, .name == "getConversationActions", .descriptor == "()Ljava/util/List;"
            unsafe {
                let __jni_args = [];
                let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
                let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/textclassifier/ConversationActions\0", "getConversationActions\0", "()Ljava/util/List;\0");
                __jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
            }
        }

        /// [getId](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html#getId())
        ///
        /// Required features: "java-lang-String"
        #[cfg(any(feature = "all", all(feature = "java-lang-String")))]
        pub fn getId<'env>(&'env self) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::java::lang::String>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
            // class.path == "android/view/textclassifier/ConversationActions", java.flags == PUBLIC, .name == "getId", .descriptor == "()Ljava/lang/String;"
            unsafe {
                let __jni_args = [];
                let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
                let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/textclassifier/ConversationActions\0", "getId\0", "()Ljava/lang/String;\0");
                __jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
            }
        }

        /// **get** public static final [CREATOR](https://developer.android.com/reference/android/view/textclassifier/ConversationActions.html#CREATOR)
        ///
        /// Required feature: android-os-Parcelable_Creator
        #[cfg(any(feature = "all", feature = "android-os-Parcelable_Creator"))]
        pub fn CREATOR<'env>(env: &'env __jni_bindgen::Env) -> __jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::android::os::Parcelable_Creator>> {
            unsafe {
                let (class, field) = env.require_class_static_field("android/view/textclassifier/ConversationActions\0", "CREATOR\0", "Landroid/os/Parcelable$Creator;\0");
                env.get_static_object_field(class, field)
            }
        }
    }
}
80.685393
474
0.666202
6a3893d1636a45f0a56087c36b553bfeb8810138
6,340
/* Pyexpat builtin module
 *
 * A minimal emulation of CPython's `pyexpat` extension module, backed by the
 * `xml` crate's pull parser instead of libexpat.
 */

use crate::vm::VirtualMachine;
use crate::PyObjectRef;

/// Build the `pyexpat` module object, attaching the (currently empty)
/// `errors` and `model` submodules that CPython code expects to find.
pub fn make_module(vm: &VirtualMachine) -> PyObjectRef {
    let module = _pyexpat::make_module(vm);

    extend_module!(vm, module, {
        "errors" => _errors::make_module(vm),
        "model" => _model::make_module(vm),
    });

    module
}

// Registers a read/write getset property named `$name` on the class,
// backed by the `$element` field of `PyExpatLikeXmlParser`.
macro_rules! create_property {
    ($ctx: expr, $attributes: expr, $name: expr, $element: ident) => {
        let attr = $ctx.new_getset(
            $name,
            move |this: &PyExpatLikeXmlParser| this.$element.read().clone(),
            move |this: &PyExpatLikeXmlParser, func: PyObjectRef| *this.$element.write() = func,
        );

        $attributes.insert($name.to_owned(), attr);
    };
}

#[pymodule(name = "pyexpat")]
mod _pyexpat {
    use crate::builtins::{PyStr, PyStrRef, PyTypeRef};
    use crate::byteslike::PyBytesLike;
    use crate::function::{IntoFuncArgs, OptionalArg};
    use crate::pyobject::StaticType;
    use crate::{
        ItemProtocol, PyContext, PyObjectRef, PyRef, PyResult, PyValue, TryFromObject,
        VirtualMachine,
    };
    use rustpython_common::lock::PyRwLock;
    use std::io::Cursor;
    use xml::reader::XmlEvent;

    // Each handler slot is a lock-protected Python object (usually a callable
    // or None), mutable from Python via the getset properties below.
    type MutableObject = PyRwLock<PyObjectRef>;

    #[pyattr]
    #[pyclass(name = "xmlparser", module = false)]
    #[derive(Debug)]
    pub struct PyExpatLikeXmlParser {
        // Python-visible handler callbacks (see `extend_class_with_fields`).
        start_element: MutableObject,
        end_element: MutableObject,
        character_data: MutableObject,
        entity_decl: MutableObject,
        buffer_text: MutableObject,
    }
    type PyExpatLikeXmlParserRef = PyRef<PyExpatLikeXmlParser>;

    impl PyValue for PyExpatLikeXmlParser {
        fn class(_vm: &VirtualMachine) -> &PyTypeRef {
            Self::static_type()
        }
    }

    /// Call a Python handler with `args`; any exception raised by the
    /// callback is silently discarded (`.ok()`), matching best-effort
    /// callback semantics.
    #[inline]
    fn invoke_handler<T>(vm: &VirtualMachine, handler: &MutableObject, args: T)
    where
        T: IntoFuncArgs,
    {
        vm.invoke(&handler.read().clone(), args).ok();
    }

    #[pyimpl]
    impl PyExpatLikeXmlParser {
        /// Create a parser with all handlers set to `None` and
        /// `buffer_text` set to `False`.
        fn new(vm: &VirtualMachine) -> PyResult<PyExpatLikeXmlParserRef> {
            Ok(PyExpatLikeXmlParser {
                start_element: MutableObject::new(vm.ctx.none()),
                end_element: MutableObject::new(vm.ctx.none()),
                character_data: MutableObject::new(vm.ctx.none()),
                entity_decl: MutableObject::new(vm.ctx.none()),
                buffer_text: MutableObject::new(vm.ctx.new_bool(false)),
            }
            .into_ref(vm))
        }

        /// Expose the expat-style handler attributes on the class.
        #[extend_class]
        fn extend_class_with_fields(ctx: &PyContext, class: &PyTypeRef) {
            let mut attributes = class.attributes.write();

            create_property!(ctx, attributes, "StartElementHandler", start_element);
            create_property!(ctx, attributes, "EndElementHandler", end_element);
            create_property!(ctx, attributes, "CharacterDataHandler", character_data);
            create_property!(ctx, attributes, "EntityDeclHandler", entity_decl);
            create_property!(ctx, attributes, "buffer_text", buffer_text);
        }

        // Shared reader configuration: CDATA and whitespace are reported as
        // character events, and adjacent character events are NOT merged.
        fn create_config(&self) -> xml::ParserConfig {
            xml::ParserConfig::new()
                .cdata_to_characters(true)
                .coalesce_characters(false)
                .whitespace_to_characters(true)
        }

        /// Drain the event reader, dispatching start/end/character events to
        /// the registered Python handlers. Parse errors and all other event
        /// kinds are ignored (`_ => {}`).
        fn do_parse<T>(&self, vm: &VirtualMachine, parser: xml::EventReader<T>)
        where
            T: std::io::Read,
        {
            for e in parser {
                match e {
                    Ok(XmlEvent::StartElement { name, attributes, .. }) => {
                        // Attributes are passed to Python as a {name: value} dict.
                        let dict = vm.ctx.new_dict();
                        for attribute in attributes {
                            dict.set_item(
                                attribute.name.local_name.as_str(),
                                vm.ctx.new_str(attribute.value),
                                vm,
                            )
                            .unwrap();
                        }

                        let name_str = PyStr::from(name.local_name).into_ref(vm);
                        invoke_handler(vm, &self.start_element, (name_str, dict.into_object()));
                    }
                    Ok(XmlEvent::EndElement { name, .. }) => {
                        let name_str = PyStr::from(name.local_name).into_ref(vm);
                        invoke_handler(vm, &self.end_element, (name_str,));
                    }
                    Ok(XmlEvent::Characters(chars)) => {
                        let str = PyStr::from(chars).into_ref(vm);
                        invoke_handler(vm, &self.character_data, (str,));
                    }
                    _ => {}
                }
            }
        }

        /// `Parse(data)` — parse a complete document held in a string.
        /// `_isfinal` is accepted for expat API compatibility but unused.
        #[pymethod(name = "Parse")]
        fn parse(&self, data: PyStrRef, _isfinal: OptionalArg<bool>, vm: &VirtualMachine) {
            let reader = Cursor::<Vec<u8>>::new(data.as_str().as_bytes().to_vec());
            let parser = self.create_config().create_reader(reader);
            self.do_parse(vm, parser);
        }

        /// `ParseFile(file)` — read the whole file-like object via its
        /// `read()` method, then parse the buffered bytes.
        #[pymethod(name = "ParseFile")]
        fn parse_file(&self, file: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
            // todo: read chunks at a time
            let read_res = vm.call_method(&file, "read", ())?;
            let bytes_like = PyBytesLike::try_from_object(vm, read_res)?;
            let buf = bytes_like.borrow_buf().to_vec();
            let reader = Cursor::new(buf);
            let parser = self.create_config().create_reader(reader);
            self.do_parse(vm, parser);

            // todo: return value
            Ok(())
        }
    }

    // Arguments accepted (and currently ignored) by ParserCreate.
    #[derive(FromArgs)]
    #[allow(dead_code)]
    struct ParserCreateArgs {
        #[pyarg(any, optional)]
        encoding: OptionalArg<PyStrRef>,
        #[pyarg(any, optional)]
        namespace_separator: OptionalArg<PyStrRef>,
        #[pyarg(any, optional)]
        intern: OptionalArg<PyStrRef>,
    }

    /// `ParserCreate(...)` — all arguments are ignored; a fresh parser with
    /// default handlers is returned.
    #[pyfunction(name = "ParserCreate")]
    fn parser_create(
        _args: ParserCreateArgs,
        vm: &VirtualMachine,
    ) -> PyResult<PyExpatLikeXmlParserRef> {
        PyExpatLikeXmlParser::new(vm)
    }
}

// Placeholder submodules; CPython fills these with error strings and
// content-model constants.
#[pymodule(name = "model")]
mod _model {}

#[pymodule(name = "errors")]
mod _errors {}
33.903743
96
0.549527
e22831bcaa81e811f1965398356a6098955256e1
8,970
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A clone of the standard library's `RefCell` type with extra debugging //! support in non-release builds. //! //! Whenever a borrow error happens the current //! locations of where known borrows were created will be printed out as well. //! //! # Examples //! //! ```no_run //! use debug_cell::RefCell; //! //! let r = RefCell::new(3); //! let a = r.borrow(); //! //! // In debug builds this will print that the cell is currently borrowed //! // above, and in release builds it will behave the same as the standard //! // library's `RefCell` //! let b = r.borrow_mut(); //! ``` #![deny(missing_docs)] #[cfg(debug_assertions)] extern crate backtrace; #[cfg(debug_assertions)] use std::cell::RefCell as StdRefCell; use std::cell::{Cell, UnsafeCell}; use std::ops::{Deref, DerefMut}; /// A clone of the standard library's `RefCell` type. pub struct RefCell<T: ?Sized> { borrow: BorrowFlag, value: UnsafeCell<T>, } #[cfg(not(debug_assertions))] type Location = (); #[cfg(debug_assertions)] type Location = backtrace::Backtrace; /// An enumeration of values returned from the `state` method on a `RefCell<T>`. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum BorrowState { /// The cell is currently being read, there is at least one active `borrow`. Reading, /// The cell is currently being written to, there is an active `borrow_mut`. Writing, /// There are no outstanding borrows on this cell. 
Unused, } // Values [1, MAX-1] represent the number of `Ref` active // (will not outgrow its range since `usize` is the size of the address space) struct BorrowFlag { flag: Cell<usize>, #[cfg(debug_assertions)] locations: StdRefCell<Vec<Location>>, } const UNUSED: usize = 0; const WRITING: usize = !0; impl<T> RefCell<T> { /// Creates a new `RefCell` containing `value`. pub fn new(value: T) -> RefCell<T> { RefCell { borrow: BorrowFlag::new(), value: UnsafeCell::new(value), } } /// Consumes the `RefCell`, returning the wrapped value. pub fn into_inner(self) -> T { debug_assert!(self.borrow.flag.get() == UNUSED); unsafe { self.value.into_inner() } } } impl<T: ?Sized> RefCell<T> { /// Immutably borrows the wrapped value. /// /// The borrow lasts until the returned `Ref` exits scope. Multiple /// immutable borrows can be taken out at the same time. /// /// # Panics /// /// Panics if the value is currently mutably borrowed. #[cfg_attr(debug_assertions, inline(never))] pub fn borrow<'a>(&'a self) -> Ref<'a, T> { match BorrowRef::new(&self.borrow) { Some(b) => Ref { _value: unsafe { &*self.value.get() }, _borrow: b, }, None => self.panic("mutably borrowed"), } } /// Mutably borrows the wrapped value. /// /// The borrow lasts until the returned `RefMut` exits scope. The value /// cannot be borrowed while this borrow is active. /// /// # Panics /// /// Panics if the value is currently borrowed. #[cfg_attr(debug_assertions, inline(never))] pub fn borrow_mut<'a>(&'a self) -> RefMut<'a, T> { match BorrowRefMut::new(&self.borrow) { Some(b) => RefMut { _value: unsafe { &mut *self.value.get() }, _borrow: b, }, None => self.panic("borrowed"), } } #[cfg(not(debug_assertions))] fn panic(&self, msg: &str) -> ! { panic!("RefCell<T> already {}", msg) } #[cfg(debug_assertions)] #[allow(unused_must_use)] fn panic(&self, msg: &str) -> ! 
{ let mut msg = format!("RefCell<T> already {}", msg); let locations = self.borrow.locations.borrow(); if locations.len() > 0 { msg.push_str("\ncurrent active borrows: \n"); for b in locations.iter() { msg.push_str(&format!("-------------------------\n{:?}\n", b)); } msg.push_str("\n\n"); } panic!(msg) } } #[cfg(not(debug_assertions))] impl BorrowFlag { #[inline] fn new() -> BorrowFlag { BorrowFlag { flag: Cell::new(UNUSED) } } #[inline] fn push(&self, _caller: Location) {} #[inline] fn pop(&self) {} } #[cfg(debug_assertions)] impl BorrowFlag { fn new() -> BorrowFlag { BorrowFlag { flag: Cell::new(UNUSED), locations: StdRefCell::new(Vec::new()), } } fn push(&self, caller: Location) { self.locations.borrow_mut().push(caller); } fn pop(&self) { self.locations.borrow_mut().pop(); } } #[cfg(not(debug_assertions))] #[inline] fn get_caller() -> Location {} #[inline(never)] #[cfg(debug_assertions)] fn get_caller() -> Location { backtrace::Backtrace::new() } unsafe impl<T: ?Sized> Send for RefCell<T> where T: Send {} impl<T: Clone> Clone for RefCell<T> { #[inline] fn clone(&self) -> RefCell<T> { RefCell::new(self.borrow().clone()) } } impl<T:Default> Default for RefCell<T> { #[inline] fn default() -> RefCell<T> { RefCell::new(Default::default()) } } impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> { #[inline] fn eq(&self, other: &RefCell<T>) -> bool { *self.borrow() == *other.borrow() } } impl<T: ?Sized + Eq> Eq for RefCell<T> {} struct BorrowRef<'b> { borrow: &'b BorrowFlag, } impl<'b> BorrowRef<'b> { #[cfg_attr(debug_assertions, inline(never))] #[cfg_attr(not(debug_assertions), inline)] fn new(borrow: &'b BorrowFlag) -> Option<BorrowRef<'b>> { let flag = borrow.flag.get(); if flag == WRITING { return None } borrow.flag.set(flag + 1); borrow.push(get_caller()); Some(BorrowRef { borrow: borrow }) } } impl<'b> Drop for BorrowRef<'b> { #[inline] fn drop(&mut self) { let flag = self.borrow.flag.get(); debug_assert!(flag != WRITING && flag != UNUSED); 
self.borrow.flag.set(flag - 1); self.borrow.pop(); } } /// Wraps a borrowed reference to a value in a `RefCell` box. /// A wrapper type for an immutably borrowed value from a `RefCell<T>`. /// /// See the [module-level documentation](index.html) for more. pub struct Ref<'b, T: ?Sized + 'b> { // FIXME #12808: strange name to try to avoid interfering with // field accesses of the contained type via Deref _value: &'b T, _borrow: BorrowRef<'b>, } impl<'b, T: ?Sized> Deref for Ref<'b, T> { type Target = T; fn deref(&self) -> &T { self._value } } struct BorrowRefMut<'b> { borrow: &'b BorrowFlag, } impl<'b> BorrowRefMut<'b> { #[cfg_attr(debug_assertions, inline(never))] #[cfg_attr(not(debug_assertions), inline)] fn new(borrow: &'b BorrowFlag) -> Option<BorrowRefMut<'b>> { if borrow.flag.get() != UNUSED { return None } borrow.flag.set(WRITING); borrow.push(get_caller()); Some(BorrowRefMut { borrow: borrow }) } } impl<'b> Drop for BorrowRefMut<'b> { #[inline] fn drop(&mut self) { debug_assert!(self.borrow.flag.get() == WRITING); self.borrow.flag.set(UNUSED); self.borrow.pop(); } } /// A wrapper type for a mutably borrowed value from a `RefCell<T>`. 
pub struct RefMut<'b, T: ?Sized + 'b> { // FIXME #12808: strange name to try to avoid interfering with // field accesses of the contained type via Deref _value: &'b mut T, _borrow: BorrowRefMut<'b>, } impl<'b, T: ?Sized> Deref for RefMut<'b, T> { type Target = T; fn deref(&self) -> &T { self._value } } impl<'b, T: ?Sized> DerefMut for RefMut<'b, T> { fn deref_mut(&mut self) -> &mut T { self._value } } #[cfg(test)] mod tests { use super::RefCell; #[test] fn ok_borrows() { let a = RefCell::new(2); let b = a.borrow(); let c = a.borrow(); assert_eq!(*b, 2); assert_eq!(*c, 2); drop((b, c)); let mut b = a.borrow_mut(); assert_eq!(*b, 2); *b = 4; drop(b); assert_eq!(*a.borrow(), 4); } #[should_panic] #[test] fn bad_borrow_mut() { let a = RefCell::new(2); let _a = a.borrow(); a.borrow_mut(); } #[should_panic] #[test] fn bad_borrow() { let a = RefCell::new(2); let _a = a.borrow_mut(); a.borrow(); } }
25.702006
80
0.581271
f4ca2327970278ad65307d52c97d205456712284
384
use super::*; use assert2::assert; type Range<const START: usize, const END: usize> = RiUsize<START, END>; #[test] fn non_empty_range_returns_expected_value() { // Given const MIN: usize = 0; const MAX: usize = 11; type Sut = Range<MIN, MAX>; let expected = false; // When let result = Sut::is_empty(); // Then assert!(result == expected); }
17.454545
71
0.614583
ff6ae2b3a28c5d7d9a8470bda965645b4978def4
576
use bytes::*;

use crate::management::interface::command::Command;
use crate::management::interface::controller::Controller;

/// A management-interface request: an opcode, the target controller index,
/// and an opaque parameter payload.
#[derive(Debug)]
pub struct Request {
    pub opcode: Command,
    pub controller: Controller,
    pub param: Bytes,
}

// FIX (clippy::from_over_into): implement `From<Request> for Bytes` instead
// of `Into<Bytes> for Request`. The standard blanket impl still provides
// `Into<Bytes>` for `Request`, so existing `.into()` call sites are
// unaffected, and callers additionally gain `Bytes::from(request)`.
impl From<Request> for Bytes {
    /// Serialize the request into the management-protocol wire format:
    /// a 6-byte little-endian header (opcode, controller index, parameter
    /// length) followed by the raw parameter bytes.
    fn from(request: Request) -> Self {
        let mut buf = BytesMut::with_capacity(6 + request.param.len());
        buf.put_u16_le(request.opcode as u16);
        buf.put_u16_le(request.controller.into());
        buf.put_u16_le(request.param.len() as u16);
        buf.put(request.param);
        buf.freeze()
    }
}
23.04
68
0.640625
edc2e6e8423796f3b0aabf96cf472407b512fba5
9,060
#[cfg(feature = "flow-ctrl")]
pub(super) mod flow_manager;

#[cfg(feature = "flow-ctrl")]
use super::AsyncLock;
#[cfg(feature = "flow-ctrl")]
use super::HostControllerInterface;
use super::{common, HciAclDataInterface, HostInterface};
use crate::l2cap::{AclData, AclDataFragment, ConnectionChannel};
use alloc::vec::Vec;
#[cfg(feature = "flow-ctrl")]
use core::task::Waker;
use core::{
    future::Future,
    ops::Deref,
    pin::Pin,
    task::{Context, Poll},
};

/// A HCI channel for a LE-U Logical Link
///
/// This is a HCI connection channel over L2CAP. It is only for a L2CAP LE-U logical link as it does
/// not support an ACL-U link. The default configuration for a LE-U logical link will be used for
/// data sent and received through this channel. This configuration cannot be changed as there is
/// no attached flow controller. The user of this channel must be aware of both the controllers
/// maximum HCI data packet size and the amount of packets sent to the HCI LE data buffer (or the
/// shared with BR/EDR data buffer if there is no LE only data buffer).
#[bo_tie_macros::host_interface]
pub(super) struct HciLeUChannel<I, HI>
where
    HI: Deref<Target = HostInterface<I>>,
    I: HciAclDataInterface,
{
    // Current negotiated MTU; interior-mutable because `set_mtu` takes `&self`.
    mtu: core::cell::Cell<usize>,
    // Upper clamp applied by `set_mtu`.
    maximum_mtu: usize,
    // Lower clamp applied by `set_mtu` (the LE-U minimum).
    minimum_mtu: usize,
    handle: common::ConnectionHandle,
    hi: HI,
}

#[cfg(not(feature = "flow-ctrl"))]
impl<I, HI> HciLeUChannel<I, HI>
where
    HI: Deref<Target = HostInterface<I>>,
    I: HciAclDataInterface,
{
    /// Create a new raw `HciLeUChannel`
    ///
    /// This HciLeUChannel provides no flow control of sent data to the controller. It up to the
    /// user to make sure that the host does not send either to large of data packets or to many
    /// data packets to the controller.
    ///
    /// # Panics
    /// Panics if `maximum_mtu` resolves to less than the LE-U minimum MTU.
    pub fn new_raw<T>(hi: HI, handle: common::ConnectionHandle, maximum_mtu: T) -> Self
    where
        T: Into<Option<u16>>,
    {
        use crate::l2cap::MinimumMtu;

        // `None` falls back to the LE-U minimum MTU.
        let max_mtu: usize = maximum_mtu
            .into()
            .map(|mtu| mtu.into())
            .unwrap_or(crate::l2cap::LeU::MIN_MTU);

        assert!(max_mtu >= crate::l2cap::LeU::MIN_MTU);

        // Register this handle with the interface so incoming data is routed here.
        hi.interface.start_receiver(handle);

        HciLeUChannel {
            mtu: crate::l2cap::LeU::MIN_MTU.into(),
            maximum_mtu: max_mtu,
            minimum_mtu: crate::l2cap::LeU::MIN_MTU.into(),
            handle,
            hi,
        }
    }
}

#[bo_tie_macros::host_interface]
impl<I, HI> HciLeUChannel<I, HI>
where
    HI: Deref<Target = HostInterface<I>>,
    I: HciAclDataInterface,
    Self: crate::l2cap::ConnectionChannel,
{
    /// Get the MTU for a specified data packet
    ///
    /// Data packets can have a different MTU based on the request to use a specified MTU by `data`.
    fn get_send_mtu(&self, data: &AclData) -> usize {
        match data.get_mtu() {
            crate::l2cap::AclDataSuggestedMtu::Minimum => self.min_mtu(),
            crate::l2cap::AclDataSuggestedMtu::Channel => self.get_mtu(),
            // A suggested MTU is clamped into [min_mtu, channel mtu].
            crate::l2cap::AclDataSuggestedMtu::Mtu(mtu) => self.get_mtu().min(mtu).max(self.min_mtu()),
        }
    }
}

/// The 'raw' connection channel implementation
///
/// This implementation uses a [`RawSender`](RawSender) for `SendFut`, which provides no flow
/// control on the number of packets that can be sent to the controller (from the host). However,
/// the packet size is limited to the minimum size for the type of connection channel (either LE
/// or ACL)
#[cfg(not(feature = "flow-ctrl"))]
impl<I, HI> crate::l2cap::ConnectionChannel for HciLeUChannel<I, HI>
where
    HI: Deref<Target = HostInterface<I>>,
    I: HciAclDataInterface,
{
    type SendFut = RawSender;

    type SendFutErr = ();

    fn send(&self, data: AclData) -> Self::SendFut {
        use crate::hci::{AclBroadcastFlag, AclPacketBoundary, HciAclData};

        let mtu = self.get_send_mtu(&data);

        let packet = data.into_raw_data();

        // Fragment the L2CAP payload: the first fragment is marked
        // FirstNonFlushable, all subsequent fragments ContinuingFragment.
        packet
            .chunks(mtu + HciAclData::HEADER_SIZE)
            .enumerate()
            .for_each(|(i, chunk)| {
                let hci_acl_data = if i == 0 {
                    HciAclData::new(
                        self.handle,
                        AclPacketBoundary::FirstNonFlushable,
                        AclBroadcastFlag::NoBroadcast,
                        chunk.to_vec(),
                    )
                } else {
                    HciAclData::new(
                        self.handle,
                        AclPacketBoundary::ContinuingFragment,
                        AclBroadcastFlag::NoBroadcast,
                        chunk.to_vec(),
                    )
                };

                self.hi
                    .interface
                    .send(hci_acl_data)
                    .expect("Failed to send hci acl data");
            });

        RawSender
    }

    fn set_mtu(&self, mtu: u16) {
        // Clamp the requested MTU into [min_mtu, max_mtu].
        self.mtu.set(<usize>::from(mtu).max(self.min_mtu()).min(self.max_mtu()));
    }

    fn get_mtu(&self) -> usize {
        self.mtu.get()
    }

    fn max_mtu(&self) -> usize {
        self.maximum_mtu
    }

    fn min_mtu(&self) -> usize {
        self.minimum_mtu
    }

    fn receive(&self, waker: &core::task::Waker) -> Option<alloc::vec::Vec<crate::l2cap::AclDataFragment>> {
        self.hi
            .interface
            .receive(&self.handle, waker)
            .and_then(|received| match received {
                Ok(packets) => packets
                    .into_iter()
                    .map(|packet| packet.into_acl_fragment())
                    .collect::<Vec<AclDataFragment>>()
                    .into(),
                Err(e) => {
                    // Receive errors are logged and reported as "no data".
                    log::error!("Failed to receive data: {}", e);
                    Vec::new().into()
                }
            })
    }
}

#[cfg(feature = "flow-ctrl")]
impl<I, HI, M> HciLeUChannel<I, HI, M>
where
    HI: Deref<Target = HostInterface<I, M>>,
    I: HostControllerInterface + HciAclDataInterface + 'static,
    M: for<'a> AsyncLock<'a>,
{
    /// Create a new `HciLeUChannel` from the `HciDataPacketFlowManager` of a `HostInterface` for
    /// LE-U
    ///
    /// This flow controller is attached to the flow manager of the `hi` input. The other inputs
    /// are the connection handle for the connection and a maximum mtu value.
    ///
    /// # Note
    /// No validation is made for the value of `maximum_mtu`.
    pub fn new_le_flow_controller(hi: HI, handle: common::ConnectionHandle, maximum_mtu: usize) -> Self {
        use crate::l2cap::MinimumMtu;

        Self {
            mtu: crate::l2cap::LeU::MIN_MTU.into(),
            maximum_mtu: maximum_mtu.into(),
            minimum_mtu: crate::l2cap::LeU::MIN_MTU,
            handle,
            hi,
        }
    }
}

#[cfg(feature = "flow-ctrl")]
impl<I, HI, M> crate::l2cap::ConnectionChannel for HciLeUChannel<I, HI, M>
where
    HI: Deref<Target = HostInterface<I, M>> + Unpin + Clone + 'static,
    I: HciAclDataInterface + HostControllerInterface + Unpin + 'static,
    M: for<'a> AsyncLock<'a>,
{
    type SendFut = flow_manager::SendFuture<HI, I>;

    type SendFutErr = flow_manager::FlowControllerError<I>;

    fn send(&self, data: AclData) -> Self::SendFut {
        // The flow manager imposes its own payload ceiling; send with the
        // smaller of the channel's MTU and the flow controller's limit.
        let max_fc_mtu = self.hi.flow_controller.get_max_payload_size() - AclData::HEADER_SIZE;

        flow_manager::SendFuture::new(
            self.hi.clone(),
            self.get_send_mtu(&data).min(max_fc_mtu),
            data,
            self.handle,
        )
    }

    fn set_mtu(&self, mtu: u16) {
        self.mtu.set(<usize>::from(mtu).max(self.min_mtu()).min(self.max_mtu()));
    }

    fn get_mtu(&self) -> usize {
        self.mtu.get()
    }

    fn max_mtu(&self) -> usize {
        self.maximum_mtu
    }

    fn min_mtu(&self) -> usize {
        self.minimum_mtu
    }

    fn receive(&self, waker: &Waker) -> Option<Vec<AclDataFragment>> {
        self.hi
            .interface
            .receive(&self.handle, waker)
            .and_then(|received| match received {
                Ok(packets) => packets
                    .into_iter()
                    .map(|packet| packet.into_acl_fragment())
                    .collect::<Vec<AclDataFragment>>()
                    .into(),
                Err(e) => {
                    log::error!("Failed to receive data: {}", e);
                    Vec::new().into()
                }
            })
    }
}

#[bo_tie_macros::host_interface]
impl<I, HI> core::ops::Drop for HciLeUChannel<I, HI>
where
    HI: Deref<Target = HostInterface<I>>,
    I: HciAclDataInterface,
{
    // Deregister this handle so the interface stops routing data here.
    fn drop(&mut self) {
        self.hi.interface.stop_receiver(&self.handle)
    }
}

/// A 'raw' sender
///
/// This is used for the sending future in a raw connection channel. This sender provides no flow
/// control capabilities; `send` completes immediately and always succeeds.
/// (NOTE(review): the original doc sentence was truncated here — "can either
/// overflow or" — the intended continuation should be restored from upstream.)
pub(super) struct RawSender;

impl Future for RawSender {
    type Output = Result<(), ()>;

    fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
        Poll::Ready(Ok(()))
    }
}
31.027397
108
0.578146
18a11e772088f2515f703210e7b3d1dd1f91689f
41,946
// Generated from definition io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService
// NOTE(review): machine-generated bindings (see header) — prefer fixing the
// generator over hand-editing; edits here would be lost on regeneration.

/// APIService represents a server for a particular GroupVersion. Name must be "version.group".
#[derive(Clone, Debug, Default, PartialEq)]
pub struct APIService {
    // Standard Kubernetes object metadata (ObjectMeta).
    pub metadata: Option<crate::v1_9::apimachinery::pkg::apis::meta::v1::ObjectMeta>,

    /// Spec contains information for locating and communicating with a server
    pub spec: Option<crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIServiceSpec>,

    /// Status contains derived information about an API server
    pub status: Option<crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIServiceStatus>,
}

// Begin apiregistration.k8s.io/v1beta1/APIService

// Generated from operation createApiregistrationV1beta1APIService

impl APIService {
    /// create an APIService
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`CreateAPIServiceResponse`]`>` constructor, or [`CreateAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn create_api_service(
        body: &crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService,
        optional: CreateAPIServiceOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateAPIServiceResponse>), crate::RequestError> {
        let CreateAPIServiceOptional {
            pretty,
        } = optional;
        // Build `POST /apis/apiregistration.k8s.io/v1beta1/apiservices` with
        // optional query parameters appended to the trailing `?`.
        let __url = "/apis/apiregistration.k8s.io/v1beta1/apiservices?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();

        let mut __request = http::Request::post(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Optional parameters of [`APIService::create_api_service`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct CreateAPIServiceOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}

/// Use `<CreateAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::create_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum CreateAPIServiceResponse {
    Ok(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Created(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Accepted(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for CreateAPIServiceResponse {
    // Map 200/201/202 to typed variants; anything else falls through to
    // `Other`. A JSON EOF error means the buffer is a partial response, so
    // the caller is asked for more data rather than failing.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((CreateAPIServiceResponse::Ok(result), buf.len()))
            },
            http::StatusCode::CREATED => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((CreateAPIServiceResponse::Created(result), buf.len()))
            },
            http::StatusCode::ACCEPTED => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((CreateAPIServiceResponse::Accepted(result), buf.len()))
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((CreateAPIServiceResponse::Other(result), read))
            },
        }
    }
}

//
// Generated from operation deleteApiregistrationV1beta1APIService

impl APIService {
    /// delete an APIService
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`DeleteAPIServiceResponse`]`>` constructor, or [`DeleteAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the APIService
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_api_service(
        name: &str,
        optional: crate::v1_9::DeleteOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteAPIServiceResponse>), crate::RequestError> {
        // `name` is a path segment, so it is percent-encoded.
        let __url = format!("/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );

        let mut __request = http::Request::delete(__url);
        // Delete options travel in the request body, not the query string.
        let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Use `<DeleteAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::delete_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteAPIServiceResponse {
    OkStatus(crate::v1_9::apimachinery::pkg::apis::meta::v1::Status),
    OkValue(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for DeleteAPIServiceResponse {
    // A 200 from a delete can carry either a `Status` or the deleted object;
    // the `kind` field of the raw JSON map disambiguates the two.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf)
                {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                let is_status = match result.get("kind") {
                    Some(serde_json::Value::String(s)) if s == "Status" => true,
                    _ => false,
                };
                if is_status {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteAPIServiceResponse::OkStatus(result), buf.len()))
                }
                else {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteAPIServiceResponse::OkValue(result), buf.len()))
                }
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((DeleteAPIServiceResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation deleteApiregistrationV1beta1CollectionAPIService

impl APIService {
    /// delete collection of APIService
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionAPIServiceResponse`]`>` constructor, or [`DeleteCollectionAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `delete_optional`
    ///
    ///     Delete options. Use `Default::default()` to not pass any.
    ///
    /// * `list_optional`
    ///
    ///     List options. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_collection_api_service(
        delete_optional: crate::v1_9::DeleteOptional<'_>,
        list_optional: crate::v1_9::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionAPIServiceResponse>), crate::RequestError> {
        // List options select which items are deleted (query string);
        // delete options go in the JSON body.
        let __url = "/apis/apiregistration.k8s.io/v1beta1/apiservices?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        list_optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        let mut __request = http::Request::delete(__url);
        let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Use `<DeleteCollectionAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::delete_collection_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteCollectionAPIServiceResponse {
    OkStatus(crate::v1_9::apimachinery::pkg::apis::meta::v1::Status),
    OkValue(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIServiceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for DeleteCollectionAPIServiceResponse {
    // As with single delete, the `kind` field distinguishes a `Status`
    // payload from the deleted list on a 200 response.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                let is_status = match result.get("kind") {
                    Some(serde_json::Value::String(s)) if s == "Status" => true,
                    _ => false,
                };
                if is_status {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteCollectionAPIServiceResponse::OkStatus(result), buf.len()))
                }
                else {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteCollectionAPIServiceResponse::OkValue(result), buf.len()))
                }
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((DeleteCollectionAPIServiceResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation listApiregistrationV1beta1APIService

impl APIService {
    /// list or watch objects of kind APIService
    ///
    /// This operation only supports listing all items of this type.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ListAPIServiceResponse`]`>` constructor, or [`ListAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn list_api_service(
        optional: crate::v1_9::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListAPIServiceResponse>), crate::RequestError> {
        let __url = "/apis/apiregistration.k8s.io/v1beta1/apiservices?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        // GET with an empty body; list options live entirely in the query.
        let mut __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Use `<ListAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::list_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListAPIServiceResponse {
    Ok(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIServiceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for ListAPIServiceResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ListAPIServiceResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((ListAPIServiceResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation patchApiregistrationV1beta1APIService

impl APIService {
    /// partially update the specified APIService
    ///
    /// Use the returned
    /// [`crate::ResponseBody`]`<`[`PatchAPIServiceResponse`]`>` constructor, or [`PatchAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the APIService
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn patch_api_service(
        name: &str,
        body: &crate::v1_9::apimachinery::pkg::apis::meta::v1::Patch,
        optional: crate::v1_9::PatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchAPIServiceResponse>), crate::RequestError> {
        let __url = format!("/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        let mut __request = http::Request::patch(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        // The Content-Type must match the patch flavor being sent.
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
            crate::v1_9::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
            crate::v1_9::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
            crate::v1_9::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
        }));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Use `<PatchAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::patch_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchAPIServiceResponse {
    Ok(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for PatchAPIServiceResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((PatchAPIServiceResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((PatchAPIServiceResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation readApiregistrationV1beta1APIService

impl APIService {
    /// read the specified APIService
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReadAPIServiceResponse`]`>` constructor, or [`ReadAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the APIService
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn read_api_service(
        name: &str,
        optional: ReadAPIServiceOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadAPIServiceResponse>), crate::RequestError> {
        let ReadAPIServiceOptional {
            exact,
            export,
            pretty,
        } = optional;
        let __url = format!("/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // bool options are stringified ("true"/"false") for the query string.
        if let Some(exact) = exact {
            __query_pairs.append_pair("exact", &exact.to_string());
        }
        if let Some(export) = export {
            __query_pairs.append_pair("export", &export.to_string());
        }
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();

        let mut __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Optional parameters of [`APIService::read_api_service`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadAPIServiceOptional<'a> {
    /// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    pub exact: Option<bool>,
    /// Should this value be exported. Export strips fields that a user can not specify.
    pub export: Option<bool>,
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}

/// Use `<ReadAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::read_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadAPIServiceResponse {
    Ok(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for ReadAPIServiceResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReadAPIServiceResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((ReadAPIServiceResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation replaceApiregistrationV1beta1APIService

impl APIService {
    /// replace the specified APIService
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceAPIServiceResponse`]`>` constructor, or [`ReplaceAPIServiceResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the APIService
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_api_service(
        name: &str,
        body: &crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService,
        optional: ReplaceAPIServiceOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceAPIServiceResponse>), crate::RequestError> {
        let ReplaceAPIServiceOptional {
            pretty,
        } = optional;
        let __url = format!("/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();

        // Replace is a full-object PUT with a JSON body.
        let mut __request = http::Request::put(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Optional parameters of [`APIService::replace_api_service`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceAPIServiceOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}

/// Use `<ReplaceAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::replace_api_service`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceAPIServiceResponse {
    Ok(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Created(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for ReplaceAPIServiceResponse {
    // 200 → replaced object, 201 → created object, anything else → Other.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReplaceAPIServiceResponse::Ok(result), buf.len()))
            },
            http::StatusCode::CREATED => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReplaceAPIServiceResponse::Created(result), buf.len()))
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((ReplaceAPIServiceResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation replaceApiregistrationV1beta1APIServiceStatus

impl APIService {
    /// replace status of the specified APIService
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceAPIServiceStatusResponse`]`>` constructor, or [`ReplaceAPIServiceStatusResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the APIService
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_api_service_status(
        name: &str,
        body: &crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService,
        optional: ReplaceAPIServiceStatusOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceAPIServiceStatusResponse>), crate::RequestError> {
        let ReplaceAPIServiceStatusOptional {
            pretty,
        } = optional;
        // Same as replace, but targets the `/status` subresource.
        let __url = format!("/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}/status?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();

        let mut __request = http::Request::put(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}

/// Optional parameters of [`APIService::replace_api_service_status`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceAPIServiceStatusOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}

/// Use `<ReplaceAPIServiceStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::replace_api_service_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceAPIServiceStatusResponse {
    Ok(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Created(crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIService),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for ReplaceAPIServiceStatusResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReplaceAPIServiceStatusResponse::Ok(result), buf.len()))
            },
            http::StatusCode::CREATED => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReplaceAPIServiceStatusResponse::Created(result), buf.len()))
            },
            _ => {
                let (result, read) = if buf.is_empty() {
                    (Ok(None), 0)
                }
                else {
                    match serde_json::from_slice(buf) {
                        Ok(value) => (Ok(Some(value)), buf.len()),
                        Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                        Err(err) => (Err(err), 0),
                    }
                };
                Ok((ReplaceAPIServiceStatusResponse::Other(result), read))
            },
        }
    }
}

// Generated from operation watchApiregistrationV1beta1APIService

impl APIService {
    /// list or watch objects of kind APIService
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
/// /// Use the returned [`crate::ResponseBody`]`<`[`WatchAPIServiceResponse`]`>` constructor, or [`WatchAPIServiceResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. #[cfg(feature = "api")] pub fn watch_api_service( optional: crate::v1_9::WatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchAPIServiceResponse>), crate::RequestError> { let __url = "/apis/apiregistration.k8s.io/v1beta1/apiservices?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<WatchAPIServiceResponse as Response>::try_from_parts` to parse the HTTP response body of [`APIService::watch_api_service`] #[cfg(feature = "api")] #[derive(Debug)] pub enum WatchAPIServiceResponse { Ok(crate::v1_9::apimachinery::pkg::apis::meta::v1::WatchEvent<APIService>), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for WatchAPIServiceResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter(); let (result, byte_offset) = match deserializer.next() { Some(Ok(value)) => (value, deserializer.byte_offset()), Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Some(Err(err)) => return Err(crate::ResponseError::Json(err)), None => return Err(crate::ResponseError::NeedMoreData), }; Ok((WatchAPIServiceResponse::Ok(result), byte_offset)) }, _ => { let (result, 
read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((WatchAPIServiceResponse::Other(result), read)) }, } } } // End apiregistration.k8s.io/v1beta1/APIService impl crate::Resource for APIService { fn api_version() -> &'static str { "apiregistration.k8s.io/v1beta1" } fn group() -> &'static str { "apiregistration.k8s.io" } fn kind() -> &'static str { "APIService" } fn version() -> &'static str { "v1beta1" } } impl crate::Metadata for APIService { type Ty = crate::v1_9::apimachinery::pkg::apis::meta::v1::ObjectMeta; fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> { self.metadata.as_ref() } } impl<'de> serde::Deserialize<'de> for APIService { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_metadata, Key_spec, Key_status, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "metadata" => Field::Key_metadata, "spec" => Field::Key_spec, "status" => Field::Key_status, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = APIService; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "struct APIService") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> 
where A: serde::de::MapAccess<'de> { let mut value_metadata: Option<crate::v1_9::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None; let mut value_spec: Option<crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIServiceSpec> = None; let mut value_status: Option<crate::v1_9::kube_aggregator::pkg::apis::apiregistration::v1beta1::APIServiceStatus> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_api_version => { let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::api_version() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version())); } }, Field::Key_kind => { let value_kind: String = serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::kind() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind())); } }, Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?, Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?, Field::Key_status => value_status = serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(APIService { metadata: value_metadata, spec: value_spec, status: value_status, }) } } deserializer.deserialize_struct( "APIService", &[ "apiVersion", "kind", "metadata", "spec", "status", ], Visitor, ) } } impl serde::Serialize for APIService { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "APIService", 2 + self.metadata.as_ref().map_or(0, |_| 1) + self.spec.as_ref().map_or(0, |_| 1) + self.status.as_ref().map_or(0, |_| 1), )?; serde::ser::SerializeStruct::serialize_field(&mut 
state, "apiVersion", <Self as crate::Resource>::api_version())?; serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?; if let Some(value) = &self.metadata { serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?; } if let Some(value) = &self.spec { serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?; } if let Some(value) = &self.status { serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?; } serde::ser::SerializeStruct::end(state) } }
43.287926
185
0.564464
e88a46f90323f12b6931a649501f491e4bec3aeb
10,517
//! Low-level signing and signature-verification helpers for JSON Web Keys.
//!
//! Symmetric (HMAC), RSA, and ECDSA operations are provided. Implementations are
//! selected at compile time via the mutually exclusive `ring` / `rustcrypto`
//! cargo features; the cfg-gated functions below share names and contracts.

use num_bigint::{BigInt, Sign};
#[cfg(feature = "ring")]
use ring::{hmac, rand::SecureRandom, signature as ring_signature};

use crate::types::Base64UrlEncodedBytes;
use crate::{JsonWebKey, SignatureVerificationError, SigningError};

use super::{jwk::CoreJsonCurveType, CoreJsonWebKey, CoreJsonWebKeyType};

use std::ops::Deref;

/// Computes an HMAC tag over `msg` with the raw symmetric `key` using `hmac_alg`.
///
/// Returns the tag bytes. This function is infallible: `ring` accepts keys of
/// any length for HMAC.
#[cfg(feature = "ring")]
pub fn sign_hmac(key: &[u8], hmac_alg: hmac::Algorithm, msg: &[u8]) -> Vec<u8> {
    let signing_key = hmac::Key::new(hmac_alg, key);
    hmac::sign(&signing_key, msg).as_ref().into()
}

/// Verifies an HMAC `signature` over `msg` against the symmetric key material
/// stored in the JWK's `k` field.
///
/// # Errors
///
/// * [`SignatureVerificationError::InvalidKey`] if the JWK has no `k` component.
/// * [`SignatureVerificationError::CryptoError`] if the tag does not match.
#[cfg(feature = "ring")]
pub fn verify_hmac(
    key: &CoreJsonWebKey,
    hmac_alg: hmac::Algorithm,
    msg: &[u8],
    signature: &[u8],
) -> Result<(), SignatureVerificationError> {
    let k = key.k.as_ref().ok_or_else(|| {
        SignatureVerificationError::InvalidKey("Symmetric key `k` is missing".to_string())
    })?;
    let verification_key = hmac::Key::new(hmac_alg, k);
    hmac::verify(&verification_key, msg, signature)
        .map_err(|_| SignatureVerificationError::CryptoError("bad HMAC".to_string()))
}

/// Produces an RSA signature over `msg` with the given key pair and padding
/// scheme (e.g. PKCS#1 v1.5 or PSS), using `rng` for any randomness the
/// padding requires.
///
/// The output buffer is sized to the public modulus length, which is the
/// length `ring` requires for the signature.
///
/// # Errors
///
/// [`SigningError::CryptoError`] if the underlying `ring` signing call fails.
#[cfg(feature = "ring")]
pub fn sign_rsa(
    key: &ring_signature::RsaKeyPair,
    padding_alg: &'static dyn ring_signature::RsaEncoding,
    rng: &dyn SecureRandom,
    msg: &[u8],
) -> Result<Vec<u8>, SigningError> {
    let sig_len = key.public_modulus_len();
    let mut sig = vec![0; sig_len];
    key.sign(padding_alg, rng, msg, &mut sig)
        .map_err(|_| SigningError::CryptoError)?;
    Ok(sig)
}

/// Extracts the RSA public-key components (modulus `n`, exponent `e`) from a
/// JWK, still base64url-encoded.
///
/// Returns a `String` error message (mapped into a typed error by callers) if
/// the key type is not RSA or either component is absent.
fn rsa_public_key(
    key: &CoreJsonWebKey,
) -> Result<(&Base64UrlEncodedBytes, &Base64UrlEncodedBytes), String> {
    if *key.key_type() != CoreJsonWebKeyType::RSA {
        Err("RSA key required".to_string())
    } else {
        let n = key
            .n
            .as_ref()
            .ok_or_else(|| "RSA modulus `n` is missing".to_string())?;
        let e = key
            .e
            .as_ref()
            .ok_or_else(|| "RSA exponent `e` is missing".to_string())?;
        Ok((n, e))
    }
}

/// Extracts the elliptic-curve public-key components (`x`, `y` coordinates and
/// the curve identifier `crv`) from a JWK.
///
/// Returns a `String` error message (mapped into a typed error by callers) if
/// the key type is not EC or any component is absent.
fn ec_public_key(
    key: &CoreJsonWebKey,
) -> Result<
    (
        &Base64UrlEncodedBytes,
        &Base64UrlEncodedBytes,
        &CoreJsonCurveType,
    ),
    String,
> {
    if *key.key_type() != CoreJsonWebKeyType::EllipticCurve {
        Err("EC key required".to_string())
    } else {
        let x = key
            .x
            .as_ref()
            .ok_or_else(|| "EC `x` part is missing".to_string())?;
        let y = key
            .y
            .as_ref()
            .ok_or_else(|| "EC `y` part is missing".to_string())?;
        let crv = key
            .crv
            .as_ref()
            .ok_or_else(|| "EC `crv` part is missing".to_string())?;
        Ok((x, y, crv))
    }
}

/// Verifies an RSA `signature` over `msg` using the public key stored in the
/// JWK, with the hash/padding combination selected by `params`.
///
/// # Errors
///
/// * [`SignatureVerificationError::InvalidKey`] if the JWK is not a usable RSA key.
/// * [`SignatureVerificationError::CryptoError`] if the signature is invalid.
#[cfg(feature = "ring")]
pub fn verify_rsa_signature(
    key: &CoreJsonWebKey,
    params: &ring_signature::RsaParameters,
    msg: &[u8],
    signature: &[u8],
) -> Result<(), SignatureVerificationError> {
    let (n, e) = rsa_public_key(key).map_err(SignatureVerificationError::InvalidKey)?;
    // Parse `n` and `e` as big integers to prevent issues with leading zeros:
    // per https://datatracker.ietf.org/doc/html/rfc7518#section-6.3.1.1 the JWK
    // encoding may carry them, but `ring` rejects components with leading zero
    // octets. Re-serializing through `BigInt` strips them. `n` is always
    // unsigned (hence `Sign::Plus`).
    let n_bigint = BigInt::from_bytes_be(Sign::Plus, n.deref());
    let e_bigint = BigInt::from_bytes_be(Sign::Plus, e.deref());
    let public_key = ring_signature::RsaPublicKeyComponents {
        n: &n_bigint.to_bytes_be().1,
        e: &e_bigint.to_bytes_be().1,
    };
    public_key
        .verify(params, msg, signature)
        .map_err(|_| SignatureVerificationError::CryptoError("bad signature".to_string()))
}

/// Verifies an RSA `signature` over `msg` using the public key stored in the
/// JWK, with the given RustCrypto `padding` scheme.
///
/// NOTE(review): unlike the `ring` variant, `msg` here is expected to be the
/// pre-computed digest when a PKCS#1 v1.5 padding with a hash is used (see the
/// test below, which hashes before calling) — confirm against callers.
///
/// # Errors
///
/// * [`SignatureVerificationError::InvalidKey`] if the JWK is not a usable RSA key.
/// * [`SignatureVerificationError::CryptoError`] if the signature is invalid.
#[cfg(feature = "rustcrypto")]
pub fn verify_rsa_signature(
    key: &CoreJsonWebKey,
    padding: rsa::PaddingScheme,
    msg: &[u8],
    signature: &[u8],
) -> Result<(), SignatureVerificationError> {
    use rsa::PublicKey;
    let (n, e) = rsa_public_key(key).map_err(SignatureVerificationError::InvalidKey)?;
    // Parse `n` and `e` as big integers to prevent issues with leading zeros
    // (see https://datatracker.ietf.org/doc/html/rfc7518#section-6.3.1.1).
    // `n` is always unsigned.
    let n_bigint = rsa::BigUint::from_bytes_be(n.deref());
    let e_bigint = rsa::BigUint::from_bytes_be(e.deref());
    let public_key = rsa::RsaPublicKey::new(n_bigint, e_bigint)
        .map_err(|e| SignatureVerificationError::InvalidKey(format!("{}", e)))?;
    public_key
        .verify(padding, msg, signature)
        .map_err(|_| SignatureVerificationError::CryptoError("bad signature".to_string()))
}

/// Verifies an ECDSA `signature` over `msg` using the EC public key stored in
/// the JWK.
///
/// According to RFC5480, Section-2.2, implementations of Elliptic Curve
/// Cryptography MUST support the uncompressed point form. The first octet of
/// the octet string indicates whether the uncompressed or compressed form is
/// used; for the uncompressed form the first octet has to be 0x04.
/// According to https://briansmith.org/rustdoc/ring/signature/index.html#ecdsa__fixed-details-fixed-length-pkcs11-style-ecdsa-signatures,
/// to recover the X and Y coordinates from an octet string, the
/// Octet-String-To-Elliptic-Curve-Point Conversion is used (Section 2.3.4 of
/// https://www.secg.org/sec1-v2.pdf).
///
/// # Errors
///
/// * [`SignatureVerificationError::InvalidKey`] if the JWK is not a usable EC key.
/// * [`SignatureVerificationError::UnsupportedAlg`] for P-521 keys.
/// * [`SignatureVerificationError::CryptoError`] if the signature is invalid.
#[cfg(feature = "ring")]
pub fn verify_ec_signature(
    key: &CoreJsonWebKey,
    params: &'static ring_signature::EcdsaVerificationAlgorithm,
    msg: &[u8],
    signature: &[u8],
) -> Result<(), SignatureVerificationError> {
    let (x, y, crv) = ec_public_key(&key).map_err(SignatureVerificationError::InvalidKey)?;
    // P-521 is rejected up front: no verification algorithm is wired up for it.
    if *crv == CoreJsonCurveType::P521 {
        return Err(SignatureVerificationError::UnsupportedAlg(
            "P521".to_string(),
        ));
    }
    // Assemble the SEC1 uncompressed point: 0x04 || X || Y.
    let mut pk = vec![0x04];
    pk.extend(x.deref());
    pk.extend(y.deref());
    let public_key = ring_signature::UnparsedPublicKey::new(params, pk);
    public_key
        .verify(msg, signature)
        .map_err(|_| SignatureVerificationError::CryptoError("EC Signature was wrong".to_string()))
}

/// Verifies an ECDSA `signature` over `msg` using the EC public key stored in
/// the JWK (RustCrypto backend).
///
/// Only P-256 is currently supported; P-384 and P-521 return
/// [`SignatureVerificationError::UnsupportedAlg`].
///
/// # Errors
///
/// * [`SignatureVerificationError::InvalidKey`] if the JWK is not a usable EC key.
/// * [`SignatureVerificationError::UnsupportedAlg`] for P-384 / P-521 keys.
/// * [`SignatureVerificationError::CryptoError`] if the signature bytes are
///   malformed or verification fails.
#[cfg(feature = "rustcrypto")]
pub fn verify_ec_signature(
    key: &CoreJsonWebKey,
    msg: &[u8],
    signature: &[u8],
) -> Result<(), SignatureVerificationError> {
    use p256::ecdsa::signature::{Signature, Verifier};
    let (x, y, crv) = ec_public_key(&key).map_err(SignatureVerificationError::InvalidKey)?;
    if *crv == CoreJsonCurveType::P521 {
        return Err(SignatureVerificationError::UnsupportedAlg(
            "P521".to_string(),
        ));
    }
    // Assemble the SEC1 uncompressed point: 0x04 || X || Y.
    let mut pk = vec![0x04];
    pk.extend(x.deref());
    pk.extend(y.deref());
    let public_key = match *crv {
        CoreJsonCurveType::P256 => p256::ecdsa::VerifyingKey::from_sec1_bytes(&pk),
        CoreJsonCurveType::P384 => {
            // Placeholder for future P-384 support:
            // p384::ecdsa::VerifyingKey::from_sec1_bytes(pk)
            return Err(SignatureVerificationError::UnsupportedAlg(
                "P384".to_string(),
            ));
        }
        // Unreachable in practice (P-521 was rejected above), but kept so the
        // match stays exhaustive without a wildcard arm.
        CoreJsonCurveType::P521 => {
            return Err(SignatureVerificationError::UnsupportedAlg(
                "P521".to_string(),
            ));
        }
    }
    .map_err(|e| SignatureVerificationError::InvalidKey(format!("{}", e)))?;
    public_key
        .verify(
            msg,
            &p256::ecdsa::Signature::from_bytes(signature).map_err(|_| {
                SignatureVerificationError::CryptoError("Invalid signature".to_string())
            })?,
        )
        .map_err(|_| SignatureVerificationError::CryptoError("EC Signature was wrong".to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::ops::Deref;

    #[cfg(feature = "rustcrypto")]
    use sha2::Digest;

    use crate::{
        core::{crypto::rsa_public_key, CoreJsonWebKey},
        SignatureVerificationError,
    };

    /// Regression test: a JWK whose modulus carries a leading zero octet must
    /// still verify, because `verify_rsa_signature` normalizes the components
    /// through big integers before handing them to the crypto backend.
    #[test]
    fn test_leading_zeros_are_parsed_correctly() {
        // The message we signed
        let msg = "THIS IS A SIGNATURE TEST";
        let signature = base64::decode_config("bg0ohqKwYHAiODeG6qkJ-6IhodN7LGPxAh4hbWeIoBdSXrXMt8Ft8U0BV7vANPvF56h20XB9C0021x2kt7iAbMgPNcZ7LCuXMPPq04DrBpMHafH5BXBwnyDKJKrzDm5sfr6OgEkcxSLHaSJ6gTWQ3waPt6_SeH2-Fi74rg13MHyX-0iqz7bZveoBbGIs5yQCwvXgrDS9zW5LUwUHozHfE6FuSi_Z92ioXeu7FHHDg1KFfg3hs8ZLx4wAX15Vw2GCQOzvyNdbItxXRLnrN1NPqxFquVNo5RGlx6ihR1Jfe7y_n0NSR2q2TuU4cIwR0LRwEaANy5SDqtleQPrTEn8nGQ", base64::URL_SAFE_NO_PAD).unwrap();
        // RSA pub key with leading 0
        let key: CoreJsonWebKey = serde_json::from_value(serde_json::json!(
            {
                "kty": "RSA",
                "e": "AQAB",
                "use": "sig",
                "kid": "TEST_KEY_ID",
                "alg": "RS256",
                "n": "AN0M6Y760b9Ok2PxDOps1TgSmiOaR9mLIfUHtZ_o-6JypOckGcl1CxrteyokOb3WyDsfIAN9fFNrycv5YoLKO7sh0IcfzNEXFgzK84HTBcGuqhN8NV98Z6N9EryUrgJYsJeVoPYm0MzkDe4NyWHhnq-9OyNCQzVELH0NhhViQqRyM92OPrJcQlk8s3ZvcgRmkd-rEtRua8SbS3GEvfvgweVy5-qcJCGoziKfx-IteMOm6yKoHvqisKb91N-qw_kSS4YQUx-DZVDo2g24F7VIbcYzJGUOU674HUF1j-wJyXzG3VV8lAXD8hABs5Lh87gr8_hIZD5gbYBJRObJk9XZbfk"
            }
        )).unwrap();
        #[cfg(feature = "ring")]
        {
            // Old way of verifying the jwt: take the modulus directly from the JWK.
            let (n, e) = rsa_public_key(&key)
                .map_err(SignatureVerificationError::InvalidKey)
                .unwrap();
            let public_key = ring_signature::RsaPublicKeyComponents {
                n: n.deref(),
                e: e.deref(),
            };
            // This fails, since ring expects the keys to have no leading zeros
            assert! {
                public_key
                    .verify(
                        &ring_signature::RSA_PKCS1_2048_8192_SHA256,
                        msg.as_bytes(),
                        &signature,
                    ).is_err()
            };
            // This should succeed as the function uses big-integers to actually harmonize parsing
            assert! {
                verify_rsa_signature(
                    &key,
                    &ring_signature::RSA_PKCS1_2048_8192_SHA256,
                    msg.as_bytes(),
                    &signature,
                ).is_ok()
            }
        }
        #[cfg(feature = "rustcrypto")]
        {
            // The rustcrypto backend verifies over a pre-computed digest.
            let mut hasher = sha2::Sha256::new();
            hasher.update(msg);
            let hash = hasher.finalize().to_vec();

            assert! {
                verify_rsa_signature(
                    &key,
                    rsa::PaddingScheme::new_pkcs1v15_sign(Some(rsa::Hash::SHA2_256)),
                    &hash,
                    &signature,
                ).is_ok()
            }
        }
    }
}
36.517361
426
0.617191
8ab16131d374bf0e24e4ddd99a70e1e8b4341671
34,647
#[doc = "Register `QSPIx_SSCTL` reader"] pub struct R(crate::R<QSPIX_SSCTL_SPEC>); impl core::ops::Deref for R { type Target = crate::R<QSPIX_SSCTL_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<QSPIX_SSCTL_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<QSPIX_SSCTL_SPEC>) -> Self { R(reader) } } #[doc = "Register `QSPIx_SSCTL` writer"] pub struct W(crate::W<QSPIX_SSCTL_SPEC>); impl core::ops::Deref for W { type Target = crate::W<QSPIX_SSCTL_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<QSPIX_SSCTL_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<QSPIX_SSCTL_SPEC>) -> Self { W(writer) } } #[doc = "Slave Selection Control (Master Only)\nIf AUTOSS bit is cleared to 0,\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SS_A { #[doc = "0: set the QSPIx_SS line to inactive state.\\nKeep the QSPIx_SS line at inactive state"] _0 = 0, #[doc = "1: set the QSPIx_SS line to active state.\\nQSPIx_SS line will be automatically driven to active state for the duration of data transfer, and will be driven to inactive state for the rest of the time. 
The active state of QSPIx_SS is specified in SSACTPOL (QSPIx_SSCTL\\[2\\])"] _1 = 1, } impl From<SS_A> for bool { #[inline(always)] fn from(variant: SS_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SS` reader - Slave Selection Control (Master Only)\nIf AUTOSS bit is cleared to 0,"] pub struct SS_R(crate::FieldReader<bool, SS_A>); impl SS_R { pub(crate) fn new(bits: bool) -> Self { SS_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SS_A { match self.bits { false => SS_A::_0, true => SS_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SS_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SS_A::_1 } } impl core::ops::Deref for SS_R { type Target = crate::FieldReader<bool, SS_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SS` writer - Slave Selection Control (Master Only)\nIf AUTOSS bit is cleared to 0,"] pub struct SS_W<'a> { w: &'a mut W, } impl<'a> SS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SS_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "set the QSPIx_SS line to inactive state.\nKeep the QSPIx_SS line at inactive state"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SS_A::_0) } #[doc = "set the QSPIx_SS line to active state.\nQSPIx_SS line will be automatically driven to active state for the duration of data transfer, and will be driven to inactive state for the rest of the time. 
The active state of QSPIx_SS is specified in SSACTPOL (QSPIx_SSCTL\\[2\\])"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SS_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Slave Selection Active Polarity\nThis bit defines the active polarity of slave selection signal (QSPIx_SS).\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SSACTPOL_A { #[doc = "0: The slave selection signal QSPIx_SS is active low"] _0 = 0, #[doc = "1: The slave selection signal QSPIx_SS is active high"] _1 = 1, } impl From<SSACTPOL_A> for bool { #[inline(always)] fn from(variant: SSACTPOL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SSACTPOL` reader - Slave Selection Active Polarity\nThis bit defines the active polarity of slave selection signal (QSPIx_SS)."] pub struct SSACTPOL_R(crate::FieldReader<bool, SSACTPOL_A>); impl SSACTPOL_R { pub(crate) fn new(bits: bool) -> Self { SSACTPOL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SSACTPOL_A { match self.bits { false => SSACTPOL_A::_0, true => SSACTPOL_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SSACTPOL_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SSACTPOL_A::_1 } } impl core::ops::Deref for SSACTPOL_R { type Target = crate::FieldReader<bool, SSACTPOL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SSACTPOL` writer - Slave Selection Active Polarity\nThis bit defines the active polarity of slave 
selection signal (QSPIx_SS)."] pub struct SSACTPOL_W<'a> { w: &'a mut W, } impl<'a> SSACTPOL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SSACTPOL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The slave selection signal QSPIx_SS is active low"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SSACTPOL_A::_0) } #[doc = "The slave selection signal QSPIx_SS is active high"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SSACTPOL_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Automatic Slave Selection Function Enable Bit (Master Only)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum AUTOSS_A { #[doc = "0: Automatic slave selection function Disabled. 
Slave selection signal will be asserted/de-asserted according to SS (QSPIx_SSCTL\\[0\\])"] _0 = 0, #[doc = "1: Automatic slave selection function Enabled"] _1 = 1, } impl From<AUTOSS_A> for bool { #[inline(always)] fn from(variant: AUTOSS_A) -> Self { variant as u8 != 0 } } #[doc = "Field `AUTOSS` reader - Automatic Slave Selection Function Enable Bit (Master Only)"] pub struct AUTOSS_R(crate::FieldReader<bool, AUTOSS_A>); impl AUTOSS_R { pub(crate) fn new(bits: bool) -> Self { AUTOSS_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> AUTOSS_A { match self.bits { false => AUTOSS_A::_0, true => AUTOSS_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == AUTOSS_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == AUTOSS_A::_1 } } impl core::ops::Deref for AUTOSS_R { type Target = crate::FieldReader<bool, AUTOSS_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `AUTOSS` writer - Automatic Slave Selection Function Enable Bit (Master Only)"] pub struct AUTOSS_W<'a> { w: &'a mut W, } impl<'a> AUTOSS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: AUTOSS_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Automatic slave selection function Disabled. 
Slave selection signal will be asserted/de-asserted according to SS (QSPIx_SSCTL\\[0\\])"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(AUTOSS_A::_0) } #[doc = "Automatic slave selection function Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(AUTOSS_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Slave 3-wire Mode Enable Bit\nIn Slave 3-wire mode, the QSPI controller can work with 3-wire interface including QSPIx_CLK, QSPIx_MISO and QSPIx_MOSI pins.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SLV3WIRE_A { #[doc = "0: 4-wire bi-direction interface"] _0 = 0, #[doc = "1: 3-wire bi-direction interface"] _1 = 1, } impl From<SLV3WIRE_A> for bool { #[inline(always)] fn from(variant: SLV3WIRE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SLV3WIRE` reader - Slave 3-wire Mode Enable Bit\nIn Slave 3-wire mode, the QSPI controller can work with 3-wire interface including QSPIx_CLK, QSPIx_MISO and QSPIx_MOSI pins."] pub struct SLV3WIRE_R(crate::FieldReader<bool, SLV3WIRE_A>); impl SLV3WIRE_R { pub(crate) fn new(bits: bool) -> Self { SLV3WIRE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SLV3WIRE_A { match self.bits { false => SLV3WIRE_A::_0, true => SLV3WIRE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SLV3WIRE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SLV3WIRE_A::_1 } } impl core::ops::Deref for SLV3WIRE_R { type Target 
= crate::FieldReader<bool, SLV3WIRE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SLV3WIRE` writer - Slave 3-wire Mode Enable Bit\nIn Slave 3-wire mode, the QSPI controller can work with 3-wire interface including QSPIx_CLK, QSPIx_MISO and QSPIx_MOSI pins."] pub struct SLV3WIRE_W<'a> { w: &'a mut W, } impl<'a> SLV3WIRE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SLV3WIRE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "4-wire bi-direction interface"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SLV3WIRE_A::_0) } #[doc = "3-wire bi-direction interface"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SLV3WIRE_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Slave Mode Time-out Interrupt Enable Bit\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SLVTOIEN_A { #[doc = "0: Slave mode time-out interrupt Disabled"] _0 = 0, #[doc = "1: Slave mode time-out interrupt Enabled"] _1 = 1, } impl From<SLVTOIEN_A> for bool { #[inline(always)] fn from(variant: SLVTOIEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SLVTOIEN` reader - Slave Mode Time-out Interrupt Enable Bit"] pub struct SLVTOIEN_R(crate::FieldReader<bool, SLVTOIEN_A>); impl SLVTOIEN_R { pub(crate) fn new(bits: bool) -> Self { SLVTOIEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SLVTOIEN_A { match self.bits { false => SLVTOIEN_A::_0, true => SLVTOIEN_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] 
#[inline(always)] pub fn is_0(&self) -> bool { **self == SLVTOIEN_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SLVTOIEN_A::_1 } } impl core::ops::Deref for SLVTOIEN_R { type Target = crate::FieldReader<bool, SLVTOIEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SLVTOIEN` writer - Slave Mode Time-out Interrupt Enable Bit"] pub struct SLVTOIEN_W<'a> { w: &'a mut W, } impl<'a> SLVTOIEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SLVTOIEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Slave mode time-out interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SLVTOIEN_A::_0) } #[doc = "Slave mode time-out interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SLVTOIEN_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Slave Mode Time-out Reset Control\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SLVTORST_A { #[doc = "0: When Slave mode time-out event occurs, the TX and RX control circuit will not be reset"] _0 = 0, #[doc = "1: When Slave mode time-out event occurs, the TX and RX control circuit will be reset by hardware"] _1 = 1, } impl From<SLVTORST_A> for bool { #[inline(always)] fn from(variant: SLVTORST_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SLVTORST` reader - Slave Mode Time-out Reset Control"] pub struct SLVTORST_R(crate::FieldReader<bool, SLVTORST_A>); impl SLVTORST_R { pub(crate) fn new(bits: bool) -> Self { 
SLVTORST_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SLVTORST_A { match self.bits { false => SLVTORST_A::_0, true => SLVTORST_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SLVTORST_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SLVTORST_A::_1 } } impl core::ops::Deref for SLVTORST_R { type Target = crate::FieldReader<bool, SLVTORST_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SLVTORST` writer - Slave Mode Time-out Reset Control"] pub struct SLVTORST_W<'a> { w: &'a mut W, } impl<'a> SLVTORST_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SLVTORST_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "When Slave mode time-out event occurs, the TX and RX control circuit will not be reset"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SLVTORST_A::_0) } #[doc = "When Slave mode time-out event occurs, the TX and RX control circuit will be reset by hardware"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SLVTORST_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "Slave Mode Bit Count Error Interrupt Enable Bit\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SLVBEIEN_A { #[doc = "0: Slave mode bit count error interrupt Disabled"] _0 = 0, #[doc = "1: Slave mode bit count error interrupt Enabled"] _1 = 1, } impl From<SLVBEIEN_A> for bool { 
#[inline(always)] fn from(variant: SLVBEIEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SLVBEIEN` reader - Slave Mode Bit Count Error Interrupt Enable Bit"] pub struct SLVBEIEN_R(crate::FieldReader<bool, SLVBEIEN_A>); impl SLVBEIEN_R { pub(crate) fn new(bits: bool) -> Self { SLVBEIEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SLVBEIEN_A { match self.bits { false => SLVBEIEN_A::_0, true => SLVBEIEN_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SLVBEIEN_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SLVBEIEN_A::_1 } } impl core::ops::Deref for SLVBEIEN_R { type Target = crate::FieldReader<bool, SLVBEIEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SLVBEIEN` writer - Slave Mode Bit Count Error Interrupt Enable Bit"] pub struct SLVBEIEN_W<'a> { w: &'a mut W, } impl<'a> SLVBEIEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SLVBEIEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Slave mode bit count error interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SLVBEIEN_A::_0) } #[doc = "Slave mode bit count error interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SLVBEIEN_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "Slave Mode TX Under Run Interrupt Enable Bit\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub 
enum SLVURIEN_A { #[doc = "0: Slave mode TX under run interrupt Disabled"] _0 = 0, #[doc = "1: Slave mode TX under run interrupt Enabled"] _1 = 1, } impl From<SLVURIEN_A> for bool { #[inline(always)] fn from(variant: SLVURIEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SLVURIEN` reader - Slave Mode TX Under Run Interrupt Enable Bit"] pub struct SLVURIEN_R(crate::FieldReader<bool, SLVURIEN_A>); impl SLVURIEN_R { pub(crate) fn new(bits: bool) -> Self { SLVURIEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SLVURIEN_A { match self.bits { false => SLVURIEN_A::_0, true => SLVURIEN_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SLVURIEN_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SLVURIEN_A::_1 } } impl core::ops::Deref for SLVURIEN_R { type Target = crate::FieldReader<bool, SLVURIEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SLVURIEN` writer - Slave Mode TX Under Run Interrupt Enable Bit"] pub struct SLVURIEN_W<'a> { w: &'a mut W, } impl<'a> SLVURIEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SLVURIEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Slave mode TX under run interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SLVURIEN_A::_0) } #[doc = "Slave mode TX under run interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SLVURIEN_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 
<< 9)) | ((value as u32 & 0x01) << 9); self.w } } #[doc = "Slave Select Active Interrupt Enable Bit\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SSACTIEN_A { #[doc = "0: Slave select active interrupt Disabled"] _0 = 0, #[doc = "1: Slave select active interrupt Enabled"] _1 = 1, } impl From<SSACTIEN_A> for bool { #[inline(always)] fn from(variant: SSACTIEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SSACTIEN` reader - Slave Select Active Interrupt Enable Bit"] pub struct SSACTIEN_R(crate::FieldReader<bool, SSACTIEN_A>); impl SSACTIEN_R { pub(crate) fn new(bits: bool) -> Self { SSACTIEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SSACTIEN_A { match self.bits { false => SSACTIEN_A::_0, true => SSACTIEN_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SSACTIEN_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SSACTIEN_A::_1 } } impl core::ops::Deref for SSACTIEN_R { type Target = crate::FieldReader<bool, SSACTIEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SSACTIEN` writer - Slave Select Active Interrupt Enable Bit"] pub struct SSACTIEN_W<'a> { w: &'a mut W, } impl<'a> SSACTIEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SSACTIEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Slave select active interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SSACTIEN_A::_0) } #[doc = "Slave select active interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SSACTIEN_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) 
} #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Slave Select Inactive Interrupt Enable Bit\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SSINAIEN_A { #[doc = "0: Slave select inactive interrupt Disabled"] _0 = 0, #[doc = "1: Slave select inactive interrupt Enabled"] _1 = 1, } impl From<SSINAIEN_A> for bool { #[inline(always)] fn from(variant: SSINAIEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SSINAIEN` reader - Slave Select Inactive Interrupt Enable Bit"] pub struct SSINAIEN_R(crate::FieldReader<bool, SSINAIEN_A>); impl SSINAIEN_R { pub(crate) fn new(bits: bool) -> Self { SSINAIEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SSINAIEN_A { match self.bits { false => SSINAIEN_A::_0, true => SSINAIEN_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SSINAIEN_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == SSINAIEN_A::_1 } } impl core::ops::Deref for SSINAIEN_R { type Target = crate::FieldReader<bool, SSINAIEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SSINAIEN` writer - Slave Select Inactive Interrupt Enable Bit"] pub struct SSINAIEN_W<'a> { w: &'a mut W, } impl<'a> SSINAIEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SSINAIEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Slave select inactive interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(SSINAIEN_A::_0) } #[doc = "Slave select inactive interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(SSINAIEN_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] 
pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Field `SLVTOCNT` reader - Slave Mode Time-out Period\nIn Slave mode, these bits indicate the time-out period when there is bus clock input during slave select active. The clock source of the time-out counter is Slave peripheral clock. If the value is 0, it indicates the slave mode time-out function is disabled."] pub struct SLVTOCNT_R(crate::FieldReader<u16, u16>); impl SLVTOCNT_R { pub(crate) fn new(bits: u16) -> Self { SLVTOCNT_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SLVTOCNT_R { type Target = crate::FieldReader<u16, u16>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SLVTOCNT` writer - Slave Mode Time-out Period\nIn Slave mode, these bits indicate the time-out period when there is bus clock input during slave select active. The clock source of the time-out counter is Slave peripheral clock. 
If the value is 0, it indicates the slave mode time-out function is disabled."] pub struct SLVTOCNT_W<'a> { w: &'a mut W, } impl<'a> SLVTOCNT_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !(0xffff << 16)) | ((value as u32 & 0xffff) << 16); self.w } } impl R { #[doc = "Bit 0 - Slave Selection Control (Master Only) If AUTOSS bit is cleared to 0,"] #[inline(always)] pub fn ss(&self) -> SS_R { SS_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 2 - Slave Selection Active Polarity This bit defines the active polarity of slave selection signal (QSPIx_SS)."] #[inline(always)] pub fn ssactpol(&self) -> SSACTPOL_R { SSACTPOL_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Automatic Slave Selection Function Enable Bit (Master Only)"] #[inline(always)] pub fn autoss(&self) -> AUTOSS_R { AUTOSS_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Slave 3-wire Mode Enable Bit In Slave 3-wire mode, the QSPI controller can work with 3-wire interface including QSPIx_CLK, QSPIx_MISO and QSPIx_MOSI pins."] #[inline(always)] pub fn slv3wire(&self) -> SLV3WIRE_R { SLV3WIRE_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Slave Mode Time-out Interrupt Enable Bit"] #[inline(always)] pub fn slvtoien(&self) -> SLVTOIEN_R { SLVTOIEN_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Slave Mode Time-out Reset Control"] #[inline(always)] pub fn slvtorst(&self) -> SLVTORST_R { SLVTORST_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 8 - Slave Mode Bit Count Error Interrupt Enable Bit"] #[inline(always)] pub fn slvbeien(&self) -> SLVBEIEN_R { SLVBEIEN_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Slave Mode TX Under Run Interrupt Enable Bit"] #[inline(always)] pub fn slvurien(&self) -> SLVURIEN_R { SLVURIEN_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 12 - Slave Select Active Interrupt Enable Bit"] #[inline(always)] pub fn ssactien(&self) 
-> SSACTIEN_R { SSACTIEN_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Slave Select Inactive Interrupt Enable Bit"] #[inline(always)] pub fn ssinaien(&self) -> SSINAIEN_R { SSINAIEN_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bits 16:31 - Slave Mode Time-out Period In Slave mode, these bits indicate the time-out period when there is bus clock input during slave select active. The clock source of the time-out counter is Slave peripheral clock. If the value is 0, it indicates the slave mode time-out function is disabled."] #[inline(always)] pub fn slvtocnt(&self) -> SLVTOCNT_R { SLVTOCNT_R::new(((self.bits >> 16) & 0xffff) as u16) } } impl W { #[doc = "Bit 0 - Slave Selection Control (Master Only) If AUTOSS bit is cleared to 0,"] #[inline(always)] pub fn ss(&mut self) -> SS_W { SS_W { w: self } } #[doc = "Bit 2 - Slave Selection Active Polarity This bit defines the active polarity of slave selection signal (QSPIx_SS)."] #[inline(always)] pub fn ssactpol(&mut self) -> SSACTPOL_W { SSACTPOL_W { w: self } } #[doc = "Bit 3 - Automatic Slave Selection Function Enable Bit (Master Only)"] #[inline(always)] pub fn autoss(&mut self) -> AUTOSS_W { AUTOSS_W { w: self } } #[doc = "Bit 4 - Slave 3-wire Mode Enable Bit In Slave 3-wire mode, the QSPI controller can work with 3-wire interface including QSPIx_CLK, QSPIx_MISO and QSPIx_MOSI pins."] #[inline(always)] pub fn slv3wire(&mut self) -> SLV3WIRE_W { SLV3WIRE_W { w: self } } #[doc = "Bit 5 - Slave Mode Time-out Interrupt Enable Bit"] #[inline(always)] pub fn slvtoien(&mut self) -> SLVTOIEN_W { SLVTOIEN_W { w: self } } #[doc = "Bit 6 - Slave Mode Time-out Reset Control"] #[inline(always)] pub fn slvtorst(&mut self) -> SLVTORST_W { SLVTORST_W { w: self } } #[doc = "Bit 8 - Slave Mode Bit Count Error Interrupt Enable Bit"] #[inline(always)] pub fn slvbeien(&mut self) -> SLVBEIEN_W { SLVBEIEN_W { w: self } } #[doc = "Bit 9 - Slave Mode TX Under Run Interrupt Enable Bit"] #[inline(always)] pub fn slvurien(&mut 
self) -> SLVURIEN_W { SLVURIEN_W { w: self } } #[doc = "Bit 12 - Slave Select Active Interrupt Enable Bit"] #[inline(always)] pub fn ssactien(&mut self) -> SSACTIEN_W { SSACTIEN_W { w: self } } #[doc = "Bit 13 - Slave Select Inactive Interrupt Enable Bit"] #[inline(always)] pub fn ssinaien(&mut self) -> SSINAIEN_W { SSINAIEN_W { w: self } } #[doc = "Bits 16:31 - Slave Mode Time-out Period In Slave mode, these bits indicate the time-out period when there is bus clock input during slave select active. The clock source of the time-out counter is Slave peripheral clock. If the value is 0, it indicates the slave mode time-out function is disabled."] #[inline(always)] pub fn slvtocnt(&mut self) -> SLVTOCNT_W { SLVTOCNT_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "QSPI Slave Select Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [qspix_ssctl](index.html) module"] pub struct QSPIX_SSCTL_SPEC; impl crate::RegisterSpec for QSPIX_SSCTL_SPEC { type Ux = u32; } #[doc = "`read()` method returns [qspix_ssctl::R](R) reader structure"] impl crate::Readable for QSPIX_SSCTL_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [qspix_ssctl::W](W) writer structure"] impl crate::Writable for QSPIX_SSCTL_SPEC { type Writer = W; } #[doc = "`reset()` method sets QSPIx_SSCTL to value 0"] impl crate::Resettable for QSPIX_SSCTL_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
33.540174
426
0.593587
1e9497ab892aa38a80ea12cfd5a56221f2493048
8,738
use nom::bytes::complete::take_while1; use nom::character::complete::digit1; use nom::character::complete::line_ending; use nom::error::ParseError; use nom::multi::{many0, many1}; use nom::{bytes::complete::tag, combinator::map, combinator::opt, sequence::tuple, IResult}; use std::{collections::HashMap, collections::HashSet, error::Error}; #[derive(Clone, Debug)] pub struct IndexTable { tbl_map: HashMap<String, Vec<(u16, String)>>, } impl Default for IndexTable { fn default() -> Self { Self { tbl_map: HashMap::default(), } } } impl IndexTable { pub fn new() -> Self { Self { tbl_map: HashMap::new(), } } pub fn from_hashmap(m: HashMap<String, Vec<(u16, String)>>) -> Self { Self { tbl_map: m } } pub fn get<S>(&self, key: S) -> Option<&Vec<(u16, String)>> where S: Into<String>, { self.tbl_map.get(&key.into()) } pub fn get_from_suffix<S>(&self, key: S) -> Vec<(u16, String)> where S: Into<String>, { let passed_k = key.into(); let mut result: HashSet<(u16, String)> = HashSet::default(); for (k, v) in self.tbl_map.iter() { if k.ends_with(&passed_k) { for e in v { result.insert(e.clone()); } } } result.into_iter().collect() } } fn element_extractor<'a, E>() -> impl Fn(&'a str) -> IResult<&str, (u16, &str), E> where E: ParseError<&'a str>, { map( tuple(( digit1, tag(":"), take_while1(|chr| chr != ',' && chr != '\r' && chr != '\n'), opt(tag(",")), )), |(freq, _, target, _)| { let f: &str = freq; (f.parse::<u16>().unwrap(), target) }, ) } fn parse_index_line<'a, E>() -> impl Fn(&'a str) -> IResult<&str, (String, Vec<(u16, String)>), E> where E: ParseError<&'a str>, { map( tuple(( map(take_while1(|chr| chr != '\t'), |e: &str| e.to_string()), tag("\t"), many1(map(element_extractor(), |(freq, v)| (freq, v.to_string()))), )), |tup| (tup.0, tup.2), ) } fn parse_file_e(input: &str) -> IResult<&str, Vec<(String, Vec<(u16, String)>)>> { many0(map(tuple((parse_index_line(), opt(line_ending))), |e| e.0))(input) } pub fn parse_file(input: &str) -> Result<IndexTable, Box<dyn Error>> { 
debug!("Start parsing"); let extracted_result: Vec<(String, Vec<(u16, String)>)> = parse_file_e(input).unwrap().1; debug!("Finished parsing.."); let mut index_data = HashMap::new(); index_data.reserve(extracted_result.len()); for (k, v) in extracted_result.into_iter() { index_data.insert(k, v); } Ok(IndexTable { tbl_map: index_data, }) } #[cfg(test)] mod tests { use super::*; fn run_parse_index_line(input: &str) -> IResult<&str, (String, Vec<(u16, String)>)> { parse_index_line()(input) } #[test] fn parse_sample_line() { assert_eq!( run_parse_index_line( "PantsWorkaroundCache\t0:@third_party_jvm//3rdparty/jvm/com/twitter:util_cache" ) .unwrap() .1, ( String::from("PantsWorkaroundCache"), vec![( 0, String::from("@third_party_jvm//3rdparty/jvm/com/twitter:util_cache") )] ) ); } #[test] fn test_parse_troublesome_line() { assert_eq!( run_parse_index_line( "javax.annotation.Nullable\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations" ) .unwrap() .1, ( String::from("javax.annotation.Nullable"), vec![( 236, String::from("@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305") ), ( 75, String::from("@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations"), ), ] ) ); } #[test] fn parse_multiple_lines() { let parsed_file = parse_file( "scala.reflect.internal.SymbolPairs.Cursor.anon.1\t1:@third_party_jvm//3rdparty/jvm/org/scala_lang:scala_reflect org.apache.parquet.thrift.test.TestPerson.TestPersonTupleScheme\t0:@third_party_jvm//3rdparty/jvm/org/apache/parquet:parquet_thrift_jar_tests org.apache.commons.lang3.concurrent.MultiBackgroundInitializer\t68:@third_party_jvm//3rdparty/jvm/org/apache/commons:commons_lang3 org.apache.hadoop.util.GcTimeMonitor\t38:@third_party_jvm//3rdparty/jvm/org/apache/hadoop:hadoop_common org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType\t38:@third_party_jvm//3rdparty/jvm/org/apache/hadoop:hadoop_common 
com.twitter.chill.JavaIterableWrapperSerializer\t2:@third_party_jvm//3rdparty/jvm/com/twitter:chill org.apache.commons.collections4.map.ListOrderedMap$EntrySetView\t0:@third_party_jvm//3rdparty/jvm/org/apache/commons:commons_collections4 scala.collection.convert.AsJavaConverters\t41:@third_party_jvm//3rdparty/jvm/org/scala_lang:scala_library org.ehcache.xml.XmlConfiguration.1\t0:@third_party_jvm//3rdparty/jvm/org/ehcache:ehcache com.ibm.icu.text.CharsetRecog_sbcs$CharsetRecog_8859_1_de\t1:@third_party_jvm//3rdparty/jvm/com/ibm/icu:icu4j scala.reflect.internal.Definitions$DefinitionsClass$VarArityClass\t1:@third_party_jvm//3rdparty/jvm/org/scala_lang:scala_reflect org.apache.http.nio.pool.AbstractNIOConnPool.1\t0:@third_party_jvm//3rdparty/jvm/org/apache/httpcomponents:httpcore_nio io.circe.generic.util.macros.DerivationMacros$$typecreator1$1 21:@third_party_jvm//3rdparty/jvm/io/circe:circe_generic org.apache.zookeeper.server.NettyServerCnxn.DumpCommand\t0:@third_party_jvm//3rdparty/jvm/org/apache/zookeeper:zookeeper org.apache.logging.log4j.core.appender.OutputStreamAppender$OutputStreamManagerFactory\t53:@third_party_jvm//3rdparty/jvm/org/apache/logging/log4j:log4j_core com.twitter.finagle.http.service.RoutingService.anonfun\t2:@third_party_jvm//3rdparty/jvm/com/twitter:finagle_http org.bouncycastle.util.CollectionStor\t10:@third_party_jvm//3rdparty/jvm/org/bouncycastle:bcprov_jdk15on org.apache.avro.io.parsing.JsonGrammarGenerator$1\t0:@third_party_jvm//3rdparty/jvm/org/apache/avro:avro org.terracotta.statistics.util\t0:@third_party_jvm//3rdparty/jvm/org/ehcache:ehcache com.ibm.icu.impl.Normalizer2Impl$1\t1:@third_party_jvm//3rdparty/jvm/com/ibm/icu:icu4j org.eclipse.jetty.io.ByteBufferPool.Bucket\t0:@third_party_jvm//3rdparty/jvm/org/eclipse javax.annotation.Nonnull$Checker\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations 
javax.annotation.Nonnull.Checker\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations javax.annotation.Nullable\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations javax.annotation.OverridingMethodsMustInvokeSuper\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations javax.annotation.ParametersAreNonnullByDefault\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations javax.annotation.ParametersAreNullableByDefault\t236:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305,75:@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations" ).unwrap(); assert_eq!( parsed_file.get("org.apache.parquet.thrift.test.TestPerson.TestPersonTupleScheme"), Some(&vec![( 0, String::from( "@third_party_jvm//3rdparty/jvm/org/apache/parquet:parquet_thrift_jar_tests" ) )]) ); assert_eq!( parsed_file.get("javax.annotation.Nullable"), Some(&vec![ ( 236, String::from("@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:jsr305") ), ( 75, String::from( "@third_party_jvm//3rdparty/jvm/com/google/code/findbugs:annotations" ), ), ]) ); } }
40.831776
188
0.639849
4b957b6f85fc5ffdf066bdea99eb2ade5b3fcfe2
10,773
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ checker::*, common::LineSp, compiler::Compiler, evaluator::{eval, EvaluationLog, EvaluationOutput}, preprocessor::{build_transactions, extract_global_config, split_input}, }; use difference::Changeset; use regex::Regex; use std::{ env, fmt::Write as FmtWrite, fs::{read_to_string, File}, io::Write, iter, path::{Path, PathBuf}, }; use termcolor::{Buffer, BufferWriter, Color, ColorChoice, ColorSpec, WriteColor}; pub const PRETTY: &str = "PRETTY"; pub const FILTER: &str = "FILTER"; pub const UPDATE_BASELINE: &str = "UPDATE_BASELINE"; fn at_most_n_chars(s: impl IntoIterator<Item = char>, n: usize) -> String { let mut it = s.into_iter(); let mut s = String::new(); for _ in 0..n { match it.next() { Some(c) => s.push(c), None => return s, } } if it.next().is_some() { s.push_str("...") } s } fn at_most_n_before_and_m_after( s: &str, n: usize, start: usize, end: usize, m: usize, ) -> (String, String, String) { let before = at_most_n_chars(s[..start].chars().rev(), n) .chars() .rev() .collect(); let matched = s[start..end].to_string(); let after = at_most_n_chars(s[end..].chars(), m).chars().collect(); (before, matched, after) } fn env_var(var_name: &str) -> String { env::var(var_name) .unwrap_or_else(|_| "".to_string()) .to_ascii_lowercase() } fn pretty_mode() -> bool { let pretty = env_var(PRETTY); pretty == "1" || pretty == "true" } fn update_baseline() -> bool { let update = env_var(UPDATE_BASELINE); update == "1" || update == "true" } fn print_stage(haystack: &str) -> bool { env::var(FILTER) .map(|needle| { let needle = Regex::new(&needle).unwrap(); needle.is_match(haystack) }) .unwrap_or(true) } fn write_horizontal_line(output: &mut Buffer, term_width: usize) -> std::io::Result<()> { writeln!( output, "{}", iter::repeat('=').take(term_width).collect::<String>() ) } fn write_test_header( output: &mut Buffer, term_width: usize, test_file_path: &Path, ) -> std::io::Result<()> { 
writeln!(output)?; write_horizontal_line(output, term_width)?; writeln!(output, "{}", test_file_path.display())?; writeln!(output) } fn check_or_update_expected_output( bufwtr: &BufferWriter, output: &mut Buffer, term_width: usize, test_file_path: &Path, exp_file_path: &Path, log: &EvaluationLog, ) -> datatest_stable::Result<()> { let mut text = String::new(); for (idx, entry) in log.to_text_for_matching().into_iter().enumerate() { writeln!(&mut text, "[{}] {}", idx, entry)?; } let expected = read_to_string(&exp_file_path)?; let changeset = Changeset::new(&expected, &text, "\n"); // TODO: make this less sensitive to spaces. if changeset.distance != 0 { if update_baseline() { let mut f = File::create(&exp_file_path)?; f.write_all(text.as_bytes())?; } else { write_test_header(output, term_width, test_file_path)?; writeln!(output, "{}", changeset)?; writeln!( output, " Note: run with `env UPDATE_BASELINE=1` to update the exp files." )?; writeln!(output)?; write_horizontal_line(output, term_width)?; writeln!(output)?; bufwtr.print(output)?; panic!("test failed") } } Ok(()) } fn run_checker_directives( bufwtr: &BufferWriter, output: &mut Buffer, term_width: usize, test_file_path: &Path, lines: &[String], log: &EvaluationLog, directives: &[LineSp<Directive>], ) -> datatest_stable::Result<()> { let res = match_output(&log, directives); let errs = match res.status { MatchStatus::Success => return Ok(()), MatchStatus::Failure(errs) => errs, }; // Helpers for directives and matches. macro_rules! print_directive { ($idx: expr) => {{ let d = &directives[$idx]; write!(output, "{} | {}", d.line + 1, &lines[d.line][..d.start])?; output.set_color(ColorSpec::new().set_underline(true))?; write!(output, "{}", &lines[d.line][d.start..d.end])?; output.reset()?; write!(output, "{}", &lines[d.line][d.end..]) }}; } macro_rules! 
print_match { ($indent: expr, $is_positive: expr, $m: expr) => {{ let m: &Match = $m; let indent: &str = $indent; let prefix = format!("[{}] ", m.entry_id); let (before, matched, after) = at_most_n_before_and_m_after(&res.text[m.entry_id], 30, m.start, m.end, 50); write!(output, "{}", indent)?; write!(output, "{}{}", prefix, before)?; output.set_color(ColorSpec::new().set_underline(true).set_fg(Some( if $is_positive { Color::Green } else { Color::Red }, )))?; write!(output, "{}", matched)?; output.reset()?; writeln!(output, "{}", after)?; let offset = prefix.chars().count() + before.chars().count(); write!(output, "{}", indent)?; write!( output, "{}", iter::repeat(' ').take(offset).collect::<String>() )?; print_directive!(m.pat_id)?; writeln!(output) }}; } write_test_header(output, term_width, test_file_path)?; // Render the evaluation log. output.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Yellow)))?; write!(output, "info: ")?; output.set_color(ColorSpec::new().set_bold(true))?; writeln!(output, "Evaluation Outputs")?; output.reset()?; if pretty_mode() { writeln!( output, "{}", log.outputs .iter() .enumerate() .map(|(id, entry)| { match entry { EvaluationOutput::Error(err) => { format!("[{}] Error: {}\n", id, err.root_cause()) } _ => format!("[{}] {}\n", id, entry), } }) .filter(|x| print_stage(&x)) .collect::<String>() .lines() .map(|line| format!(" {}\n", line)) .collect::<String>() )?; } else { for (id, entry) in res.text.iter().enumerate() { if print_stage(entry) { writeln!(output, " [{}] {}", id, entry)?; } } writeln!(output)?; writeln!( output, " Note: enable pretty printing by setting 'env PRETTY=1'." )?; writeln!( output, " You can filter logs by setting 'env FILTER=\"<regex pattern>\"'." )?; writeln!(output)?; } writeln!(output)?; // Render previously successful matches if any. 
if !res.matches.is_empty() { output.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Yellow)))?; write!(output, "info: ")?; output.set_color(ColorSpec::new().set_bold(true))?; writeln!(output, "Successful Matches")?; output.reset()?; for m in &res.matches { print_match!(" ", true, m)?; writeln!(output)?; } writeln!(output)?; } // Render errors. for err in errs { output.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Red)))?; write!(output, "error: ")?; output.reset()?; match err { MatchError::UnmatchedErrors(errs) => { output.set_color(ColorSpec::new().set_bold(true))?; writeln!(output, "Unmatched Errors")?; output.reset()?; for id in errs.iter() { write!(output, " [{}] ", id)?; writeln!(output, "{}", at_most_n_chars(res.text[*id].chars(), 80))?; } } MatchError::NegativeMatch(m) => { output.set_color(ColorSpec::new().set_bold(true))?; writeln!(output, "Negative Match")?; output.reset()?; print_match!(" ", false, &m)?; } MatchError::UnmatchedDirectives(dirs) => { output.set_color(ColorSpec::new().set_bold(true))?; writeln!(output, "Unmatched Directives")?; output.reset()?; for idx in &dirs { write!(output, " ")?; print_directive!(*idx)?; writeln!(output)?; } writeln!(output)?; writeln!(output)?; } } } writeln!(output)?; write_horizontal_line(output, term_width)?; writeln!(output)?; bufwtr.print(&output)?; panic!("test failed") } // Runs all tests under the test/testsuite directory. 
pub fn functional_tests<TComp: Compiler>( compiler: TComp, test_file_path: &Path, ) -> datatest_stable::Result<()> { let mut exp_file_path = PathBuf::from(test_file_path); exp_file_path.set_extension("exp"); let exp_mode = exp_file_path.exists(); let input = read_to_string(test_file_path)?; let lines: Vec<String> = input.lines().map(|line| line.to_string()).collect(); let config = extract_global_config(&lines, exp_mode)?; let (directives, transactions) = split_input(&lines, &config)?; let commands = build_transactions(&config, &transactions)?; let log = eval(&config, compiler, &commands)?; // Set up colored output stream for error rendering. let bufwtr = BufferWriter::stdout(ColorChoice::Auto); let mut output = bufwtr.buffer(); let term_width = match term_size::dimensions() { Some((w, _h)) => w, _ => 80, }; if exp_mode { check_or_update_expected_output( &bufwtr, &mut output, term_width, test_file_path, &exp_file_path, &log, )?; } else { run_checker_directives( &bufwtr, &mut output, term_width, test_file_path, &lines, &log, &directives, )?; } Ok(()) }
29.925
92
0.517776
8fd6f062b9238d37e830a5575e383eaa0ed87d95
3,968
use super::super::types::{TypeId, Types}; use crate::ir::{module::name, util::spaces}; use nom::{ branch::alt, bytes::complete::tag, character::complete::{char, digit1}, combinator::map, error::VerboseError, sequence::preceded, IResult, }; pub fn parse<'a>( source: &'a str, types: &Types, ) -> IResult<&'a str, TypeId, VerboseError<&'a str>> { let (mut source, mut base) = if let Ok((source, _)) = preceded(spaces, char('['))(source) { parse_array(source, types)? } else if let Ok((source, _)) = preceded(spaces, char('{'))(source) { parse_struct(source, types, false)? } else if let Ok((source, _)) = preceded(spaces, tag("<{"))(source) { parse_struct(source, types, true)? } else if let Ok((source, name)) = preceded(spaces, preceded(char('%'), name::parse))(source) { (source, types.base_mut().named_type(name)) } else { preceded( spaces, alt(( map(tag("void"), |_| types.base().void()), map(tag("i1"), |_| types.base().i1()), map(tag("i8"), |_| types.base().i8()), map(tag("i32"), |_| types.base().i32()), map(tag("i64"), |_| types.base().i64()), )), )(source)? 
}; loop { if let Ok((source_, _ptr)) = preceded(spaces, char('*'))(source) { base = types.base_mut().pointer(base); source = source_; continue; } if let Ok((source_, _ptr)) = preceded(spaces, char('('))(source) { let (source_, base_) = parse_func_type(source_, types, base)?; base = base_; source = source_; continue; } break; } Ok((source, base)) } fn parse_array<'a>( source: &'a str, types: &Types, ) -> IResult<&'a str, TypeId, VerboseError<&'a str>> { let (source, n) = preceded(spaces, digit1)(source)?; let (source, _) = preceded(spaces, char('x'))(source)?; let (source, ty) = parse(source, types)?; let (source, _) = preceded(spaces, char(']'))(source)?; let ary_ty = types.base_mut().array(ty, n.parse::<u32>().unwrap()); Ok((source, ary_ty)) } fn parse_struct<'a>( mut source: &'a str, types: &Types, is_packed: bool, ) -> IResult<&'a str, TypeId, VerboseError<&'a str>> { if let Ok((source, _)) = preceded(spaces, tag(if is_packed { "}>" } else { "}" }))(source) { return Ok((source, types.base_mut().anonymous_struct(vec![], is_packed))); } let mut elems = vec![]; loop { let (source_, ty) = parse(source, types)?; elems.push(ty); if let Ok((source_, _)) = preceded(spaces, char(','))(source_) { source = source_; continue; } let (source_, _) = preceded(spaces, tag(if is_packed { "}>" } else { "}" }))(source_)?; return Ok((source_, types.base_mut().anonymous_struct(elems, is_packed))); } } fn parse_func_type<'a>( mut source: &'a str, types: &Types, ret: TypeId, ) -> IResult<&'a str, TypeId, VerboseError<&'a str>> { if let Ok((source, _)) = preceded(spaces, char(')'))(source) { let func_ty = types.base_mut().function(ret, vec![], false); return Ok((source, func_ty)); } let mut params = vec![]; let mut is_var_arg = false; loop { if let Ok((source_, _)) = preceded(spaces, tag("..."))(source) { is_var_arg = true; source = source_; break; } let (source_, param) = parse(source, types)?; source = source_; params.push(param); if let Ok((source_, _)) = preceded(spaces, 
char(','))(source) { source = source_; continue; } break; } let (source, _) = preceded(spaces, char(')'))(source)?; let func_ty = types.base_mut().function(ret, params, is_var_arg); Ok((source, func_ty)) }
31
99
0.529738
1da63882b69e5be014ca479ba0a9acf590e40e81
2,256
// ! Tests for array initializer parsing. use crate::syntax::{ ast::{constant::Const, node::Node}, parser::tests::check_parser, }; /// Checks an empty array. #[test] fn check_empty() { check_parser("[]", vec![Node::array_decl(Vec::new())]); } /// Checks an array with empty slot. #[test] fn check_empty_slot() { check_parser( "[,]", vec![Node::array_decl(vec![Node::Const(Const::Undefined)])], ); } /// Checks a numeric array. #[test] fn check_numeric_array() { check_parser( "[1, 2, 3]", vec![Node::array_decl(vec![ Node::const_node(1), Node::const_node(2), Node::const_node(3), ])], ); } // Checks a numeric array with trailing comma #[test] fn check_numeric_array_trailing() { check_parser( "[1, 2, 3,]", vec![Node::array_decl(vec![ Node::const_node(1), Node::const_node(2), Node::const_node(3), ])], ); } /// Checks a numeric array with an elision. #[test] fn check_numeric_array_elision() { check_parser( "[1, 2, , 3]", vec![Node::array_decl(vec![ Node::const_node(1), Node::const_node(2), Node::Const(Const::Undefined), Node::const_node(3), ])], ); } /// Checks a numeric array with repeated elisions. #[test] fn check_numeric_array_repeated_elision() { check_parser( "[1, 2, ,, 3]", vec![Node::array_decl(vec![ Node::const_node(1), Node::const_node(2), Node::Const(Const::Undefined), Node::Const(Const::Undefined), Node::const_node(3), ])], ); } /// Checks a combined array. #[test] fn check_combined() { check_parser( "[1, \"a\", 2]", vec![Node::array_decl(vec![ Node::const_node(1), Node::const_node("a"), Node::const_node(2), ])], ); } /// Checks a combined array with an empty string #[test] fn check_combined_empty_str() { check_parser( "[1, \"\", 2]", vec![Node::array_decl(vec![ Node::const_node(1), Node::const_node(""), Node::const_node(2), ])], ); }
21.902913
68
0.522606
677c0877777ba35a7ac6086be0bbb56e65740f78
5,857
use bevy_ecs::prelude::{Component, ReflectComponent}; use bevy_reflect::std_traits::ReflectDefault; use bevy_reflect::Reflect; type LayerMask = u32; /// An identifier for a rendering layer. pub type Layer = u8; /// Describes which rendering layers an entity belongs to. /// /// Cameras with this component will only render entities with intersecting /// layers. /// /// There are 32 layers numbered `0` - [`TOTAL_LAYERS`](RenderLayers::TOTAL_LAYERS). Entities may /// belong to one or more layers, or no layer at all. /// /// The [`Default`] instance of `RenderLayers` contains layer `0`, the first layer. /// /// An entity with this component without any layers is invisible. /// /// Entities without this component belong to layer `0`. #[derive(Component, Copy, Clone, Reflect, PartialEq, Eq, PartialOrd, Ord)] #[reflect(Component, Default, PartialEq)] pub struct RenderLayers(LayerMask); impl std::fmt::Debug for RenderLayers { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("RenderLayers") .field(&self.iter().collect::<Vec<_>>()) .finish() } } impl std::iter::FromIterator<Layer> for RenderLayers { fn from_iter<T: IntoIterator<Item = Layer>>(i: T) -> Self { i.into_iter().fold(Self::none(), |mask, g| mask.with(g)) } } /// Defaults to containing to layer `0`, the first layer. impl Default for RenderLayers { fn default() -> Self { RenderLayers::layer(0) } } impl RenderLayers { /// The total number of layers supported. pub const TOTAL_LAYERS: usize = std::mem::size_of::<LayerMask>() * 8; /// Create a new `RenderLayers` belonging to the given layer. pub const fn layer(n: Layer) -> Self { RenderLayers(0).with(n) } /// Create a new `RenderLayers` that belongs to all layers. pub const fn all() -> Self { RenderLayers(u32::MAX) } /// Create a new `RenderLayers` that belongs to no layers. pub const fn none() -> Self { RenderLayers(0) } /// Create a `RenderLayers` from a list of layers. 
pub fn from_layers(layers: &[Layer]) -> Self { layers.iter().copied().collect() } /// Add the given layer. /// /// This may be called multiple times to allow an entity to belong /// to multiple rendering layers. The maximum layer is `TOTAL_LAYERS - 1`. /// /// # Panics /// Panics when called with a layer greater than `TOTAL_LAYERS - 1`. #[must_use] pub const fn with(mut self, layer: Layer) -> Self { assert!((layer as usize) < Self::TOTAL_LAYERS); self.0 |= 1 << layer; self } /// Removes the given rendering layer. /// /// # Panics /// Panics when called with a layer greater than `TOTAL_LAYERS - 1`. #[must_use] pub const fn without(mut self, layer: Layer) -> Self { assert!((layer as usize) < Self::TOTAL_LAYERS); self.0 &= !(1 << layer); self } /// Get an iterator of the layers. pub fn iter(&self) -> impl Iterator<Item = Layer> { let total: Layer = std::convert::TryInto::try_into(Self::TOTAL_LAYERS).unwrap(); let mask = *self; (0..total).filter(move |g| RenderLayers::layer(*g).intersects(&mask)) } /// Determine if a `RenderLayers` intersects another. /// /// `RenderLayers`s intersect if they share any common layers. /// /// A `RenderLayers` with no layers will not match any other /// `RenderLayers`, even another with no layers. 
pub fn intersects(&self, other: &RenderLayers) -> bool { (self.0 & other.0) > 0 } } #[cfg(test)] mod rendering_mask_tests { use super::{Layer, RenderLayers}; #[test] fn rendering_mask_sanity() { assert_eq!( RenderLayers::TOTAL_LAYERS, 32, "total layers is what we think it is" ); assert_eq!(RenderLayers::layer(0).0, 1, "layer 0 is mask 1"); assert_eq!(RenderLayers::layer(1).0, 2, "layer 1 is mask 2"); assert_eq!(RenderLayers::layer(0).with(1).0, 3, "layer 0 + 1 is mask 3"); assert_eq!( RenderLayers::layer(0).with(1).without(0).0, 2, "layer 0 + 1 - 0 is mask 2" ); assert!( RenderLayers::layer(1).intersects(&RenderLayers::layer(1)), "layers match like layers" ); assert!( RenderLayers::layer(0).intersects(&RenderLayers(1)), "a layer of 0 means the mask is just 1 bit" ); assert!( RenderLayers::layer(0) .with(3) .intersects(&RenderLayers::layer(3)), "a mask will match another mask containing any similar layers" ); assert!( RenderLayers::default().intersects(&RenderLayers::default()), "default masks match each other" ); assert!( !RenderLayers::layer(0).intersects(&RenderLayers::layer(1)), "masks with differing layers do not match" ); assert!( !RenderLayers(0).intersects(&RenderLayers(0)), "empty masks don't match" ); assert_eq!( RenderLayers::from_layers(&[0, 2, 16, 30]) .iter() .collect::<Vec<_>>(), vec![0, 2, 16, 30], "from_layers and get_layers should roundtrip" ); assert_eq!( format!("{:?}", RenderLayers::from_layers(&[0, 1, 2, 3])).as_str(), "RenderLayers([0, 1, 2, 3])", "Debug instance shows layers" ); assert_eq!( RenderLayers::from_layers(&[0, 1, 2]), <RenderLayers as std::iter::FromIterator<Layer>>::from_iter(vec![0, 1, 2]), "from_layers and from_iter are equivalent" ); } }
32.181319
97
0.581697
64b9c96ac525beab5c8b99a822b594d9e7df18da
21,032
use anchor_client::Cluster; use anchor_syn::idl::Idl; use anyhow::{anyhow, Error, Result}; use clap::{ArgEnum, Clap}; use heck::SnakeCase; use serde::{Deserialize, Serialize}; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::{Keypair, Signer}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::fs::{self, File}; use std::io::prelude::*; use std::ops::Deref; use std::path::Path; use std::path::PathBuf; use std::str::FromStr; #[derive(Default, Debug, Clap)] pub struct ConfigOverride { /// Cluster override. #[clap(global = true, long = "provider.cluster")] pub cluster: Option<Cluster>, /// Wallet override. #[clap(global = true, long = "provider.wallet")] pub wallet: Option<WalletPath>, } pub struct WithPath<T> { inner: T, path: PathBuf, } impl<T> WithPath<T> { pub fn new(inner: T, path: PathBuf) -> Self { Self { inner, path } } pub fn path(&self) -> &PathBuf { &self.path } pub fn into_inner(self) -> T { self.inner } } impl<T> std::convert::AsRef<T> for WithPath<T> { fn as_ref(&self) -> &T { &self.inner } } #[derive(Debug, Clone, PartialEq)] pub struct Manifest(cargo_toml::Manifest); impl Manifest { pub fn from_path(p: impl AsRef<Path>) -> Result<Self> { cargo_toml::Manifest::from_path(p) .map(Manifest) .map_err(Into::into) } pub fn lib_name(&self) -> Result<String> { if self.lib.is_some() && self.lib.as_ref().unwrap().name.is_some() { Ok(self .lib .as_ref() .unwrap() .name .as_ref() .unwrap() .to_string() .to_snake_case()) } else { Ok(self .package .as_ref() .ok_or_else(|| anyhow!("package section not provided"))? .name .to_string() .to_snake_case()) } } pub fn version(&self) -> String { match &self.package { Some(package) => package.version.to_string(), _ => "0.0.0".to_string(), } } // Climbs each parent directory from the current dir until we find a Cargo.toml pub fn discover() -> Result<Option<WithPath<Manifest>>> { Manifest::discover_from_path(std::env::current_dir()?) 
} // Climbs each parent directory from a given starting directory until we find a Cargo.toml. pub fn discover_from_path(start_from: PathBuf) -> Result<Option<WithPath<Manifest>>> { let mut cwd_opt = Some(start_from.as_path()); while let Some(cwd) = cwd_opt { for f in fs::read_dir(cwd)? { let p = f?.path(); if let Some(filename) = p.file_name() { if filename.to_str() == Some("Cargo.toml") { let m = WithPath::new(Manifest::from_path(&p)?, p); return Ok(Some(m)); } } } // Not found. Go up a directory level. cwd_opt = cwd.parent(); } Ok(None) } } impl Deref for Manifest { type Target = cargo_toml::Manifest; fn deref(&self) -> &Self::Target { &self.0 } } impl WithPath<Config> { pub fn get_program_list(&self) -> Result<Vec<PathBuf>> { // Canonicalize the workspace filepaths to compare with relative paths. let (members, exclude) = self.canonicalize_workspace()?; // Get all candidate programs. // // If [workspace.members] exists, then use that. // Otherwise, default to `programs/*`. let program_paths: Vec<PathBuf> = { if members.is_empty() { let path = self.path().parent().unwrap().join("programs"); fs::read_dir(path)? .filter(|entry| entry.as_ref().map(|e| e.path().is_dir()).unwrap_or(false)) .map(|dir| dir.map(|d| d.path().canonicalize().unwrap())) .collect::<Vec<Result<PathBuf, std::io::Error>>>() .into_iter() .collect::<Result<Vec<PathBuf>, std::io::Error>>()? } else { members } }; // Filter out everything part of the exclude array. Ok(program_paths .into_iter() .filter(|m| !exclude.contains(m)) .collect()) } // TODO: this should read idl dir instead of parsing source. pub fn read_all_programs(&self) -> Result<Vec<Program>> { let mut r = vec![]; for path in self.get_program_list()? 
{ let cargo = Manifest::from_path(&path.join("Cargo.toml"))?; let lib_name = cargo.lib_name()?; let version = cargo.version(); let idl = anchor_syn::idl::file::parse(path.join("src/lib.rs"), version)?; r.push(Program { lib_name, path, idl, }); } Ok(r) } pub fn canonicalize_workspace(&self) -> Result<(Vec<PathBuf>, Vec<PathBuf>)> { let members = self .workspace .members .iter() .map(|m| { self.path() .parent() .unwrap() .join(m) .canonicalize() .unwrap() }) .collect(); let exclude = self .workspace .exclude .iter() .map(|m| { self.path() .parent() .unwrap() .join(m) .canonicalize() .unwrap() }) .collect(); Ok((members, exclude)) } pub fn get_program(&self, name: &str) -> Result<Option<WithPath<Program>>> { for program in self.read_all_programs()? { let cargo_toml = program.path.join("Cargo.toml"); if !cargo_toml.exists() { return Err(anyhow!( "Did not find Cargo.toml at the path: {}", program.path.display() )); } let p_lib_name = Manifest::from_path(&cargo_toml)?.lib_name()?; if name == p_lib_name { let path = self .path() .parent() .unwrap() .canonicalize()? 
.join(&program.path); return Ok(Some(WithPath::new(program, path))); } } Ok(None) } } impl<T> std::ops::Deref for WithPath<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.inner } } impl<T> std::ops::DerefMut for WithPath<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } #[derive(Debug, Default)] pub struct Config { pub anchor_version: Option<String>, pub solana_version: Option<String>, pub registry: RegistryConfig, pub provider: ProviderConfig, pub programs: ProgramsConfig, pub scripts: ScriptsConfig, pub workspace: WorkspaceConfig, pub test: Option<Test>, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RegistryConfig { pub url: String, } impl Default for RegistryConfig { fn default() -> Self { Self { url: "https://anchor.projectserum.com".to_string(), } } } #[derive(Debug, Default)] pub struct ProviderConfig { pub cluster: Cluster, pub wallet: WalletPath, } pub type ScriptsConfig = BTreeMap<String, String>; pub type ProgramsConfig = BTreeMap<Cluster, BTreeMap<String, ProgramDeployment>>; #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct WorkspaceConfig { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub members: Vec<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub exclude: Vec<String>, #[serde(default, skip_serializing_if = "String::is_empty")] pub types: String, } #[derive(ArgEnum, Clap, Clone, PartialEq, Debug)] pub enum BootstrapMode { None, Debian, } #[derive(Debug, Clone)] pub struct BuildConfig { pub verifiable: bool, pub solana_version: Option<String>, pub docker_image: String, pub bootstrap: BootstrapMode, } impl Config { pub fn docker(&self) -> String { let ver = self .anchor_version .clone() .unwrap_or_else(|| crate::DOCKER_BUILDER_VERSION.to_string()); format!("projectserum/build:v{}", ver) } pub fn discover(cfg_override: &ConfigOverride) -> Result<Option<WithPath<Config>>> { Config::_discover().map(|opt| { opt.map(|mut cfg| { if let Some(cluster) = 
cfg_override.cluster.clone() { cfg.provider.cluster = cluster; } if let Some(wallet) = cfg_override.wallet.clone() { cfg.provider.wallet = wallet; } cfg }) }) } // Climbs each parent directory until we find an Anchor.toml. fn _discover() -> Result<Option<WithPath<Config>>> { let _cwd = std::env::current_dir()?; let mut cwd_opt = Some(_cwd.as_path()); while let Some(cwd) = cwd_opt { for f in fs::read_dir(cwd)? { let p = f?.path(); if let Some(filename) = p.file_name() { if filename.to_str() == Some("Anchor.toml") { let cfg = Config::from_path(&p)?; return Ok(Some(WithPath::new(cfg, p))); } } } cwd_opt = cwd.parent(); } Ok(None) } fn from_path(p: impl AsRef<Path>) -> Result<Self> { let mut cfg_file = File::open(&p)?; let mut cfg_contents = String::new(); cfg_file.read_to_string(&mut cfg_contents)?; let cfg = cfg_contents.parse()?; Ok(cfg) } pub fn wallet_kp(&self) -> Result<Keypair> { solana_sdk::signature::read_keypair_file(&self.provider.wallet.to_string()) .map_err(|_| anyhow!("Unable to read keypair file")) } } #[derive(Debug, Serialize, Deserialize)] struct _Config { anchor_version: Option<String>, solana_version: Option<String>, programs: Option<BTreeMap<String, BTreeMap<String, serde_json::Value>>>, registry: Option<RegistryConfig>, provider: Provider, workspace: Option<WorkspaceConfig>, scripts: Option<ScriptsConfig>, test: Option<Test>, } #[derive(Debug, Serialize, Deserialize)] struct Provider { cluster: String, wallet: String, } impl ToString for Config { fn to_string(&self) -> String { let programs = { let c = ser_programs(&self.programs); if c.is_empty() { None } else { Some(c) } }; let cfg = _Config { anchor_version: self.anchor_version.clone(), solana_version: self.solana_version.clone(), registry: Some(self.registry.clone()), provider: Provider { cluster: format!("{}", self.provider.cluster), wallet: self.provider.wallet.to_string(), }, test: self.test.clone(), scripts: match self.scripts.is_empty() { true => None, false => Some(self.scripts.clone()), 
}, programs, workspace: (!self.workspace.members.is_empty() || !self.workspace.exclude.is_empty()) .then(|| self.workspace.clone()), }; toml::to_string(&cfg).expect("Must be well formed") } } impl FromStr for Config { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let cfg: _Config = toml::from_str(s) .map_err(|e| anyhow::format_err!("Unable to deserialize config: {}", e.to_string()))?; Ok(Config { anchor_version: cfg.anchor_version, solana_version: cfg.solana_version, registry: cfg.registry.unwrap_or_default(), provider: ProviderConfig { cluster: cfg.provider.cluster.parse()?, wallet: shellexpand::tilde(&cfg.provider.wallet).parse()?, }, scripts: cfg.scripts.unwrap_or_default(), test: cfg.test, programs: cfg.programs.map_or(Ok(BTreeMap::new()), deser_programs)?, workspace: cfg.workspace.unwrap_or_default(), }) } } fn ser_programs( programs: &BTreeMap<Cluster, BTreeMap<String, ProgramDeployment>>, ) -> BTreeMap<String, BTreeMap<String, serde_json::Value>> { programs .iter() .map(|(cluster, programs)| { let cluster = cluster.to_string(); let programs = programs .iter() .map(|(name, deployment)| { ( name.clone(), to_value(&_ProgramDeployment::from(deployment)), ) }) .collect::<BTreeMap<String, serde_json::Value>>(); (cluster, programs) }) .collect::<BTreeMap<String, BTreeMap<String, serde_json::Value>>>() } fn to_value(dep: &_ProgramDeployment) -> serde_json::Value { if dep.path.is_none() && dep.idl.is_none() { return serde_json::Value::String(dep.address.to_string()); } serde_json::to_value(dep).unwrap() } fn deser_programs( programs: BTreeMap<String, BTreeMap<String, serde_json::Value>>, ) -> Result<BTreeMap<Cluster, BTreeMap<String, ProgramDeployment>>> { programs .iter() .map(|(cluster, programs)| { let cluster: Cluster = cluster.parse()?; let programs = programs .iter() .map(|(name, program_id)| { Ok(( name.clone(), ProgramDeployment::try_from(match &program_id { serde_json::Value::String(address) => _ProgramDeployment { address: 
address.parse()?, path: None, idl: None, }, serde_json::Value::Object(_) => { serde_json::from_value(program_id.clone()) .map_err(|_| anyhow!("Unable to read toml"))? } _ => return Err(anyhow!("Invalid toml type")), })?, )) }) .collect::<Result<BTreeMap<String, ProgramDeployment>>>()?; Ok((cluster, programs)) }) .collect::<Result<BTreeMap<Cluster, BTreeMap<String, ProgramDeployment>>>>() } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Test { pub genesis: Option<Vec<GenesisEntry>>, pub clone: Option<Vec<CloneEntry>>, pub validator: Option<Validator>, pub startup_wait: Option<i32>, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenesisEntry { // Base58 pubkey string. pub address: String, // Filepath to the compiled program to embed into the genesis. pub program: String, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CloneEntry { // Base58 pubkey string. pub address: String, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Validator { // IP address to bind the validator ports. [default: 0.0.0.0] #[serde(default = "default_bind_address")] pub bind_address: String, // Range to use for dynamically assigned ports. [default: 1024-65535] #[serde(skip_serializing_if = "Option::is_none")] pub dynamic_port_range: Option<String>, // Enable the faucet on this port [deafult: 9900]. #[serde(skip_serializing_if = "Option::is_none")] pub faucet_port: Option<u16>, // Give the faucet address this much SOL in genesis. [default: 1000000] #[serde(skip_serializing_if = "Option::is_none")] pub faucet_sol: Option<String>, // Gossip DNS name or IP address for the validator to advertise in gossip. [default: 127.0.0.1] #[serde(skip_serializing_if = "Option::is_none")] pub gossip_host: Option<String>, // Gossip port number for the validator #[serde(skip_serializing_if = "Option::is_none")] pub gossip_port: Option<u16>, // URL for Solana's JSON RPC or moniker. 
#[serde(skip_serializing_if = "Option::is_none")] pub url: Option<String>, // Use DIR as ledger location #[serde(default = "default_ledger_path")] pub ledger: String, // Keep this amount of shreds in root slots. [default: 10000] #[serde(skip_serializing_if = "Option::is_none")] pub limit_ledger_size: Option<String>, // Enable JSON RPC on this port, and the next port for the RPC websocket. [default: 8899] #[serde(default = "default_rpc_port")] pub rpc_port: u16, // Override the number of slots in an epoch. #[serde(skip_serializing_if = "Option::is_none")] pub slots_per_epoch: Option<String>, // Warp the ledger to WARP_SLOT after starting the validator. #[serde(skip_serializing_if = "Option::is_none")] pub warp_slot: Option<String>, } fn default_ledger_path() -> String { ".anchor/test-ledger".to_string() } fn default_bind_address() -> String { "0.0.0.0".to_string() } fn default_rpc_port() -> u16 { 8899 } #[derive(Debug, Clone)] pub struct Program { pub lib_name: String, // Canonicalized path to the program directory. pub path: PathBuf, pub idl: Option<Idl>, } impl Program { pub fn pubkey(&self) -> Result<Pubkey> { self.keypair().map(|kp| kp.pubkey()) } pub fn keypair(&self) -> Result<Keypair> { let file = self.keypair_file()?; solana_sdk::signature::read_keypair_file(file.path()) .map_err(|_| anyhow!("failed to read keypair for program: {}", self.lib_name)) } // Lazily initializes the keypair file with a new key if it doesn't exist. 
pub fn keypair_file(&self) -> Result<WithPath<File>> { fs::create_dir_all("target/deploy/")?; let path = std::env::current_dir() .expect("Must have current dir") .join(format!("target/deploy/{}-keypair.json", self.lib_name)); if path.exists() { return Ok(WithPath::new(File::open(&path)?, path)); } let program_kp = Keypair::generate(&mut rand::rngs::OsRng); let mut file = File::create(&path)?; file.write_all(format!("{:?}", &program_kp.to_bytes()).as_bytes())?; Ok(WithPath::new(file, path)) } pub fn binary_path(&self) -> PathBuf { std::env::current_dir() .expect("Must have current dir") .join(format!("target/deploy/{}.so", self.lib_name)) } } #[derive(Debug, Default)] pub struct ProgramDeployment { pub address: Pubkey, pub path: Option<String>, pub idl: Option<String>, } impl TryFrom<_ProgramDeployment> for ProgramDeployment { type Error = anyhow::Error; fn try_from(pd: _ProgramDeployment) -> Result<Self, Self::Error> { Ok(ProgramDeployment { address: pd.address.parse()?, path: pd.path, idl: pd.idl, }) } } #[derive(Debug, Default, Serialize, Deserialize)] pub struct _ProgramDeployment { pub address: String, pub path: Option<String>, pub idl: Option<String>, } impl From<&ProgramDeployment> for _ProgramDeployment { fn from(pd: &ProgramDeployment) -> Self { Self { address: pd.address.to_string(), path: pd.path.clone(), idl: pd.idl.clone(), } } } pub struct ProgramWorkspace { pub name: String, pub program_id: Pubkey, pub idl: Idl, } #[derive(Debug, Serialize, Deserialize)] pub struct AnchorPackage { pub name: String, pub address: String, pub idl: Option<String>, } impl AnchorPackage { pub fn from(name: String, cfg: &WithPath<Config>) -> Result<Self> { let cluster = &cfg.provider.cluster; if cluster != &Cluster::Mainnet { return Err(anyhow!("Publishing requires the mainnet cluster")); } let program_details = cfg .programs .get(cluster) .ok_or_else(|| anyhow!("Program not provided in Anchor.toml"))? 
.get(&name) .ok_or_else(|| anyhow!("Program not provided in Anchor.toml"))?; let idl = program_details.idl.clone(); let address = program_details.address.to_string(); Ok(Self { name, address, idl }) } } serum_common::home_path!(WalletPath, ".config/solana/id.json");
31.158519
99
0.553014
2997c1c9a0c58b3b456382d1b546d70aefd55778
769
// Copyright 2022 Webb Technologies Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #![cfg_attr(not(feature = "std"), no_std)] sp_api::decl_runtime_apis! { pub trait DKGProposalHandlerApi<Proposal> { fn get_unsigned_proposals() -> Vec<Proposal>; } }
34.954545
75
0.741222
388dfc8149e9b1687634b57cc7483d52c8332c61
14,331
use crate::core::apply_to_state_machine;
use crate::core::RaftCore;
use crate::core::State;
use crate::error::AppendEntriesError;
use crate::raft::AppendEntriesRequest;
use crate::raft::AppendEntriesResponse;
use crate::raft::Entry;
use crate::raft::EntryPayload;
use crate::raft_types::LogIdOptionExt;
use crate::EffectiveMembership;
use crate::LogId;
use crate::MessageSummary;
use crate::RaftNetworkFactory;
use crate::RaftStorage;
use crate::RaftTypeConfig;
use crate::StorageError;
use crate::Update;

impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftCore<C, N, S> {
    /// An RPC invoked by the leader to replicate log entries (§5.3); also used as heartbeat (§5.2).
    ///
    /// See `receiver implementation: AppendEntries RPC` in raft-essentials.md in this repo.
    #[tracing::instrument(level = "debug", skip(self, req))]
    pub(super) async fn handle_append_entries_request(
        &mut self,
        req: AppendEntriesRequest<C>,
    ) -> Result<AppendEntriesResponse<C>, AppendEntriesError<C>> {
        tracing::debug!(last_log_id=?self.last_log_id, ?self.last_applied, msg=%req.summary(), "handle_append_entries_request");

        let msg_entries = req.entries.as_slice();

        // Partial order compare: smaller than or incomparable.
        // A request carrying an older (or incomparable) vote is rejected outright.
        if req.vote < self.vote {
            tracing::debug!(?self.vote, %req.vote, "AppendEntries RPC term is less than current term");
            return Ok(AppendEntriesResponse::HigherVote(self.vote));
        }

        // A valid AppendEntries from the current leader counts as a heartbeat.
        self.update_next_election_timeout(true);

        tracing::debug!("start to check and update to latest term/leader");
        if req.vote > self.vote {
            self.vote = req.vote;
            // Persist the vote before acting on it.
            self.save_vote().await?;

            // If not follower, become follower.
            if !self.target_state.is_follower() && !self.target_state.is_learner() {
                self.set_target_state(State::Follower); // State update will emit metrics.
            }

            self.report_metrics(Update::AsIs);
        }

        // Caveat: [commit-index must not advance the last known consistent log](https://datafuselabs.github.io/openraft/replication.html#caveat-commit-index-must-not-advance-the-last-known-consistent-log)

        // TODO(xp): cleanup commit index at sender side.
        // Cap the leader's commit index at the greatest log id this request lets us
        // verify as consistent: the last entry in the message, or prev_log_id when
        // the message carries no entries (pure heartbeat).
        let valid_commit_index = msg_entries.last().map(|x| Some(x.log_id)).unwrap_or_else(|| req.prev_log_id);
        let valid_committed = std::cmp::min(req.leader_commit, valid_commit_index);

        tracing::debug!("begin log consistency check");

        // There are 5 cases a prev_log_id could have:
        // prev_log_id: 0 1 2 3 4 5
        // +----------------+------------------------+
        // ` 0 ` last_applied ` last_log_id
        // NOTE(review): the diagram above was mangled by reformatting — restore from
        // upstream openraft sources if the original layout is needed.

        let res = self.append_apply_log_entries(req.prev_log_id, msg_entries, valid_committed).await?;

        Ok(res)
    }

    /// Delete local logs in `[start, +oo)` that conflict with the leader, then
    /// refresh `last_log_id` and the effective membership from storage (deleting
    /// logs may have removed the entry the current membership came from).
    #[tracing::instrument(level = "debug", skip(self))]
    async fn delete_conflict_logs_since(&mut self, start: LogId<C::NodeId>) -> Result<(), StorageError<C>> {
        self.storage.delete_conflict_logs_since(start).await?;

        self.last_log_id = self.storage.get_log_state().await?.last_log_id;

        // TODO(xp): get_membership() should have a defensive check to ensure it always returns Some() if node is
        //           initialized. Because a node always commits a membership log as the first log entry.
        let membership = self.storage.get_membership().await?;

        // TODO(xp): This is a dirty patch:
        //           When a node starts in a single-node mode, it does not append an initial log
        //           but instead depends on storage.get_membership() to return a default one.
        //           It would be better if a node always appended an initial log entry.
        let membership = membership.unwrap_or_else(|| EffectiveMembership::new_initial(self.id));

        self.update_membership(membership);

        tracing::debug!("Done update membership");

        Ok(())
    }

    /// Skip log entries that have the same term as the entries the leader sent.
    /// Delete entries since the first mismatching entry from local storage.
    /// Returns a slice of entries that are not in local storage.
    ///
    /// Caveat: Deleting then appending entries are not atomic, thus deleting consistent entries may cause loss of
    /// committed logs.
    ///
    /// E.g., the entries are as following and R1 now is the leader:
    ///
    /// ```text
    /// R1 1,1 1,2 1,3
    /// R2 1,1 1,2
    /// R3
    /// ```
    ///
    /// When the following steps take place, committed entry `{1,2}` is lost:
    ///
    /// - R1 to R2: `append_entries(entries=[{1,2}, {1,3}], prev_log_id={1,1})`
    /// - R2 deletes `{1,2}`
    /// - R2 crash
    /// - R2 elected as leader and only sees 1,1; the committed entry 1,2 is lost.
    ///
    /// **The safe way is to skip every entry that is present in the append-entries message then delete only the
    /// inconsistent entries**.
    ///
    /// Why need to delete:
    ///
    /// The following diagram shows only log term.
    ///
    /// ```text
    /// R1 5
    /// R2 5
    /// R3 5 3 3
    /// R4
    /// R5 2 4 4
    /// ```
    ///
    /// If log 5 is committed by R1, and log 3 is not removed, R5 in future could become a new leader and override log
    /// 5 on R3.
    #[tracing::instrument(level="trace", skip(self, msg_entries), fields(msg_entries=%msg_entries.summary()))]
    async fn find_and_delete_conflict_logs(&mut self, msg_entries: &[Entry<C>]) -> Result<(), StorageError<C>> {
        // All msg_entries are inconsistent logs (matching ones were skipped by the caller).

        tracing::debug!(msg_entries=%msg_entries.summary(), "try to delete_inconsistent_log");

        let l = msg_entries.len();
        if l == 0 {
            return Ok(());
        }

        // If the first inconsistent entry lies beyond our local log there is
        // nothing stored locally that could conflict — nothing to delete.
        if let Some(last_log_id) = self.last_log_id {
            if msg_entries[0].log_id.index > last_log_id.index {
                return Ok(());
            }
        }

        tracing::debug!(
            "delete inconsistent log entries [{}, {}), last_log_id: {:?}, entries: {}",
            msg_entries[0].log_id,
            msg_entries[l - 1].log_id,
            self.last_log_id,
            msg_entries.summary()
        );

        self.delete_conflict_logs_since(msg_entries[0].log_id).await?;

        Ok(())
    }

    /// Append logs only when the first entry (prev_log_id) matches the local store.
    /// This way we keep log continuity.
    #[tracing::instrument(level="trace", skip(self, entries), fields(entries=%entries.summary()))]
    async fn append_apply_log_entries(
        &mut self,
        prev_log_id: Option<LogId<C::NodeId>>,
        entries: &[Entry<C>],
        committed: Option<LogId<C::NodeId>>,
    ) -> Result<AppendEntriesResponse<C>, StorageError<C>> {
        let mismatched = self.does_log_id_match(prev_log_id).await?;

        tracing::debug!(
            "check prev_log_id {:?} match: committed: {:?}, mismatched: {:?}",
            prev_log_id,
            self.committed,
            mismatched,
        );

        if let Some(mismatched_log_id) = mismatched {
            // prev_log_id mismatches, the logs [prev_log_id.index, +oo) are all inconsistent and should be removed.
            if let Some(last_log_id) = self.last_log_id {
                if mismatched_log_id.index <= last_log_id.index {
                    tracing::debug!(%mismatched_log_id, "delete inconsistent log since prev_log_id");
                    self.delete_conflict_logs_since(mismatched_log_id).await?;
                }
            }

            return Ok(AppendEntriesResponse::Conflict);
        }

        // The entries left are all inconsistent logs or absent locally.
        let (n_matching, entries) = self.skip_matching_entries(entries).await?;

        tracing::debug!(
            ?self.committed,
            n_matching,
            entries = %entries.summary(),
            "skip matching entries",
        );

        // Before appending, if an entry overrides an inconsistent one, the entries after it must be deleted first.
        // Raft requires log ids to be in total order by (term, index).
        // Otherwise the log id with max index makes a committed entry invisible in election.
        self.find_and_delete_conflict_logs(entries).await?;

        self.append_log_entries(entries).await?;

        // commit index must not > last_log_id.index
        // This is guaranteed by caller.
        self.committed = committed;

        // replicate_to_state_machine_if_needed() returns whether it already reported
        // metrics; only report here if it did not.
        let need_to_report_metrics = !self.replicate_to_state_machine_if_needed().await?;
        if need_to_report_metrics {
            self.report_metrics(Update::AsIs);
        }

        Ok(AppendEntriesResponse::Success)
    }

    /// Returns the number of entries that match local storage by comparing log_id,
    /// and the unmatched entries.
    ///
    /// The entries in the request that match local ones do not need to be appended again.
    /// Filter them out.
    pub async fn skip_matching_entries<'s, 'e>(
        &'s mut self,
        entries: &'e [Entry<C>],
    ) -> Result<(usize, &'e [Entry<C>]), StorageError<C>> {
        let l = entries.len();

        for i in 0..l {
            let log_id = entries[i].log_id;

            // Committed entries are known-consistent; never re-append or compare them.
            if Some(log_id) <= self.committed {
                continue;
            }

            let index = log_id.index;

            // TODO(xp): this is a naive impl. Batch loading entries from storage.
            let log = self.storage.try_get_log_entry(index).await?;

            if let Some(local) = log {
                if local.log_id == log_id {
                    continue;
                }
            }

            // First entry that is absent locally or has a different log id:
            // everything from here on must be (re)appended.
            return Ok((i, &entries[i..]));
        }

        Ok((l, &[]))
    }

    /// Return the mismatching log id if the local store contains the log id.
    ///
    /// This is a way to check whether the entries in an append-entries request are consecutive with local logs.
    /// Raft only accepts consecutive logs to be appended.
    pub async fn does_log_id_match(
        &mut self,
        remote_log_id: Option<LogId<C::NodeId>>,
    ) -> Result<Option<LogId<C::NodeId>>, StorageError<C>> {
        let log_id = match remote_log_id {
            None => {
                // No prev_log_id: the leader is sending from the very beginning; always consistent.
                return Ok(None);
            }
            Some(x) => x,
        };

        // Committed entries are always safe and are consistent to a valid leader.
        if remote_log_id <= self.committed {
            return Ok(None);
        }

        let index = log_id.index;
        let log = self.storage.try_get_log_entry(index).await?;
        tracing::debug!(
            "check log id matching: local: {:?} remote: {}",
            log.as_ref().map(|x| x.log_id),
            log_id
        );

        if let Some(local) = log {
            if local.log_id == log_id {
                return Ok(None);
            }
        }

        Ok(Some(log_id))
    }

    /// Append the given entries to the log.
    ///
    /// Configuration changes are also detected and applied here. See `configuration changes`
    /// in the raft-essentials.md in this repo.
    #[tracing::instrument(level = "trace", skip(self, entries), fields(entries=%entries.summary()))]
    async fn append_log_entries(&mut self, entries: &[Entry<C>]) -> Result<(), StorageError<C>> {
        if entries.is_empty() {
            return Ok(());
        }

        // Check the given entries for any config changes and take the most recent.
        let last_conf_change = entries
            .iter()
            .filter_map(|ent| match &ent.payload {
                EntryPayload::Membership(conf) => Some(EffectiveMembership::new(ent.log_id, conf.clone())),
                _ => None,
            })
            .last();

        // TODO(xp): only when last_conf_change is newer than the current one.
        //           For now it is guaranteed by `delete_logs()`, for it updates membership config when deleting logs,
        //           and `skip_matching_entries()`, for it does not re-append existent log entries.
        //           This task should be done by StorageAdaptor.
        if let Some(conf) = last_conf_change {
            tracing::debug!({membership=?conf}, "applying new membership config received from leader");
            self.update_membership(conf);
        };

        // Replicate entries to log (same as append, but in follower mode).
        let entry_refs = entries.iter().collect::<Vec<_>>();
        self.storage.append_to_log(&entry_refs).await?;
        if let Some(entry) = entries.last() {
            self.last_log_id = Some(entry.log_id);
        }

        Ok(())
    }

    /// Replicate any outstanding entries to the state machine for which it is safe to do so.
    ///
    /// Very importantly, this routine must not block the main control loop main task, else it
    /// may cause the Raft leader to timeout the requests to this node.
    ///
    /// Returns whether it called `report_metrics`, so the caller does not need to call it again.
    #[tracing::instrument(level = "debug", skip(self))]
    async fn replicate_to_state_machine_if_needed(&mut self) -> Result<bool, StorageError<C>> {
        tracing::debug!(?self.last_applied, ?self.committed, "replicate_to_sm_if_needed");

        // If we don't have any new entries to replicate, then do nothing.
        if self.committed <= self.last_applied {
            tracing::debug!(
                "committed({:?}) <= last_applied({:?}), return",
                self.committed,
                self.last_applied
            );
            return Ok(false);
        }

        // Drain entries from the beginning of the cache up to commit index.

        let entries = self.storage.get_log_entries(self.last_applied.next_index()..self.committed.next_index()).await?;

        // unwrap(): the range above is non-empty because committed > last_applied.
        let last_log_id = entries.last().map(|x| x.log_id).unwrap();

        tracing::debug!("entries: {}", entries.as_slice().summary());
        tracing::debug!(?last_log_id);

        let entries_refs: Vec<_> = entries.iter().collect();
        apply_to_state_machine(&mut self.storage, &entries_refs, self.config.max_applied_log_to_keep).await?;

        self.last_applied = Some(last_log_id);

        self.report_metrics(Update::AsIs);
        self.trigger_log_compaction_if_needed(false).await;

        Ok(true)
    }
}
38.420912
205
0.601842
9bd779edaed03f83adfb423d080fa5ff8d1f31c7
22,095
use crate::{error::Result,
            hcore::{fs as hfs,
                    package::{self, ident::PackageIdent, PackageInstall}}};
use bimap::BiMap;
use petgraph::{self,
               graph::NodeIndex,
               stable_graph::StableGraph,
               visit::{Bfs, Reversed, Walker}};
use std::path::Path;

/// An in-memory directed graph of installed packages and their dependencies.
/// Edges point from a package to each of its direct dependencies.
pub struct PackageGraph {
    // Bidirectional ident <-> node-index lookup; kept in sync with `graph`
    // (see the assertions in `node_idx` and `extend`).
    nodes: BiMap<PackageIdent, NodeIndex>,
    // StableGraph so node indices stay valid across removals.
    graph: StableGraph<PackageIdent, usize, petgraph::Directed>,
}

impl PackageGraph {
    /// Create a graph with no nodes and no edges.
    fn empty() -> Self {
        PackageGraph { nodes: BiMap::new(),
                       graph: StableGraph::new(), }
    }

    /// Construct a `PackageGraph` from all the packages stored in the biome `pkgs`
    /// directory
    pub fn from_root_path(fs_root_path: &Path) -> Result<Self> {
        let mut pg = Self::empty();
        pg.load(fs_root_path)?;
        Ok(pg)
    }

    /// Load a set of packages that are stored in a package_path under a biome
    /// root directory
    fn load(&mut self, fs_root_path: &Path) -> Result<()> {
        let package_path = hfs::pkg_root_path(Some(&fs_root_path));
        let idents = package::all_packages(&package_path)?;
        for ident in idents {
            let p = PackageInstall::load(&ident, Some(fs_root_path))?;
            let deps = p.deps()?;
            self.extend(&ident, &deps);
        }
        Ok(())
    }

    /// Return (and possibly create) a NodeIndex for a given PackageIdent.
    /// Upon returning, the node will be guaranteed to be in the graph
    fn node_idx(&mut self, package: &PackageIdent) -> NodeIndex {
        match self.nodes.get_by_left(package) {
            Some(&idx) => idx,
            None => {
                let idx = self.graph.add_node(package.clone());
                // BiMap only allows a value to appear one time in the left
                // and right hand sides of the map.  Let's assert that we're not going to
                // move the `idx` from one package to another.
                assert!(!self.nodes.contains_right(&idx));
                self.nodes.insert(package.clone(), idx);
                idx
            }
        }
    }

    /// Extend a graph by adding in dependencies for a package
    // NOTE: I suppose in an ideal world, `deps` would be something
    // like a slice of &PackageIdent to cut down on needless cloning,
    // but it's fine for now.
    pub fn extend(&mut self, package: &PackageIdent, deps: &[PackageIdent]) -> (usize, usize) {
        let idx = self.node_idx(package);
        for dep in deps {
            let dep_idx = self.node_idx(dep);
            self.graph.extend_with_edges(&[(idx, dep_idx)]);
        }

        // Map and graph must never drift apart.
        assert_eq!(self.graph.node_count(), self.nodes.len());

        (self.graph.node_count(), self.graph.edge_count())
    }

    /// Return the dependencies of a given Package Identifier
    pub fn ordered_deps(&self, package: &PackageIdent) -> Vec<&PackageIdent> {
        self.nodes
            .get_by_left(package)
            .map(|&idx| {
                // BFS returns the original node as the first node
                // `skip` it here so it's not in the result Vec
                let bfs = Bfs::new(&self.graph, idx).iter(&self.graph).skip(1);
                bfs.map(|child| self.graph.node_weight(child).unwrap())
                   .collect()
            })
            .unwrap_or_default()
    }

    /// Return the dependencies of a given Package Identifier as `PackageIdent`s. This
    /// allows you to modify the underlying graph (via `PackageGraph::remove`) while traversing the
    /// dependencies
    pub fn owned_ordered_deps(&self, package: &PackageIdent) -> Vec<PackageIdent> {
        self.ordered_deps(package)
            .iter()
            .map(|&p| p.clone())
            .collect()
    }

    /// Return the reverse dependencies of a given Package Identifier
    pub fn ordered_reverse_deps(&self, package: &PackageIdent) -> Vec<&PackageIdent> {
        self.nodes
            .get_by_left(package)
            .map(|&idx| {
                // BFS returns the original node as the first node
                // `skip` it here so it's not in the result Vec
                let bfs = Bfs::new(&self.graph, idx).iter(Reversed(&self.graph))
                                                    .skip(1);
                bfs.map(|child| self.graph.node_weight(child).unwrap())
                   .collect()
            })
            .unwrap_or_default()
    }

    /// Remove a package from a graph
    ///
    /// This will not remove the package if it is a dependency of any package
    ///
    /// Returns true if package is removed
    /// Returns false if package was not removed
    pub fn remove(&mut self, package: &PackageIdent) -> bool {
        // Only remove when the package exists AND nothing depends on it.
        if let Some(0) = self.count_rdeps(package) {
            self.do_remove(package)
        } else {
            false
        }
    }

    /// Cleanly remove the node from both the node list and the graph
    ///
    /// Returns true if package is removed
    /// Returns false if package was not in graph
    fn do_remove(&mut self, package: &PackageIdent) -> bool {
        self.nodes
            .remove_by_left(package)
            .map(|(_, idx)| {
                match self.graph.remove_node(idx) {
                    Some(ident) => assert_eq!(&ident, package),
                    None => {
                        panic!("removed node from map but it wasn't in the graph: {}",
                               package)
                    }
                }
                true
            })
            .unwrap_or(false)
    }

    /// Does a specific PackageIdent appear in the graph?
    pub fn has_package(&self, package: &PackageIdent) -> bool {
        self.nodes.contains_left(package)
    }

    /// Count edges incident to `package` in the given direction, or `None`
    /// if the package is not in the graph.
    fn count_edges(&self, package: &PackageIdent, direction: petgraph::Direction) -> Option<usize> {
        self.nodes
            .get_by_left(package)
            .map(|&idx| self.graph.neighbors_directed(idx, direction).count())
    }

    /// Returns the number of packages which have this package as a dependency
    ///
    /// Returns `None` if the package is not in the graph
    pub fn count_rdeps(&self, ident: &PackageIdent) -> Option<usize> {
        self.count_edges(ident, petgraph::Incoming)
    }

    /// Direct neighbors of `package` in the given direction, resolved back to idents.
    fn neighbours(&self, package: &PackageIdent, direction: petgraph::Direction) -> Vec<&PackageIdent> {
        self.nodes
            .get_by_left(package)
            .map(|&idx| {
                self.graph
                    .neighbors_directed(idx, direction)
                    .map(|n| self.nodes.get_by_right(&n).unwrap()) // unwrap here is ok as we have consistency between `self.graph` and `self.nodes`
                    .collect()
            })
            .unwrap_or_default()
    }

    /// Returns the direct dependencies for a package.
    ///
    /// Returns an empty `Vec` if the package is not in the graph
    pub fn deps(&self, package: &PackageIdent) -> Vec<&PackageIdent> {
        self.neighbours(package, petgraph::Outgoing)
    }

    /// Returns the direct reverse dependencies for a package.
    ///
    /// Returns an empty `Vec` if the package is not in the graph
    pub fn rdeps(&self, package: &PackageIdent) -> Vec<&PackageIdent> {
        self.neighbours(package, petgraph::Incoming)
    }
}

impl Default for PackageGraph {
    fn default() -> PackageGraph { PackageGraph::empty() }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::str::FromStr;

    impl PackageGraph {
        /// Returns the number of dependencies for a package
        ///
        /// Returns `None` if the package is not in the graph
        pub fn count_deps(&self, ident: &PackageIdent) -> Option<usize> {
            self.count_edges(ident, petgraph::Outgoing)
        }

        /// Returns the number of packages in the package graph
        fn node_count(&self) -> usize { self.graph.node_count() }

        /// Returns the number of edges (dependencies) in the package graph
        fn edge_count(&self) -> usize { self.graph.edge_count() }
    }

    // A package plus its direct dependencies, used to build test graphs.
    struct PackageDeps {
        ident: PackageIdent,
        deps:  Vec<PackageIdent>,
    }

    // Build a PackageGraph from a list of (package, deps) fixtures.
    fn build(packages: &[PackageDeps]) -> PackageGraph {
        let mut graph = PackageGraph::empty();
        for p in packages.iter() {
            graph.extend(&p.ident, &p.deps);
        }
        graph
    }

    fn empty_package_deps(ident: PackageIdent) -> PackageDeps {
        PackageDeps { ident, deps: vec![] }
    }

    fn package_deps(ident: PackageIdent, deps: &[PackageIdent]) -> PackageDeps {
        PackageDeps { ident, deps: deps.to_vec() }
    }

    fn empty_vec() -> Vec<&'static PackageIdent> { vec![] }

    #[test]
    fn empty_graph() {
        let packages = Vec::new();
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 0);
        assert_eq!(graph.edge_count(), 0);
    }

    #[test]
    fn no_deps_graph() {
        let packages =
            vec![empty_package_deps(PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap()),
                 empty_package_deps(PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap()),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 2);
        assert_eq!(graph.edge_count(), 0);
    }

    #[test]
    fn simplest_graph() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let packages = vec![empty_package_deps(a.clone()), package_deps(b.clone(), &[a.clone()]),];
        let graph = build(&packages);
        assert!(graph.has_package(&a));
        assert!(graph.has_package(&b));
        assert_eq!(graph.node_count(), 2);
        assert_eq!(graph.edge_count(), 1);
    }

    #[test]
    fn different_origins_graph() {
        // We have non-standard implementations of `Ord`, `PartialOrd` for `PackageIdent`. Make
        // sure this doesn't mess with the requirements of `BiMap`
        let packages =
            vec![empty_package_deps(PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap()),
                 empty_package_deps(PackageIdent::from_str("mine/redis/2.1.0/20180704142101").unwrap()),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 2);
        assert_eq!(graph.edge_count(), 0);
    }

    #[test]
    fn count_deps_non_existent_package() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a]),
                            package_deps(d, &[b, c]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        let does_not_exist = PackageIdent::from_str("core/baz").unwrap();
        assert!(graph.count_deps(&does_not_exist).is_none());
        assert!(graph.count_rdeps(&does_not_exist).is_none());
    }

    #[test]
    fn count_deps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a.clone()]),
                            package_deps(d.clone(), &[b.clone(), c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        assert_eq!(graph.count_deps(&a).unwrap(), 0);
        assert_eq!(graph.count_deps(&b).unwrap(), 1);
        assert_eq!(graph.count_deps(&c).unwrap(), 1);
        assert_eq!(graph.count_deps(&d).unwrap(), 2);
    }

    #[test]
    fn deps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a.clone()]),
                            package_deps(d.clone(), &[b.clone(), c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        assert_eq!(graph.deps(&a), empty_vec());
        assert_eq!(graph.deps(&b), vec![&a]);
        assert_eq!(graph.deps(&c), vec![&a]);
        let result = graph.deps(&d);
        assert!(result.contains(&b.as_ref()));
        assert!(result.contains(&c.as_ref()));
    }

    #[test]
    fn count_rdeps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a.clone()]),
                            package_deps(d.clone(), &[b.clone(), c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        assert_eq!(graph.count_rdeps(&a).unwrap(), 2);
        assert_eq!(graph.count_rdeps(&b).unwrap(), 1);
        assert_eq!(graph.count_rdeps(&c).unwrap(), 1);
        assert_eq!(graph.count_rdeps(&d).unwrap(), 0);
    }

    #[test]
    fn rdeps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a.clone()]),
                            package_deps(d.clone(), &[b.clone(), c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        let rdeps = graph.rdeps(&a);
        assert!(rdeps.contains(&b.as_ref()));
        assert!(rdeps.contains(&c.as_ref()));
        assert_eq!(graph.rdeps(&b), vec![&d]);
        assert_eq!(graph.rdeps(&c), vec![&d]);
        assert_eq!(graph.rdeps(&d), empty_vec());
    }

    #[test]
    fn remove_package_no_rdeps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a]),
                            package_deps(d.clone(), &[b.clone(), c.clone()]),];
        let mut graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        assert_eq!(graph.count_rdeps(&d).unwrap(), 0);
        assert!(graph.remove(&d));
        // package count decremented on remove
        assert!(!graph.has_package(&d));
        assert_eq!(graph.node_count(), 3);

        // rdeps of dependencies should have decreased too
        assert_eq!(graph.count_rdeps(&b).unwrap(), 0);
        assert_eq!(graph.count_rdeps(&c).unwrap(), 0);
    }

    #[test]
    fn cant_remove_package_with_rdeps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let packages = vec![empty_package_deps(a.clone()), package_deps(b, &[a.clone()]),];
        let mut graph = build(&packages);
        assert_eq!(graph.node_count(), 2);
        assert_eq!(graph.edge_count(), 1);

        assert_eq!(graph.count_rdeps(&a).unwrap(), 1);
        assert!(!graph.remove(&a));
        assert_eq!(graph.node_count(), 2);
    }

    #[test]
    fn ordered_deps_of_empty_deps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let packages = vec![empty_package_deps(a.clone())];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 1);
        assert_eq!(graph.edge_count(), 0);

        let odeps = graph.ordered_deps(&a);
        assert_eq!(odeps, empty_vec());
    }

    #[test]
    fn ordered_deps_non_existent_package() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/foo/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[a]),
                            package_deps(d, &[b, c]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 4);

        let does_not_exist = PackageIdent::from_str("core/baz").unwrap();
        assert_eq!(graph.ordered_deps(&does_not_exist), empty_vec());
        assert_eq!(graph.ordered_reverse_deps(&does_not_exist), empty_vec());
    }

    #[test]
    fn ordered_deps_are_in_order() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/baz/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[b.clone()]),
                            package_deps(d.clone(), &[c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 3);

        let odeps = graph.ordered_deps(&d);
        let expected = vec![&c, &b, &a];
        assert_eq!(expected, odeps);
    }

    #[test]
    fn owned_ordered_deps_are_in_order() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/baz/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[b.clone()]),
                            package_deps(d.clone(), &[c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 3);

        let odeps = graph.owned_ordered_deps(&d);
        let expected = vec![c, b, a];
        assert_eq!(expected, odeps);
    }

    #[test]
    fn ordered_reverse_deps_of_empty_deps() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let packages = vec![empty_package_deps(a.clone())];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 1);
        assert_eq!(graph.edge_count(), 0);

        let odeps = graph.ordered_reverse_deps(&a);
        assert_eq!(odeps, empty_vec());
    }

    #[test]
    fn ordered_reverse_deps_are_in_order() {
        let a = PackageIdent::from_str("core/redis/2.1.0/20180704142101").unwrap();
        let b = PackageIdent::from_str("core/foo/1.0/20180704142702").unwrap();
        let c = PackageIdent::from_str("core/bar/1.0/20180704142805").unwrap();
        let d = PackageIdent::from_str("core/baz/1.0/20180704142805").unwrap();
        let packages = vec![empty_package_deps(a.clone()),
                            package_deps(b.clone(), &[a.clone()]),
                            package_deps(c.clone(), &[b.clone()]),
                            package_deps(d.clone(), &[c.clone()]),];
        let graph = build(&packages);
        assert_eq!(graph.node_count(), 4);
        assert_eq!(graph.edge_count(), 3);

        let odeps = graph.ordered_reverse_deps(&a);
        let expected = vec![&b, &c, &d];
        assert_eq!(expected, odeps);
    }
}
39.245115
149
0.557773
9cac5765b97658bc0d672131e069f78192922467
43
// Module index: re-exports this crate's public submodules.
// NOTE(review): the purposes below are inferred from the names only —
// confirm against the contents of each submodule.
pub mod api; // externally-facing API layer (presumably)
pub mod convert; // data-conversion helpers (presumably)
pub mod web; // web/HTTP-related functionality (presumably)
10.75
16
0.72093
7961af9d12a9382bccd0bb397a1d74987ca93636
3,836
use super::{Notifier as NotifierTrait, Result, ResultExt};
use crate::config;
use crate::config::ValueExt;
use crate::protocol::{Packet, Event};

use std::net::{UdpSocket, IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs};

/// A notifier that broadcasts events over an IPv4 UDP multicast group and can
/// listen on the same group for events sent by peers.
pub struct Notifier {
    // Local address the UDP socket is bound to (for both send and listen).
    bind_addr: SocketAddr,
    // IPv4 multicast group address events are sent to / received from.
    addr: SocketAddr
}

impl NotifierTrait for Notifier {
    /// Build a `Notifier` from the `notifier.multicast` section of the config.
    ///
    /// Requires `notifier.multicast.addr` to resolve to an IPv4 multicast
    /// address and `notifier.multicast.bind_addr` to resolve to an IPv4
    /// address; fails with a chained error otherwise.
    fn from_config (notifier: &config::NotifierConfig) -> Result<Self> where Self: Sized {
        let config = notifier.config.as_ref()
            .chain_err (|| config::ErrorKind::MissingOption ("notifier.multicast"))
            .chain_err (|| "the notifier 'multicast' requires to be configured")?;

        // Get addr and bind_addr
        let addr = config
            .get_as_str_or_invalid_key ("notifier.multicast.addr")
            .chain_err (|| "failed to find an address for the notifier 'multicast'")?
            .to_socket_addrs()
            .chain_err (|| "failed to parse 'notifier.multicast.addr' as a socket address")?
            // Only an IPv4 multicast address is acceptable as the group address.
            .find (|&addr| addr.is_ipv4() && addr.ip().is_multicast())
            .chain_err (|| "failed to find an IPv4 multicast address for 'notifier.multicast.addr'")?;

        let bind_addr = config
            .get_as_str_or_invalid_key ("notifier.multicast.bind_addr")
            .chain_err (|| "failed to find a bind address for the notifier 'multicast'")?
            .to_socket_addrs()
            .chain_err (|| "failed to parse 'notifier.multicast.bind_addr' as a socket address")?
            .find (|&addr| addr.is_ipv4())
            .chain_err (|| "failed to find an IPv4 address for 'notifier.multicast.bind_addr'")?;

        trace!(target: "notifier::multicast", "initialized, addr = {}, bind_addr = {}", addr, bind_addr);

        Ok(Self { addr, bind_addr })
    }

    /// Serialize `event` as a `Packet` and send it to the multicast group.
    ///
    /// A fresh socket is bound for every call; errors at each step are chained
    /// with context.
    fn notify (&mut self, event: Event) -> Result<()> {
        let socket = UdpSocket::bind (self.bind_addr)
            .chain_err (|| format!("failed to bind to {}", self.bind_addr))?;

        // Serialize the packet into a local buffer before sending.
        let mut vec: Vec<u8> = Vec::new();
        Packet::Event(event).write (&mut vec)
            .chain_err (|| format!("failed to write event packet '{}' to a local buffer", event))?;

        socket.send_to (&vec, self.addr)
            .chain_err (|| format!("failed to send event packet '{}' to {}", event, self.addr))?;

        debug!(target: "notifier::multicast", "successfully notified event \"{}\"", event);

        Ok(())
    }

    /// Join the multicast group and block forever, invoking `on_event` for each
    /// successfully decoded `Packet::Event` received (with the sender address).
    ///
    /// Undecodable packets are logged and skipped; this function only returns
    /// on a socket error.
    fn listen(&mut self, on_event: &dyn Fn(Event, Option<SocketAddr>) -> ()) -> Result<()> {
        // 0.0.0.0: let the OS pick the interface used to join the group.
        let any = Ipv4Addr::new (0, 0, 0, 0);

        let socket = UdpSocket::bind (self.bind_addr)
            .chain_err (|| format!("failed to bind to {}", self.bind_addr))?;
        socket
            .join_multicast_v4 (match self.addr.ip() {
                IpAddr::V4(ref ip) => ip,
                // from_config() guarantees an IPv4 group address, so this is unreachable.
                IpAddr::V6(..) => panic!("Got IPv6 address when expecting IPv4")
            }, &any)
            .chain_err (|| format!("failed to join multicast group '{}'", self.addr))?;

        // for now only support 2-byte packets
        // NOTE(review): buffer is 3 bytes for 2-byte packets — presumably the
        // extra byte lets oversized datagrams be detected/rejected by
        // Packet::read; confirm against the protocol module.
        let mut buf = vec![0; 3];

        loop {
            let (number_of_bytes, src_addr) = socket.recv_from (&mut buf)
                .chain_err (|| "failed to receive data from multicast socket")?;
            let mut slice = &buf[..number_of_bytes];

            match Packet::read (&mut slice) {
                Ok(packet) => {
                    if let Packet::Event(event) = packet {
                        debug!(target: "notifier::multicast", "received event \"{}\"", event);
                        on_event(event, Some(src_addr))
                    }
                },
                Err(error) => warn!(target: "notifier::multicast", "can't decode incoming packet: {}", error)
            }
        }
    }
}
44.604651
99
0.556569
f850a1d280b5a3b0b7645e18314b1f245d5c98f9
1,317
use super::*;

#[test]
fn with_locked_adds_heap_message_to_mailbox_and_returns_message() {
    with_process_arc(|sender| {
        let config = Config::with_source_file(file!());

        TestRunner::new(config)
            .run(&strategy::term(sender.clone()), |msg| {
                // Sending to the process's own pid must succeed and echo the
                // message back as the return value.
                let dest = sender.pid_term();
                prop_assert_eq!(erlang::send_2(dest, msg, &sender), Ok(msg));

                // The message must now be queued in the sender's own mailbox.
                prop_assert!(has_process_message(&sender, msg));

                Ok(())
            })
            .unwrap();
    });
}

#[test]
fn without_locked_adds_process_message_to_mailbox_and_returns_message() {
    with_process_arc(|sender| {
        let config = Config::with_source_file(file!());

        TestRunner::new(config)
            .run(&strategy::term(sender.clone()), |msg| {
                // This time deliver to a distinct process.
                let receiver = process::test(&sender);
                let dest = receiver.pid_term();

                prop_assert_eq!(erlang::send_2(dest, msg, &sender), Ok(msg));

                // The message lands in the receiving process's mailbox.
                prop_assert!(has_process_message(&receiver, msg));

                Ok(())
            })
            .unwrap();
    });
}
30.627907
83
0.551253
bb35fb9df8c9e03aa4a04683e452aa4a1f77a249
3,581
// NOTE(review): svd2rust-style auto-generated register accessors for WORD012
// (message-buffer data word, 4 data bytes). Prefer regenerating from the SVD
// over hand-editing this file.
#[doc = "Reader of register WORD012"]
pub type R = crate::R<u32, super::WORD012>;
#[doc = "Writer for register WORD012"]
pub type W = crate::W<u32, super::WORD012>;
#[doc = "Register WORD012 `reset()`'s with value 0"]
impl crate::ResetValue for super::WORD012 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type { 0 }
}
#[doc = "Reader of field `DATA_BYTE_3`"]
pub type DATA_BYTE_3_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_3`"]
pub struct DATA_BYTE_3_W<'a> {
    w: &'a mut W,
}
impl<'a> DATA_BYTE_3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits [7:0].
        self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff);
        self.w
    }
}
#[doc = "Reader of field `DATA_BYTE_2`"]
pub type DATA_BYTE_2_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_2`"]
pub struct DATA_BYTE_2_W<'a> {
    w: &'a mut W,
}
impl<'a> DATA_BYTE_2_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits [15:8].
        self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8);
        self.w
    }
}
#[doc = "Reader of field `DATA_BYTE_1`"]
pub type DATA_BYTE_1_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_1`"]
pub struct DATA_BYTE_1_W<'a> {
    w: &'a mut W,
}
impl<'a> DATA_BYTE_1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits [23:16].
        self.w.bits = (self.w.bits & !(0xff << 16)) | (((value as u32) & 0xff) << 16);
        self.w
    }
}
#[doc = "Reader of field `DATA_BYTE_0`"]
pub type DATA_BYTE_0_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_0`"]
pub struct DATA_BYTE_0_W<'a> {
    w: &'a mut W,
}
impl<'a> DATA_BYTE_0_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits [31:24].
        self.w.bits = (self.w.bits & !(0xff << 24)) | (((value as u32) & 0xff) << 24);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:7 - Data byte 3 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_3(&self) -> DATA_BYTE_3_R {
        DATA_BYTE_3_R::new((self.bits & 0xff) as u8)
    }
    #[doc = "Bits 8:15 - Data byte 2 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_2(&self) -> DATA_BYTE_2_R {
        DATA_BYTE_2_R::new(((self.bits >> 8) & 0xff) as u8)
    }
    #[doc = "Bits 16:23 - Data byte 1 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_1(&self) -> DATA_BYTE_1_R {
        DATA_BYTE_1_R::new(((self.bits >> 16) & 0xff) as u8)
    }
    #[doc = "Bits 24:31 - Data byte 0 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_0(&self) -> DATA_BYTE_0_R {
        DATA_BYTE_0_R::new(((self.bits >> 24) & 0xff) as u8)
    }
}
impl W {
    #[doc = "Bits 0:7 - Data byte 3 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_3(&mut self) -> DATA_BYTE_3_W {
        DATA_BYTE_3_W { w: self }
    }
    #[doc = "Bits 8:15 - Data byte 2 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_2(&mut self) -> DATA_BYTE_2_W {
        DATA_BYTE_2_W { w: self }
    }
    #[doc = "Bits 16:23 - Data byte 1 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_1(&mut self) -> DATA_BYTE_1_W {
        DATA_BYTE_1_W { w: self }
    }
    #[doc = "Bits 24:31 - Data byte 0 of Rx/Tx frame."]
    #[inline(always)]
    pub fn data_byte_0(&mut self) -> DATA_BYTE_0_W {
        DATA_BYTE_0_W { w: self }
    }
}
31.690265
86
0.573862
e655ce5c362b889577aeb84884eb0f569c99b470
3,964
// Copyright 2018-2020 argmin developers // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::core::{ArgminFloat, ArgminOp, Error}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use std::fmt::{Debug, Display}; /// Fake Operators for testing /// No-op operator with free choice of the types #[derive( Clone, Default, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd, Hash, Copy, )] pub struct NoOperator<T, U, H, J, F> { /// Fake parameter param: std::marker::PhantomData<T>, /// Fake output output: std::marker::PhantomData<U>, /// Fake Hessian hessian: std::marker::PhantomData<H>, /// Fake Jacobian jacobian: std::marker::PhantomData<J>, /// Fake Float float: std::marker::PhantomData<F>, } impl<T, U, H, J, F> NoOperator<T, U, H, J, F> { /// Constructor #[allow(dead_code)] pub fn new() -> Self { NoOperator { param: std::marker::PhantomData, output: std::marker::PhantomData, hessian: std::marker::PhantomData, jacobian: std::marker::PhantomData, float: std::marker::PhantomData, } } } impl<T, U, H, J, F> Display for NoOperator<T, U, H, J, F> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "NoOperator") } } impl<T, U, H, J, F> ArgminOp for NoOperator<T, U, H, J, F> where T: Clone + Default + Debug + Send + Sync + Serialize + DeserializeOwned, U: Clone + Default + Debug + Send + Sync + Serialize + DeserializeOwned, H: Clone + Default + Debug + Send + Sync + Serialize + DeserializeOwned, J: Clone + Default + Debug + Send + Sync + Serialize + DeserializeOwned, F: ArgminFloat, { type Param = T; type Output = U; type Hessian = H; type Jacobian = J; type Float = F; /// Do nothing, really. 
fn apply(&self, _p: &Self::Param) -> Result<Self::Output, Error> { Ok(Self::Output::default()) } /// Do nothing, really. fn gradient(&self, _p: &Self::Param) -> Result<Self::Param, Error> { Ok(Self::Param::default()) } /// Do nothing, really. fn hessian(&self, _p: &Self::Param) -> Result<Self::Hessian, Error> { Ok(Self::Hessian::default()) } /// Do nothing, really. fn modify(&self, _p: &Self::Param, _t: Self::Float) -> Result<Self::Param, Error> { Ok(Self::Param::default()) } } /// Minimal No-op operator which does nothing, really. #[derive( Clone, Default, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd, Hash, Copy, )] pub struct MinimalNoOperator {} /// No-op operator with fixed types (See `ArgminOp` impl on `MinimalNoOperator`) impl MinimalNoOperator { /// Constructor #[allow(dead_code)] pub fn new() -> Self { MinimalNoOperator {} } } impl Display for MinimalNoOperator { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "MinimalNoOperator") } } impl ArgminOp for MinimalNoOperator { type Param = Vec<f64>; type Output = f64; type Hessian = Vec<Vec<f64>>; type Jacobian = Vec<f64>; type Float = f64; /// Do nothing, really. fn apply(&self, _p: &Self::Param) -> Result<Self::Output, Error> { unimplemented!() } /// Do nothing, really. fn gradient(&self, _p: &Self::Param) -> Result<Self::Param, Error> { unimplemented!() } /// Do nothing, really. fn hessian(&self, _p: &Self::Param) -> Result<Self::Hessian, Error> { unimplemented!() } /// Do nothing, really. fn modify(&self, _p: &Self::Param, _t: f64) -> Result<Self::Param, Error> { unimplemented!() } }
29.362963
94
0.609233
64ddfa7d85d691ee0252b0115fd9f463a6233646
88,747
use crate::cx::*; use libc; use libc::timeval; use makepad_x11_sys as X11_sys; use std::collections::{HashMap, BTreeSet, VecDeque}; use std::ffi::CString; use std::ffi::CStr; use std::slice; use std::sync::Mutex; use std::fs::File; use std::io::Write; use std::os::unix::io::FromRawFd; use std::mem; use std::os::raw::{c_char, c_uchar, c_int, c_uint, c_ulong, c_long, c_void}; use std::ptr; use time::precise_time_ns; #[cfg(target_arch = "arm")] pub const LINUX_CUSTOM_WINDOW_CHROME: bool = false; #[cfg(not(target_arch = "arm"))] pub const LINUX_CUSTOM_WINDOW_CHROME: bool = true; static mut GLOBAL_XLIB_APP: *mut XlibApp = 0 as *mut _; pub struct XlibApp { pub display: *mut X11_sys::Display, pub xim: X11_sys::XIM, pub clipboard: String, pub display_fd: c_int, pub signal_fds: [c_int; 2], pub window_map: HashMap<c_ulong, *mut XlibWindow>, pub time_start: u64, pub last_scroll_time: f64, pub last_click_time: f64, pub last_click_pos: (i32, i32), pub event_callback: Option<*mut dyn FnMut(&mut XlibApp, &mut Vec<Event>) -> bool>, pub event_recur_block: bool, pub event_loop_running: bool, pub timers: VecDeque<XlibTimer>, pub free_timers: Vec<usize>, pub signals: Mutex<Vec<Event >>, pub loop_block: bool, pub current_cursor: MouseCursor, pub atom_clipboard: X11_sys::Atom, pub atom_net_wm_moveresize: X11_sys::Atom, pub atom_wm_delete_window: X11_sys::Atom, pub atom_wm_protocols: X11_sys::Atom, pub atom_motif_wm_hints: X11_sys::Atom, pub atom_net_wm_state: X11_sys::Atom, pub atom_new_wm_state_maximized_horz: X11_sys::Atom, pub atom_new_wm_state_maximized_vert: X11_sys::Atom, pub atom_targets: X11_sys::Atom, pub atom_utf8_string: X11_sys::Atom, pub atom_text: X11_sys::Atom, pub atom_multiple: X11_sys::Atom, pub atom_text_plain: X11_sys::Atom, pub atom_atom: X11_sys::Atom, pub dnd: Dnd, } #[derive(Clone)] pub struct XlibWindow { pub window: Option<c_ulong>, pub xic: Option<X11_sys::XIC>, pub attributes: Option<X11_sys::XSetWindowAttributes>, pub visual_info: 
Option<X11_sys::XVisualInfo>, pub child_windows: Vec<XlibChildWindow>, pub last_nc_mode: Option<c_long>, pub window_id: usize, pub xlib_app: *mut XlibApp, pub last_window_geom: WindowGeom, pub time_start: u64, pub ime_spot: Vec2, pub current_cursor: MouseCursor, pub last_mouse_pos: Vec2, pub fingers_down: Vec<bool>, } #[derive(Clone)] pub struct XlibChildWindow { pub window: c_ulong, visible: bool, x: i32, y: i32, w: u32, h: u32 } #[derive(Clone, Copy)] pub struct XlibTimer { id: u64, timeout: f64, repeats: bool, delta_timeout: f64, } #[derive(Clone)] pub struct XlibSignal { pub signal_id: u64, pub value: u64 } impl XlibApp { pub fn new() -> XlibApp { unsafe { let display = X11_sys::XOpenDisplay(ptr::null()); let display_fd = X11_sys::XConnectionNumber(display); let xim = X11_sys::XOpenIM(display, ptr::null_mut(), ptr::null_mut(), ptr::null_mut()); let mut signal_fds = [0, 0]; libc::pipe(signal_fds.as_mut_ptr()); XlibApp { atom_clipboard: X11_sys::XInternAtom(display, CString::new("CLIPBOARD").unwrap().as_ptr(), 0), atom_net_wm_moveresize: X11_sys::XInternAtom(display, CString::new("_NET_WM_MOVERESIZE").unwrap().as_ptr(), 0), atom_wm_delete_window: X11_sys::XInternAtom(display, CString::new("WM_DELETE_WINDOW").unwrap().as_ptr(), 0), atom_wm_protocols: X11_sys::XInternAtom(display, CString::new("WM_PROTOCOLS").unwrap().as_ptr(), 0), atom_motif_wm_hints: X11_sys::XInternAtom(display, CString::new("_MOTIF_WM_HINTS").unwrap().as_ptr(), 0), atom_net_wm_state: X11_sys::XInternAtom(display, CString::new("_NET_WM_STATE").unwrap().as_ptr(), 0), atom_new_wm_state_maximized_horz: X11_sys::XInternAtom(display, CString::new("_NET_WM_STATE_MAXIMIZED_HORZ").unwrap().as_ptr(), 0), atom_new_wm_state_maximized_vert: X11_sys::XInternAtom(display, CString::new("_NET_WM_STATE_MAXIMIZED_VERT").unwrap().as_ptr(), 0), atom_targets: X11_sys::XInternAtom(display, CString::new("TARGETS").unwrap().as_ptr(), 0), atom_utf8_string: X11_sys::XInternAtom(display, 
CString::new("UTF8_STRING").unwrap().as_ptr(), 1), atom_atom: X11_sys::XInternAtom(display, CString::new("ATOM").unwrap().as_ptr(), 0), atom_text: X11_sys::XInternAtom(display, CString::new("TEXT").unwrap().as_ptr(), 0), atom_text_plain: X11_sys::XInternAtom(display, CString::new("text/plain").unwrap().as_ptr(), 0), atom_multiple: X11_sys::XInternAtom(display, CString::new("MULTIPLE").unwrap().as_ptr(), 0), xim, display, display_fd, signal_fds, clipboard: String::new(), last_scroll_time: 0.0, last_click_time: 0.0, last_click_pos: (0, 0), window_map: HashMap::new(), signals: Mutex::new(Vec::new()), time_start: precise_time_ns(), event_callback: None, event_recur_block: false, event_loop_running: true, loop_block: false, timers: VecDeque::new(), free_timers: Vec::new(), current_cursor: MouseCursor::Default, dnd: Dnd::new(display), } } } pub fn init(&mut self) { unsafe { //unsafe { X11_sys::XrmInitialize(); //} GLOBAL_XLIB_APP = self; } } pub fn event_loop<F>(&mut self, mut event_handler: F) where F: FnMut(&mut XlibApp, &mut Vec<Event>) -> bool, { unsafe { self.event_callback = Some( &mut event_handler as *const dyn FnMut(&mut XlibApp, &mut Vec<Event>) -> bool as *mut dyn FnMut(&mut XlibApp, &mut Vec<Event>) -> bool ); self.do_callback(&mut vec![ Event::Paint, ]); // Record the current time. let mut select_time = self.time_now(); while self.event_loop_running { if self.loop_block { let mut fds = mem::MaybeUninit::uninit(); libc::FD_ZERO(fds.as_mut_ptr()); libc::FD_SET(self.display_fd, fds.as_mut_ptr()); libc::FD_SET(self.signal_fds[0], fds.as_mut_ptr()); // If there are any timers, we set the timeout for select to the `delta_timeout` // of the first timer that should be fired. Otherwise, we set the timeout to // None, so that select will block indefinitely. 
let timeout = if let Some(timer) = self.timers.front() { // println!("Select wait {}",(timer.delta_timeout.fract() * 1000000.0) as i64); Some(timeval { // `tv_sec` is in seconds, so take the integer part of `delta_timeout` tv_sec: timer.delta_timeout.trunc() as libc::time_t, // `tv_usec` is in microseconds, so take the fractional part of // `delta_timeout` 1000000.0. tv_usec: (timer.delta_timeout.fract() * 1000000.0) as libc::time_t, }) } else { None }; let _nfds = libc::select( self.display_fd.max(self.signal_fds[0]) + 1, fds.as_mut_ptr(), ptr::null_mut(), ptr::null_mut(), if let Some(mut timeout) = timeout {&mut timeout} else {ptr::null_mut()} ); } // Update the current time, and compute the amount of time that elapsed since we // last recorded the current time. let last_select_time = select_time; select_time = self.time_now(); let mut select_time_used = select_time - last_select_time; while let Some(timer) = self.timers.front_mut() { // If the amount of time that elapsed is less than `delta_timeout` for the // next timer, then no more timers need to be fired. if select_time_used < timer.delta_timeout { timer.delta_timeout -= select_time_used; break; } let timer = *self.timers.front().unwrap(); select_time_used -= timer.delta_timeout; // Stop the timer to remove it from the list. self.stop_timer(timer.id); // If the timer is repeating, simply start it again. 
if timer.repeats { self.start_timer(timer.id, timer.timeout, timer.repeats); } // Fire the timer, and allow the callback to cancel the repeat self.do_callback(&mut vec![ Event::Timer(TimerEvent {timer_id: timer.id}) ]); } while self.display != ptr::null_mut() && X11_sys::XPending(self.display) != 0 { let mut event = mem::MaybeUninit::uninit(); X11_sys::XNextEvent(self.display, event.as_mut_ptr()); let mut event = event.assume_init(); match event.type_ as u32 { X11_sys::SelectionNotify => { let selection = event.xselection; if selection.property == self.dnd.atoms.selection { self.dnd.handle_selection_event(&selection); } else { // first get the size of the thing let mut actual_type = mem::MaybeUninit::uninit(); let mut actual_format = mem::MaybeUninit::uninit(); let mut n_items = mem::MaybeUninit::uninit(); let mut bytes_to_read = mem::MaybeUninit::uninit(); let mut ret = mem::MaybeUninit::uninit(); X11_sys::XGetWindowProperty( self.display, selection.requestor, selection.property, 0, 0, 0, X11_sys::AnyPropertyType as c_ulong, actual_type.as_mut_ptr(), actual_format.as_mut_ptr(), n_items.as_mut_ptr(), bytes_to_read.as_mut_ptr(), ret.as_mut_ptr() ); //let actual_type = actual_type.assume_init(); //let actual_format = actual_format.assume_init(); //let n_items = n_items.assume_init(); let bytes_to_read = bytes_to_read.assume_init(); //let mut ret = ret.assume_init(); let mut bytes_after = mem::MaybeUninit::uninit(); X11_sys::XGetWindowProperty( self.display, selection.requestor, selection.property, 0, bytes_to_read as c_long, 0, X11_sys::AnyPropertyType as c_ulong, actual_type.as_mut_ptr(), actual_format.as_mut_ptr(), n_items.as_mut_ptr(), bytes_after.as_mut_ptr(), ret.as_mut_ptr() ); let ret = ret.assume_init(); //let bytes_after = bytes_after.assume_init(); if ret != ptr::null_mut() && bytes_to_read > 0 { let utf8_slice = std::slice::from_raw_parts::<u8>(ret as *const _ as *const u8, bytes_to_read as usize); if let Ok(utf8_string) = 
String::from_utf8(utf8_slice.to_vec()) { self.do_callback(&mut vec![ Event::TextInput(TextInputEvent { input: utf8_string, was_paste: true, replace_last: false }) ]); } X11_sys::XFree(ret as *mut _ as *mut c_void); } } }, X11_sys::SelectionRequest => { let request = event.xselectionrequest; let mut response = X11_sys::XSelectionEvent { type_: X11_sys::SelectionNotify as i32, serial: 0, send_event: 0, display: self.display, requestor: request.requestor, selection: request.selection, target: request.target, time: request.time, property: request.property, }; if request.target == self.atom_targets { let mut targets = [self.atom_utf8_string]; X11_sys::XChangeProperty( self.display, request.requestor, request.property, 4, 32, X11_sys::PropModeReplace as i32, targets.as_mut() as *mut _ as *mut c_uchar, targets.len() as i32 ); } else if request.target == self.atom_utf8_string { X11_sys::XChangeProperty( self.display, request.requestor, request.property, self.atom_utf8_string, 8, X11_sys::PropModeReplace as i32, self.clipboard.as_ptr() as *const _ as *const c_uchar, self.clipboard.len() as i32 ); } else { response.property = 0; } X11_sys::XSendEvent(self.display, request.requestor, 1, 0, &mut response as *mut _ as *mut X11_sys::XEvent); }, X11_sys::DestroyNotify => { // our window got destroyed let destroy_window = event.xdestroywindow; if let Some(window_ptr) = self.window_map.get(&destroy_window.window) { let window = &mut (**window_ptr); window.do_callback(&mut vec![Event::WindowClosed(WindowClosedEvent { window_id: window.window_id, })]); } }, X11_sys::ConfigureNotify => { let cfg = event.xconfigure; if let Some(window_ptr) = self.window_map.get(&cfg.window) { let window = &mut (**window_ptr); if cfg.window == window.window.unwrap() { window.send_change_event(); } } }, X11_sys::EnterNotify => {}, X11_sys::LeaveNotify => { let crossing = event.xcrossing; if crossing.detail == 4 { if let Some(window_ptr) = self.window_map.get(&crossing.window) { let window = &mut 
(**window_ptr); window.do_callback(&mut vec![Event::FingerHover(FingerHoverEvent { digit: 0, window_id: window.window_id, any_down: false, abs: window.last_mouse_pos, rel: window.last_mouse_pos, rect: Rect::default(), handled: false, hover_state: HoverState::Out, modifiers: KeyModifiers::default(), time: window.time_now() })]); } } }, X11_sys::MotionNotify => { // mousemove let motion = event.xmotion; if let Some(window_ptr) = self.window_map.get(&motion.window) { let window = &mut (**window_ptr); let mut x = motion.x; let mut y = motion.y; if window.window.is_none() { return; // shutdown } if motion.window != window.window.unwrap() { // find the right child for child in &window.child_windows { if child.window == motion.window { x += child.x; y += child.y; break } } } let pos = Vec2 {x: x as f32 / window.last_window_geom.dpi_factor, y: y as f32 / window.last_window_geom.dpi_factor}; // query window for chrome let mut drag_query_events = vec![ Event::WindowDragQuery(WindowDragQueryEvent { window_id: window.window_id, abs: window.last_mouse_pos, response: WindowDragQueryResponse::NoAnswer }) ]; window.do_callback(&mut drag_query_events); // otherwise lets check if we are hover the window edge to resize the window //println!("{} {}", window.last_window_geom.inner_size.x, pos.x); window.send_finger_hover_and_move(pos, KeyModifiers::default()); let window_size = window.last_window_geom.inner_size; if pos.x >= 0.0 && pos.x < 10.0 && pos.y >= 0.0 && pos.y < 10.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_TOPLEFT); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::NwResize)]); } else if pos.x >= 0.0 && pos.x < 10.0 && pos.y >= window_size.y - 10.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_BOTTOMLEFT); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::SwResize)]); } else if pos.x >= 0.0 && pos.x < 5.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_LEFT); window.do_callback(&mut 
vec![Event::WindowSetHoverCursor(MouseCursor::WResize)]); } else if pos.x >= window_size.x - 10.0 && pos.y >= 0.0 && pos.y < 10.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_TOPRIGHT); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::NeResize)]); } else if pos.x >= window_size.x - 10.0 && pos.y >= window_size.y - 10.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_BOTTOMRIGHT); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::SeResize)]); } else if pos.x >= window_size.x - 5.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_RIGHT); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::EResize)]); } else if pos.y <= 5.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_TOP); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::NResize)]); } else if pos.y > window_size.y - 5.0 { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_SIZE_BOTTOM); window.do_callback(&mut vec![Event::WindowSetHoverCursor(MouseCursor::SResize)]); } else { match &drag_query_events[0] { Event::WindowDragQuery(wd) => match &wd.response { WindowDragQueryResponse::Caption => { window.last_nc_mode = Some(_NET_WM_MOVERESIZE_MOVE); }, _ => { window.last_nc_mode = None; } }, _ => () } } } }, X11_sys::ButtonPress => { // mouse down let button = event.xbutton; let time_now = self.time_now(); if let Some(window_ptr) = self.window_map.get(&button.window) { let window = &mut (**window_ptr); X11_sys::XSetInputFocus(self.display, window.window.unwrap(), X11_sys::None as i32, X11_sys::CurrentTime as c_ulong); if button.button >= 4 && button.button <= 7 { let last_scroll_time = self.last_scroll_time; self.last_scroll_time = time_now; // completely arbitrary scroll acceleration curve. let speed = 1200.0 * (0.2 - 2. 
* (self.last_scroll_time - last_scroll_time)).max(0.01); self.do_callback(&mut vec![Event::FingerScroll(FingerScrollEvent { digit: 0, window_id: window.window_id, scroll: Vec2 { x: if button.button == 6 {-speed as f32} else if button.button == 7 {speed as f32} else {0.}, y: if button.button == 4 {-speed as f32} else if button.button == 5 {speed as f32} else {0.} }, abs: window.last_mouse_pos, rel: window.last_mouse_pos, rect: Rect::default(), input_type: FingerInputType::Mouse, modifiers: self.xkeystate_to_modifiers(button.state), handled_x: false, handled_y: false, time: self.last_scroll_time })]) } else { // do all the 'nonclient' area messaging to the window manager if let Some(last_nc_mode) = window.last_nc_mode { if (time_now - self.last_click_time) < 0.35 && (button.x_root - self.last_click_pos.0).abs() < 5 && (button.y_root - self.last_click_pos.1).abs() < 5 && last_nc_mode == _NET_WM_MOVERESIZE_MOVE { if window.get_is_maximized() { window.restore(); } else { window.maximize(); } } else { let default_screen = X11_sys::XDefaultScreen(self.display); let root_window = X11_sys::XRootWindow(self.display, default_screen); X11_sys::XUngrabPointer(self.display, 0); X11_sys::XFlush(self.display); let mut xclient = X11_sys::XClientMessageEvent { type_: X11_sys::ClientMessage as i32, serial: 0, send_event: 0, display: self.display, window: window.window.unwrap(), message_type: self.atom_net_wm_moveresize, format: 32, data: { let mut msg = mem::zeroed::<X11_sys::XClientMessageEvent__bindgen_ty_1>(); msg.l[0] = button.x_root as c_long; msg.l[1] = button.y_root as c_long; msg.l[2] = last_nc_mode; msg } }; X11_sys::XSendEvent( self.display, root_window, 0, (X11_sys::SubstructureRedirectMask | X11_sys::SubstructureNotifyMask) as c_long, &mut xclient as *mut _ as *mut X11_sys::XEvent ); } } else { window.send_finger_down(button.button as usize, self.xkeystate_to_modifiers(button.state)) } } } self.last_click_time = time_now; self.last_click_pos = (button.x_root, 
button.y_root); }, X11_sys::ButtonRelease => { // mouse up let button = event.xbutton; if let Some(window_ptr) = self.window_map.get(&button.window) { let window = &mut (**window_ptr); window.send_finger_up(button.button as usize, self.xkeystate_to_modifiers(button.state)) } }, X11_sys::KeyPress => { if let Some(window_ptr) = self.window_map.get(&event.xkey.window) { let window = &mut (**window_ptr); let block_text = if event.xkey.keycode != 0 { let key_code = self.xkeyevent_to_keycode(&mut event.xkey); let modifiers = self.xkeystate_to_modifiers(event.xkey.state); if modifiers.control || modifiers.logo { match key_code { KeyCode::KeyV => { // paste // request the pasteable text from the other side X11_sys::XConvertSelection( self.display, self.atom_clipboard, self.atom_utf8_string, self.atom_clipboard, window.window.unwrap(), event.xkey.time ); /* self.do_callback(&mut vec![ Event::TextInput(TextInputEvent { input: String::new(), was_paste: true, replace_last: false }) ]); */ } KeyCode::KeyX | KeyCode::KeyC => { let mut events = vec![ Event::TextCopy(TextCopyEvent { response: None }) ]; self.do_callback(&mut events); match &events[0] { Event::TextCopy(req) => if let Some(response) = &req.response { // store the text on the clipboard self.clipboard = response.clone(); // lets set the owner println!("Set selection owner"); X11_sys::XSetSelectionOwner( self.display, self.atom_clipboard, window.window.unwrap(), event.xkey.time ); X11_sys::XFlush(self.display); }, _ => () }; } _ => () } } let block_text = modifiers.control || modifiers.logo || modifiers.alt; self.do_callback(&mut vec![Event::KeyDown(KeyEvent { key_code: key_code, is_repeat: false, modifiers: modifiers, time: self.time_now() })]); block_text }else {false}; if !block_text { // decode the character let mut buffer = [0u8; 32]; let mut keysym = mem::MaybeUninit::uninit(); let mut status = mem::MaybeUninit::uninit(); let count = X11_sys::Xutf8LookupString( window.xic.unwrap(), &mut event.xkey, 
buffer.as_mut_ptr() as *mut c_char, buffer.len() as c_int, keysym.as_mut_ptr(), status.as_mut_ptr(), ); //let keysym = keysym.assume_init(); let status = status.assume_init(); if status != X11_sys::XBufferOverflow { let utf8 = std::str::from_utf8(&buffer[..count as usize]).unwrap_or("").to_string(); let char_code = utf8.chars().next().unwrap_or('\0'); if char_code >= ' ' && char_code != 127 as char { self.do_callback(&mut vec![ Event::TextInput(TextInputEvent { input: utf8, was_paste: false, replace_last: false }) ]); } } } } }, X11_sys::KeyRelease => { self.do_callback(&mut vec![Event::KeyUp(KeyEvent { key_code: self.xkeyevent_to_keycode(&mut event.xkey), is_repeat: false, modifiers: self.xkeystate_to_modifiers(event.xkey.state), time: self.time_now() })]); }, X11_sys::ClientMessage => { let event = event.xclient; if event.message_type == self.atom_wm_protocols { if let Some(window_ptr) = self.window_map.get(&event.window) { let window = &mut (**window_ptr); window.close_window(); } } if event.message_type == self.dnd.atoms.enter { self.dnd.handle_enter_event(&event); } else if event.message_type == self.dnd.atoms.drop { self.dnd.handle_drop_event(&event); } else if event.message_type == self.dnd.atoms.leave { self.dnd.handle_leave_event(&event); } else if event.message_type == self.dnd.atoms.position { self.dnd.handle_position_event(&event); } }, X11_sys::Expose => { /* (glx.glXMakeCurrent)(display, window, context); gl::ClearColor(1.0, 0.0, 0.0, 1.0); gl::Clear(gl::COLOR_BUFFER_BIT); (glx.glXSwapBuffers)(display, window); */ }, _ => {} } } // process all signals in the queue let mut proc_signals = if let Ok(mut signals) = self.signals.lock() { let sigs = signals.clone(); signals.truncate(0); sigs } else { Vec::new() }; if proc_signals.len() > 0 { self.do_callback(&mut proc_signals); } self.do_callback(&mut vec![ Event::Paint, ]); } self.event_callback = None; } } pub fn do_callback(&mut self, events: &mut Vec<Event>) { unsafe { if self.event_callback.is_none() 
|| self.event_recur_block { return }; self.event_recur_block = true; let callback = self.event_callback.unwrap(); self.loop_block = (*callback)(self, events); self.event_recur_block = false; } } pub fn start_timer(&mut self, id: u64, timeout: f64, repeats: bool) { //println!("STARTING TIMER {:?} {:?} {:?}", id, timeout, repeats); // Timers are stored in an ordered list. Each timer stores the amount of time between // when its predecessor in the list should fire and when the timer itself should fire // in `delta_timeout`. // Since we are starting a new timer, our first step is to find where in the list this // new timer should be inserted. `delta_timeout` is initially set to `timeout`. As we move // through the list, we subtract the `delta_timeout` of the timers preceding the new timer // in the list. Once this subtraction would cause an overflow, we have found the correct // position in the list. The timer should fire after the one preceding it in the list, and // before the one succeeding it in the list. Moreover `delta_timeout` is now set to the // correct value. let mut delta_timeout = timeout; let index = self.timers.iter().position( | timer | { if delta_timeout < timer.delta_timeout { return true; } delta_timeout -= timer.delta_timeout; false }).unwrap_or(self.timers.len()); // Insert the timer in the list. // // We also store the original `timeout` with each timer. This is necessary if the timer is // repeatable and we want to restart it later on. self.timers.insert( index, XlibTimer { id, timeout, repeats, delta_timeout, }, ); // The timer succeeding the newly inserted timer now has a new timer preceding it, so we // need to adjust its `delta_timeout`. // // Note that by construction, `timer.delta_timeout < delta_timeout`. Otherwise, the newly // inserted timer would have been inserted *after* the timer succeeding it, not before it. 
if index < self.timers.len() - 1 { let timer = &mut self.timers[index + 1]; // This computation should never underflow (see above) timer.delta_timeout -= delta_timeout; } } pub fn stop_timer(&mut self, id: u64) { //println!("STOPPING TIMER {:?}", id); // Since we are stopping an existing timer, our first step is to find where in the list this // timer should be removed. let index = if let Some(index) = self.timers.iter().position( | timer | timer.id == id) { index } else { return; }; // Remove the timer from the list. let delta_timeout = self.timers.remove(index).unwrap().delta_timeout; // The timer succeeding the removed timer now has a different timer preceding it, so we need // to adjust its `delta timeout`. if index < self.timers.len() { self.timers[index].delta_timeout += delta_timeout; } } pub fn post_signal(signal: Signal, status: StatusId) { unsafe { if let Ok(mut signals_locked) = (*GLOBAL_XLIB_APP).signals.lock() { let mut signals = HashMap::new(); let mut set = BTreeSet::new(); set.insert(status); signals.insert(signal, set); signals_locked.push(Event::Signal(SignalEvent {signals})); let mut f = unsafe { File::from_raw_fd((*GLOBAL_XLIB_APP).signal_fds[1]) }; let _ = write!(&mut f, "\0"); } } } pub fn terminate_event_loop(&mut self) { // maybe need to do more here self.event_loop_running = false; unsafe {X11_sys::XCloseIM(self.xim)}; unsafe {X11_sys::XCloseDisplay(self.display)}; self.display = ptr::null_mut(); } pub fn time_now(&self) -> f64 { let time_now = precise_time_ns(); (time_now - self.time_start) as f64 / 1_000_000_000.0 } pub fn load_first_cursor(&self, names: &[&[u8]]) -> Option<c_ulong> { unsafe { for name in names { let cursor = X11_sys::XcursorLibraryLoadCursor( self.display, name.as_ptr() as *const c_char, ); if cursor != 0 { return Some(cursor) } } } return None } pub fn set_mouse_cursor(&mut self, cursor: MouseCursor) { if self.current_cursor != cursor { self.current_cursor = cursor.clone(); let x11_cursor = match cursor { 
MouseCursor::Hidden => { return; }, MouseCursor::EResize => self.load_first_cursor(&[b"right_side\0"]), MouseCursor::NResize => self.load_first_cursor(&[b"top_side\0"]), MouseCursor::NeResize => self.load_first_cursor(&[b"top_right_corner\0"]), MouseCursor::NwResize => self.load_first_cursor(&[b"top_left_corner\0"]), MouseCursor::SResize => self.load_first_cursor(&[b"bottom_side\0"]), MouseCursor::SeResize => self.load_first_cursor(&[b"bottom_right_corner\0"]), MouseCursor::SwResize => self.load_first_cursor(&[b"bottom_left_corner\0"]), MouseCursor::WResize => self.load_first_cursor(&[b"left_side\0"]), MouseCursor::Default => self.load_first_cursor(&[b"left_ptr\0"]), MouseCursor::Crosshair => self.load_first_cursor(&[b"crosshair"]), MouseCursor::Hand => self.load_first_cursor(&[b"hand2\0", b"hand1\0"]), MouseCursor::Arrow => self.load_first_cursor(&[b"arrow\0"]), MouseCursor::Move => self.load_first_cursor(&[b"move\0"]), MouseCursor::NotAllowed => self.load_first_cursor(&[b"crossed_circle\0"]), MouseCursor::Text => self.load_first_cursor(&[b"text\0", b"xterm\0"]), MouseCursor::Wait => self.load_first_cursor(&[b"watch\0"]), MouseCursor::Help => self.load_first_cursor(&[b"question_arrow\0"]), MouseCursor::NsResize => self.load_first_cursor(&[b"v_double_arrow\0"]), MouseCursor::NeswResize => self.load_first_cursor(&[b"fd_double_arrow\0", b"size_fdiag\0"]), MouseCursor::EwResize => self.load_first_cursor(&[b"h_double_arrow\0"]), MouseCursor::NwseResize => self.load_first_cursor(&[b"bd_double_arrow\0", b"size_bdiag\0"]), MouseCursor::ColResize => self.load_first_cursor(&[b"split_h\0", b"h_double_arrow\0"]), MouseCursor::RowResize => self.load_first_cursor(&[b"split_v\0", b"v_double_arrow\0"]), }; if let Some(x11_cursor) = x11_cursor { unsafe { for (k, v) in &self.window_map { if !(**v).window.is_none() { X11_sys::XDefineCursor(self.display, *k, x11_cursor); } } X11_sys::XFreeCursor(self.display, x11_cursor); } } } } fn xkeystate_to_modifiers(&self, state: c_uint) -> 
KeyModifiers {
    // (Tail of a state-to-modifiers conversion fn whose header is above this chunk.)
    // Decode the X11 modifier state bitmask into the cross-platform KeyModifiers struct.
    KeyModifiers {
        alt: state & X11_sys::Mod1Mask != 0,
        shift: state & X11_sys::ShiftMask != 0,
        control: state & X11_sys::ControlMask != 0,
        logo: state & X11_sys::Mod4Mask != 0,
    }
}

/// Translates an X11 key press/release event into a platform-independent `KeyCode`.
///
/// `XLookupString` is called only to obtain the keysym (no string buffer is
/// requested); the keysym is then mapped to a `KeyCode`, with any unmapped
/// keysym becoming `KeyCode::Unknown`.
fn xkeyevent_to_keycode(&self, key_event: &mut X11_sys::XKeyEvent) -> KeyCode {
    let mut keysym = 0;
    unsafe {
        X11_sys::XLookupString(
            key_event,
            ptr::null_mut(),
            0,
            &mut keysym,
            ptr::null_mut(),
        );
    }
    // Both the lowercase and uppercase keysyms map to the same KeyCode.
    match keysym as u32 {
        X11_sys::XK_a => KeyCode::KeyA,
        X11_sys::XK_A => KeyCode::KeyA,
        X11_sys::XK_b => KeyCode::KeyB,
        X11_sys::XK_B => KeyCode::KeyB,
        X11_sys::XK_c => KeyCode::KeyC,
        X11_sys::XK_C => KeyCode::KeyC,
        X11_sys::XK_d => KeyCode::KeyD,
        X11_sys::XK_D => KeyCode::KeyD,
        X11_sys::XK_e => KeyCode::KeyE,
        X11_sys::XK_E => KeyCode::KeyE,
        X11_sys::XK_f => KeyCode::KeyF,
        X11_sys::XK_F => KeyCode::KeyF,
        X11_sys::XK_g => KeyCode::KeyG,
        X11_sys::XK_G => KeyCode::KeyG,
        X11_sys::XK_h => KeyCode::KeyH,
        X11_sys::XK_H => KeyCode::KeyH,
        X11_sys::XK_i => KeyCode::KeyI,
        X11_sys::XK_I => KeyCode::KeyI,
        X11_sys::XK_j => KeyCode::KeyJ,
        X11_sys::XK_J => KeyCode::KeyJ,
        X11_sys::XK_k => KeyCode::KeyK,
        X11_sys::XK_K => KeyCode::KeyK,
        X11_sys::XK_l => KeyCode::KeyL,
        X11_sys::XK_L => KeyCode::KeyL,
        X11_sys::XK_m => KeyCode::KeyM,
        X11_sys::XK_M => KeyCode::KeyM,
        X11_sys::XK_n => KeyCode::KeyN,
        X11_sys::XK_N => KeyCode::KeyN,
        X11_sys::XK_o => KeyCode::KeyO,
        X11_sys::XK_O => KeyCode::KeyO,
        X11_sys::XK_p => KeyCode::KeyP,
        X11_sys::XK_P => KeyCode::KeyP,
        X11_sys::XK_q => KeyCode::KeyQ,
        X11_sys::XK_Q => KeyCode::KeyQ,
        X11_sys::XK_r => KeyCode::KeyR,
        X11_sys::XK_R => KeyCode::KeyR,
        X11_sys::XK_s => KeyCode::KeyS,
        X11_sys::XK_S => KeyCode::KeyS,
        X11_sys::XK_t => KeyCode::KeyT,
        X11_sys::XK_T => KeyCode::KeyT,
        X11_sys::XK_u => KeyCode::KeyU,
        X11_sys::XK_U => KeyCode::KeyU,
        X11_sys::XK_v => KeyCode::KeyV,
        X11_sys::XK_V => KeyCode::KeyV,
        X11_sys::XK_w => KeyCode::KeyW,
        X11_sys::XK_W => KeyCode::KeyW,
        X11_sys::XK_x => KeyCode::KeyX,
        X11_sys::XK_X => KeyCode::KeyX,
        X11_sys::XK_y => KeyCode::KeyY,
        X11_sys::XK_Y => KeyCode::KeyY,
        X11_sys::XK_z => KeyCode::KeyZ,
        X11_sys::XK_Z => KeyCode::KeyZ,
        X11_sys::XK_0 => KeyCode::Key0,
        X11_sys::XK_1 => KeyCode::Key1,
        X11_sys::XK_2 => KeyCode::Key2,
        X11_sys::XK_3 => KeyCode::Key3,
        X11_sys::XK_4 => KeyCode::Key4,
        X11_sys::XK_5 => KeyCode::Key5,
        X11_sys::XK_6 => KeyCode::Key6,
        X11_sys::XK_7 => KeyCode::Key7,
        X11_sys::XK_8 => KeyCode::Key8,
        X11_sys::XK_9 => KeyCode::Key9,
        X11_sys::XK_Alt_L => KeyCode::Alt,
        X11_sys::XK_Alt_R => KeyCode::Alt,
        X11_sys::XK_Meta_L => KeyCode::Logo,
        X11_sys::XK_Meta_R => KeyCode::Logo,
        X11_sys::XK_Shift_L => KeyCode::Shift,
        X11_sys::XK_Shift_R => KeyCode::Shift,
        X11_sys::XK_Control_L => KeyCode::Control,
        X11_sys::XK_Control_R => KeyCode::Control,
        X11_sys::XK_equal => KeyCode::Equals,
        X11_sys::XK_minus => KeyCode::Minus,
        X11_sys::XK_bracketright => KeyCode::RBracket,
        X11_sys::XK_bracketleft => KeyCode::LBracket,
        X11_sys::XK_Return => KeyCode::Return,
        X11_sys::XK_grave => KeyCode::Backtick,
        X11_sys::XK_semicolon => KeyCode::Semicolon,
        X11_sys::XK_backslash => KeyCode::Backslash,
        X11_sys::XK_comma => KeyCode::Comma,
        X11_sys::XK_slash => KeyCode::Slash,
        X11_sys::XK_period => KeyCode::Period,
        X11_sys::XK_Tab => KeyCode::Tab,
        X11_sys::XK_ISO_Left_Tab => KeyCode::Tab,
        X11_sys::XK_space => KeyCode::Space,
        X11_sys::XK_BackSpace => KeyCode::Backspace,
        X11_sys::XK_Escape => KeyCode::Escape,
        X11_sys::XK_Caps_Lock => KeyCode::Capslock,
        X11_sys::XK_KP_Decimal => KeyCode::NumpadDecimal,
        X11_sys::XK_KP_Multiply => KeyCode::NumpadMultiply,
        X11_sys::XK_KP_Add => KeyCode::NumpadAdd,
        X11_sys::XK_Num_Lock => KeyCode::Numlock,
        X11_sys::XK_KP_Divide => KeyCode::NumpadDivide,
        X11_sys::XK_KP_Enter => KeyCode::NumpadEnter,
        X11_sys::XK_KP_Subtract => KeyCode::NumpadSubtract,
        //keysim::XK_9 => KeyCode::NumpadEquals,
        X11_sys::XK_KP_0 => KeyCode::Numpad0,
        X11_sys::XK_KP_1 => KeyCode::Numpad1,
        X11_sys::XK_KP_2 => KeyCode::Numpad2,
        X11_sys::XK_KP_3 => KeyCode::Numpad3,
        X11_sys::XK_KP_4 => KeyCode::Numpad4,
        X11_sys::XK_KP_5 => KeyCode::Numpad5,
        X11_sys::XK_KP_6 => KeyCode::Numpad6,
        X11_sys::XK_KP_7 => KeyCode::Numpad7,
        X11_sys::XK_KP_8 => KeyCode::Numpad8,
        X11_sys::XK_KP_9 => KeyCode::Numpad9,
        X11_sys::XK_F1 => KeyCode::F1,
        X11_sys::XK_F2 => KeyCode::F2,
        X11_sys::XK_F3 => KeyCode::F3,
        X11_sys::XK_F4 => KeyCode::F4,
        X11_sys::XK_F5 => KeyCode::F5,
        X11_sys::XK_F6 => KeyCode::F6,
        X11_sys::XK_F7 => KeyCode::F7,
        X11_sys::XK_F8 => KeyCode::F8,
        X11_sys::XK_F9 => KeyCode::F9,
        X11_sys::XK_F10 => KeyCode::F10,
        X11_sys::XK_F11 => KeyCode::F11,
        X11_sys::XK_F12 => KeyCode::F12,
        X11_sys::XK_Print => KeyCode::PrintScreen,
        X11_sys::XK_Home => KeyCode::Home,
        X11_sys::XK_Page_Up => KeyCode::PageUp,
        X11_sys::XK_Delete => KeyCode::Delete,
        X11_sys::XK_End => KeyCode::End,
        X11_sys::XK_Page_Down => KeyCode::PageDown,
        X11_sys::XK_Left => KeyCode::ArrowLeft,
        X11_sys::XK_Right => KeyCode::ArrowRight,
        X11_sys::XK_Down => KeyCode::ArrowDown,
        X11_sys::XK_Up => KeyCode::ArrowUp,
        _ => KeyCode::Unknown,
    }
}
}

impl XlibWindow {
    /// Creates an uninitialized window wrapper tied to `xlib_app`; no X11
    /// resources are allocated until `init` is called.
    pub fn new(xlib_app: &mut XlibApp, window_id: usize) -> XlibWindow {
        let mut fingers_down = Vec::new();
        fingers_down.resize(NUM_FINGERS, false);
        XlibWindow {
            window: None,
            xic: None,
            attributes: None,
            visual_info: None,
            child_windows: Vec::new(),
            window_id: window_id,
            xlib_app: xlib_app,
            last_window_geom: WindowGeom::default(),
            time_start: xlib_app.time_start,
            last_nc_mode: None,
            ime_spot: Vec2::default(),
            current_cursor: MouseCursor::Default,
            last_mouse_pos: Vec2::default(),
            fingers_down: fingers_down,
        }
    }

    /// Creates and maps the actual X11 window: colormap, event mask, WM close
    /// protocol, optional custom chrome (Motif WM hints), drag-and-drop
    /// registration, title, and the X input context for IME input.
    /// `size` is in logical units and is scaled by the detected DPI factor.
    pub fn init(&mut self, title: &str, size: Vec2, position: Option<Vec2>, visual_info: X11_sys::XVisualInfo) {
        unsafe {
            let display = (*self.xlib_app).display;
            // The default screen of the display
            let default_screen = X11_sys::XDefaultScreen(display);
            // The root window of the default screen
            let root_window = X11_sys::XRootWindow(display, default_screen);
            let mut attributes = mem::zeroed::<X11_sys::XSetWindowAttributes>();
            attributes.border_pixel = 0;
            //attributes.override_redirect = 1;
            attributes.colormap = X11_sys::XCreateColormap(display, root_window, visual_info.visual, X11_sys::AllocNone as i32);
            attributes.event_mask = (
                X11_sys::ExposureMask
                | X11_sys::StructureNotifyMask
                | X11_sys::ButtonMotionMask
                | X11_sys::PointerMotionMask
                | X11_sys::ButtonPressMask
                | X11_sys::ButtonReleaseMask
                | X11_sys::KeyPressMask
                | X11_sys::KeyReleaseMask
                | X11_sys::VisibilityChangeMask
                | X11_sys::FocusChangeMask
                | X11_sys::EnterWindowMask
                | X11_sys::LeaveWindowMask
            ) as c_long;
            let dpi_factor = self.get_dpi_factor();
            // Create a window; falls back to a (150, 60) position when none was given.
            let window = X11_sys::XCreateWindow(
                display,
                root_window,
                if position.is_some() {position.unwrap().x} else {150.0} as i32,
                if position.is_some() {position.unwrap().y} else {60.0} as i32,
                (size.x * dpi_factor) as u32,
                (size.y * dpi_factor) as u32,
                0,
                visual_info.depth,
                X11_sys::InputOutput as u32,
                visual_info.visual,
                (X11_sys::CWBorderPixel | X11_sys::CWColormap | X11_sys::CWEventMask) as c_ulong, // | X11_sys::CWOverrideRedirect,
                &mut attributes,
            );
            // Tell the window manager that we want to be notified when the window is closed
            X11_sys::XSetWMProtocols(display, window, &mut (*self.xlib_app).atom_wm_delete_window, 1);
            if LINUX_CUSTOM_WINDOW_CHROME {
                // Ask the WM (via Motif hints) to drop its decorations so we can draw our own.
                let hints = MwmHints {
                    flags: MWM_HINTS_DECORATIONS,
                    functions: 0,
                    decorations: 0,
                    input_mode: 0,
                    status: 0,
                };
                let atom_motif_wm_hints = (*self.xlib_app).atom_motif_wm_hints;
                X11_sys::XChangeProperty(display, window, atom_motif_wm_hints, atom_motif_wm_hints, 32, X11_sys::PropModeReplace as i32, &hints as *const _ as *const u8, 5);
            }
            (*self.xlib_app).dnd.enable_for_window(window);
            // Map the window to the screen
            X11_sys::XMapWindow(display, window);
            X11_sys::XFlush(display);
            // XStoreName wants a NUL-terminated C string.
            let title_bytes = format!("{}\0", title);
            X11_sys::XStoreName(display, window, title_bytes.as_bytes().as_ptr() as *const ::std::os::raw::c_char);
            // Create the X input context used for IME/text input on this window.
            let xic = X11_sys::XCreateIC(
                (*self.xlib_app).xim,
                CStr::from_bytes_with_nul(X11_sys::XNInputStyle.as_ref()).unwrap().as_ptr(),
                (X11_sys::XIMPreeditNothing | X11_sys::XIMStatusNothing) as i32,
                CStr::from_bytes_with_nul(X11_sys::XNClientWindow.as_ref()).unwrap().as_ptr(),
                window,
                CStr::from_bytes_with_nul(X11_sys::XNFocusWindow.as_ref()).unwrap().as_ptr(),
                window,
                ptr::null_mut() as *mut c_void
            );
            // Register the window so the app event loop can route events back to us.
            (*self.xlib_app).window_map.insert(window, self);
            self.attributes = Some(attributes);
            self.visual_info = Some(visual_info);
            self.window = Some(window);
            self.xic = Some(xic);
            self.last_window_geom = self.get_window_geom();
            (*self.xlib_app).event_recur_block = false;
            let new_geom = self.get_window_geom();
            self.do_callback(&mut vec![
                Event::WindowGeomChange(WindowGeomChangeEvent {
                    window_id: self.window_id,
                    old_geom: new_geom.clone(),
                    new_geom: new_geom
                })
            ]);
            (*self.xlib_app).event_recur_block = true;
        }
    }

    /// Unmaps every currently visible child (popup) window.
    pub fn hide_child_windows(&mut self) {
        unsafe {
            let display = (*self.xlib_app).display;
            for child in &mut self.child_windows {
                if child.visible {
                    X11_sys::XUnmapWindow(display, child.window);
                    child.visible = false
                }
            }
        }
    }

    /// Returns a child window with the given geometry, reusing an existing one
    /// when possible (exact match first, then any hidden one), otherwise
    /// creating a fresh override-redirect child window.
    pub fn alloc_child_window(&mut self, x: i32, y: i32, w: u32, h: u32) -> Option<c_ulong> {
        unsafe {
            let display = (*self.xlib_app).display;
            // ok lets find a childwindow that matches x/y/w/h and show it if need be
            for child in &mut self.child_windows {
                if child.x == x && child.y == y && child.w == w && child.h == h {
                    if !child.visible {
                        X11_sys::XMapWindow(display, child.window);
                        child.visible = true;
                    }
                    X11_sys::XRaiseWindow(display, child.window);
                    return Some(child.window);
                }
            }
            // No exact match: recycle any hidden child by moving/resizing it.
            for child in &mut self.child_windows {
                if !child.visible {
                    child.x = x;
                    child.y = y;
                    child.w = w;
                    child.h = h;
                    X11_sys::XMoveResizeWindow(display, child.window, x, y, w, h);
                    X11_sys::XMapWindow(display, child.window);
                    X11_sys::XRaiseWindow(display, child.window);
                    child.visible = true;
                    return Some(child.window);
                }
            }
            // Nothing to reuse: create a brand new child window.
            let new_child = X11_sys::XCreateWindow(
                display,
                self.window.unwrap(),
                x,
                y,
                w,
                h,
                0,
                self.visual_info.unwrap().depth,
                X11_sys::InputOutput as u32,
                self.visual_info.unwrap().visual,
                (X11_sys::CWBorderPixel | X11_sys::CWColormap | X11_sys::CWEventMask | X11_sys::CWOverrideRedirect) as c_ulong,
                self.attributes.as_mut().unwrap(),
            );
            // Map the window to the screen
            //X11_sys::XMapWindow(display, window_dirty);
            (*self.xlib_app).window_map.insert(new_child, self);
            X11_sys::XMapWindow(display, new_child);
            X11_sys::XFlush(display);
            self.child_windows.push(XlibChildWindow {
                window: new_child,
                x: x,
                y: y,
                w: w,
                h: h,
                visible: true
            });
            return Some(new_child)
        }
    }

    /// Returns the current key modifiers. On this backend it is a stub that
    /// always reports no modifiers held.
    pub fn get_key_modifiers() -> KeyModifiers {
        //unsafe {
        KeyModifiers {
            control: false,
            shift: false,
            alt: false,
            logo: false
        }
        //}
    }

    /// Re-registers this window and all its children in the app's window map;
    /// needed after `self` has moved in memory, since the map stores raw pointers.
    pub fn update_ptrs(&mut self) {
        unsafe {
            (*self.xlib_app).window_map.insert(self.window.unwrap(), self);
            for i in 0..self.child_windows.len() {
                (*self.xlib_app).window_map.insert(self.child_windows[i].window, self);
            }
        }
    }

    /// Stub; mouse-move handling happens in the app event loop on this backend.
    pub fn on_mouse_move(&self) {
    }

    /// Stub; cursor shapes are not applied per-window on this backend.
    pub fn set_mouse_cursor(&mut self, _cursor: MouseCursor) {
    }

    /// Sends a _NET_WM_STATE client message to the root window to add or
    /// remove the maximized-horizontal/vertical state (EWMH).
    fn restore_or_maximize(&self, add_remove: c_long) {
        unsafe {
            let xlib_app = &(*self.xlib_app);
            let default_screen = X11_sys::XDefaultScreen(xlib_app.display);
            let root_window = X11_sys::XRootWindow(xlib_app.display, default_screen);
            let mut xclient = X11_sys::XClientMessageEvent {
                type_: X11_sys::ClientMessage as i32,
                serial: 0,
                send_event: 0,
                display: xlib_app.display,
                window: self.window.unwrap(),
                message_type: xlib_app.atom_net_wm_state,
                format: 32,
                data: {
                    let mut msg = mem::zeroed::<X11_sys::XClientMessageEvent__bindgen_ty_1>();
                    msg.l[0] = add_remove;
                    msg.l[1] = xlib_app.atom_new_wm_state_maximized_horz as c_long;
                    msg.l[2] = xlib_app.atom_new_wm_state_maximized_vert as c_long;
                    msg
                }
            };
            X11_sys::XSendEvent(xlib_app.display, root_window, 0, (X11_sys::SubstructureNotifyMask | X11_sys::SubstructureRedirectMask) as c_long, &mut xclient as *mut _ as *mut X11_sys::XEvent);
        }
    }

    /// Restores the window from the maximized state.
    pub fn restore(&self) {
        self.restore_or_maximize(_NET_WM_STATE_REMOVE);
    }

    /// Maximizes the window.
    pub fn maximize(&self) {
        self.restore_or_maximize(_NET_WM_STATE_ADD);
    }

    /// Destroys the underlying X11 window and forgets its handle.
    pub fn close_window(&mut self) {
        unsafe {
            let xlib_app = &(*self.xlib_app);
            X11_sys::XDestroyWindow(xlib_app.display, self.window.unwrap());
            self.window = None;
            // lets remove us from the mapping
        }
    }

    /// Iconifies (minimizes) the window.
    pub fn minimize(&self) {
        unsafe {
            let xlib_app = &(*self.xlib_app);
            let default_screen = X11_sys::XDefaultScreen(xlib_app.display);
            X11_sys::XIconifyWindow(xlib_app.display, self.window.unwrap(), default_screen);
            X11_sys::XFlush(xlib_app.display);
        }
    }

    /// Stub; always-on-top is not implemented on this backend.
    pub fn set_topmost(&self, _topmost: bool) {
    }

    /// Always `false`; see `set_topmost`.
    pub fn get_is_topmost(&self) -> bool {
        false
    }

    /// Snapshot of the window's current geometry and related flags.
    /// Note: `is_fullscreen` here reports the EWMH maximized state.
    pub fn get_window_geom(&self) -> WindowGeom {
        WindowGeom {
            xr_is_presenting: false,
            xr_can_present: false,
            can_fullscreen: false,
            is_topmost: self.get_is_topmost(),
            is_fullscreen: self.get_is_maximized(),
            inner_size: self.get_inner_size(),
            outer_size: self.get_outer_size(),
            dpi_factor: self.get_dpi_factor(),
            position: self.get_position()
        }
    }

    /// Reads the window's _NET_WM_STATE property and reports whether either
    /// maximized-horizontal or maximized-vertical is present.
    pub fn get_is_maximized(&self) -> bool {
        let mut maximized = false;
        unsafe {
            let xlib_app = &(*self.xlib_app);
            let mut prop_type = mem::MaybeUninit::uninit();
            let mut format = mem::MaybeUninit::uninit();
            let mut n_item = mem::MaybeUninit::uninit();
            let mut bytes_after = mem::MaybeUninit::uninit();
            let mut properties = mem::MaybeUninit::uninit();
            let result = X11_sys::XGetWindowProperty(
                xlib_app.display,
                self.window.unwrap(),
                xlib_app.atom_net_wm_state,
                0,
                !0, // read the whole property, however long
                0,
                X11_sys::AnyPropertyType as c_ulong,
                prop_type.as_mut_ptr(),
                format.as_mut_ptr(),
                n_item.as_mut_ptr(),
                bytes_after.as_mut_ptr(),
                properties.as_mut_ptr()
            );
            //let prop_type = prop_type.assume_init();
            //let format = format.assume_init();
            let n_item = n_item.assume_init();
            //let bytes_after = bytes_after.assume_init();
            let properties = properties.assume_init();
            if result == 0 && properties != ptr::null_mut() {
                let items = std::slice::from_raw_parts::<c_ulong>(properties as *mut _, n_item as usize);
                for item in items {
                    if *item == xlib_app.atom_new_wm_state_maximized_horz || *item == xlib_app.atom_new_wm_state_maximized_vert {
                        maximized = true;
                        break;
                    }
                }
                X11_sys::XFree(properties as *mut _);
            }
        }
        maximized
    }

    /// Seconds elapsed since this window's app started.
    pub fn time_now(&self) -> f64 {
        let time_now = precise_time_ns();
        (time_now - self.time_start) as f64 / 1_000_000_000.0
    }

    /// Remembers where the IME candidate window should be placed.
    pub fn set_ime_spot(&mut self, spot: Vec2) {
        self.ime_spot = spot;
    }

    /// Window position as reported by XGetWindowAttributes (parent-relative).
    pub fn get_position(&self) -> Vec2 {
        unsafe {
            let mut xwa = mem::MaybeUninit::uninit();
            let display = (*self.xlib_app).display;
            X11_sys::XGetWindowAttributes(display, self.window.unwrap(), xwa.as_mut_ptr());
            let xwa = xwa.assume_init();
            return Vec2 {x: xwa.x as f32, y: xwa.y as f32}
            /*
            let mut child = mem::uninitialized();
            let default_screen = X11_sys::XDefaultScreen(display);
            let root_window = X11_sys::XRootWindow(display, default_screen);
            let mut x:c_int = 0;
            let mut y:c_int = 0;
            X11_sys::XTranslateCoordinates(display, self.window.unwrap(), root_window, 0, 0, &mut x, &mut y, &mut child );
            */
        }
    }

    /// Inner (client) size in logical units, i.e. pixels divided by the DPI factor.
    pub fn get_inner_size(&self) -> Vec2 {
        let dpi_factor = self.get_dpi_factor();
        unsafe {
            let mut xwa = mem::MaybeUninit::uninit();
            let display = (*self.xlib_app).display;
            X11_sys::XGetWindowAttributes(display, self.window.unwrap(), xwa.as_mut_ptr());
            let xwa = xwa.assume_init();
            return Vec2 {x: xwa.width as f32 / dpi_factor, y: xwa.height as f32 / dpi_factor}
        }
    }

    /// Window size in raw pixels as reported by XGetWindowAttributes.
    pub fn get_outer_size(&self) -> Vec2 {
        unsafe {
            let mut xwa = mem::MaybeUninit::uninit();
            let display = (*self.xlib_app).display;
            X11_sys::XGetWindowAttributes(display, self.window.unwrap(), xwa.as_mut_ptr());
            let xwa = xwa.assume_init();
            return Vec2 {x: xwa.width as f32, y: xwa.height as f32}
        }
    }

    /// Stub; programmatic repositioning is not implemented on this backend.
    pub fn set_position(&mut self, _pos: Vec2) {
    }

    /// Stub; programmatic resizing is not implemented on this backend.
    pub fn set_outer_size(&self, _size: Vec2) {
    }

    /// Stub; programmatic resizing is not implemented on this backend.
    pub fn set_inner_size(&self, _size: Vec2) {
    }

    /// Reads the `Xft.dpi` X resource and converts it to a scale factor
    /// relative to 96 dpi; falls back to 1.0 when the resource is absent.
    pub fn get_dpi_factor(&self) -> f32 {
        unsafe {
            //return 2.0;
            let display = (*self.xlib_app).display;
            let resource_string = X11_sys::XResourceManagerString(display);
            if resource_string == std::ptr::null_mut() {
                return 1.0
            }
            let db = X11_sys::XrmGetStringDatabase(resource_string);
            let mut ty = mem::MaybeUninit::uninit();
            let mut value = mem::MaybeUninit::uninit();
            X11_sys::XrmGetResource(
                db,
                CString::new("Xft.dpi").unwrap().as_ptr(),
                CString::new("String").unwrap().as_ptr(),
                ty.as_mut_ptr(),
                value.as_mut_ptr()
            );
            //let ty = ty.assume_init();
            let value = value.assume_init();
            if value.addr == std::ptr::null_mut() {
                return 1.0; // TODO find some other way to figure it out
            }
            else {
                let dpi: f32 = CStr::from_ptr(value.addr).to_str().unwrap().parse().unwrap();
                return dpi / 96.0;
            }
        }
    }

    /// Forwards events to the application-level callback.
    pub fn do_callback(&mut self, events: &mut Vec<Event>) {
        unsafe {
            (*self.xlib_app).do_callback(events);
        }
    }

    /// Emits a WindowGeomChange (plus a Paint) event when geometry changed.
    /// Shrinking in either dimension clears the fullscreen flag.
    pub fn send_change_event(&mut self) {
        let mut new_geom = self.get_window_geom();
        if new_geom.inner_size.x < self.last_window_geom.inner_size.x
            || new_geom.inner_size.y < self.last_window_geom.inner_size.y {
            new_geom.is_fullscreen = false;
        }
        let old_geom = self.last_window_geom.clone();
        self.last_window_geom = new_geom.clone();
        self.do_callback(&mut vec![
            Event::WindowGeomChange(WindowGeomChangeEvent {
                window_id: self.window_id,
                old_geom: old_geom,
                new_geom: new_geom
            }),
            Event::Paint
        ]);
    }

    /// Notifies the app that this window gained focus.
    pub fn send_focus_event(&mut self) {
        self.do_callback(&mut vec![Event::AppFocus]);
    }

    /// Notifies the app that this window lost focus.
    pub fn send_focus_lost_event(&mut self) {
        self.do_callback(&mut vec![Event::AppFocusLost]);
    }

    /// Records the mouse-button press for `digit` and emits a FingerDown event
    /// at the last known mouse position.
    pub fn send_finger_down(&mut self, digit: usize, modifiers: KeyModifiers) {
        let mut down_count = 0;
        for is_down in &self.fingers_down {
            if *is_down {
                down_count += 1;
            }
        }
        if down_count == 0 {
            //unsafe {winuser::SetCapture(self.hwnd.unwrap());}
        }
        self.fingers_down[digit] = true;
        self.do_callback(&mut vec![Event::FingerDown(FingerDownEvent {
            window_id: self.window_id,
            abs: self.last_mouse_pos,
            rel: self.last_mouse_pos,
            rect: Rect::default(),
            digit: digit,
            handled: false,
            input_type: FingerInputType::Mouse,
            modifiers: modifiers,
            tap_count: 0,
            time: self.time_now()
        })]);
    }

    /// Records the mouse-button release for `digit` and emits a FingerUp event.
    pub fn send_finger_up(&mut self, digit: usize, modifiers: KeyModifiers) {
        self.fingers_down[digit] = false;
        let mut down_count = 0;
        for is_down in &self.fingers_down {
            if *is_down {
                down_count += 1;
            }
        }
        if down_count == 0 {
            // unsafe {winuser::ReleaseCapture();}
        }
        self.do_callback(&mut vec![Event::FingerUp(FingerUpEvent {
            window_id: self.window_id,
            abs: self.last_mouse_pos,
            rel: self.last_mouse_pos,
            rect: Rect::default(),
            abs_start: Vec2::default(),
            rel_start: Vec2::default(),
            digit: digit,
            is_over: false,
            input_type: FingerInputType::Mouse,
            modifiers: modifiers,
            time: self.time_now()
        })]);
    }

    /// Emits a FingerMove event for every pressed digit, then a FingerHover
    /// event, for a mouse move at `pos`.
    pub fn send_finger_hover_and_move(&mut self, pos: Vec2, modifiers: KeyModifiers) {
        self.last_mouse_pos = pos;
        let mut events = Vec::new();
        for (digit, down) in self.fingers_down.iter().enumerate() {
            if *down {
                events.push(Event::FingerMove(FingerMoveEvent {
                    window_id: self.window_id,
                    abs: pos,
                    rel: pos,
                    rect: Rect::default(),
                    digit: digit,
                    abs_start: Vec2::default(),
                    rel_start: Vec2::default(),
                    is_over: false,
                    input_type: FingerInputType::Mouse,
                    modifiers: modifiers.clone(),
                    time: self.time_now()
                }));
            }
        };
        events.push(Event::FingerHover(FingerHoverEvent {
            digit: 0,
            window_id: self.window_id,
            abs: pos,
            rel: pos,
            any_down: false,
            rect: Rect::default(),
            handled: false,
            hover_state: HoverState::Over,
            modifiers: modifiers,
            time: self.time_now()
        }));
        self.do_callback(&mut events);
    }

    /// Asks the app whether the window may close; returns the (possibly
    /// modified) `accept_close` decision from the callback.
    pub fn send_close_requested_event(&mut self) -> bool {
        let mut events = vec![Event::WindowCloseRequested(WindowCloseRequestedEvent {window_id: self.window_id, accept_close: true})];
        self.do_callback(&mut events);
        if let Event::WindowCloseRequested(cre) = &events[0] {
            return cre.accept_close
        }
        true
    }

    /// Forwards committed text (e.g. from the IME) to the app.
    pub fn send_text_input(&mut self, input: String, replace_last: bool) {
        self.do_callback(&mut vec![Event::TextInput(TextInputEvent {
            input: input,
            was_paste: false,
            replace_last: replace_last
        })])
    }
}

/// Motif WM hints property layout (_MOTIF_WM_HINTS), used to suppress the
/// window manager's decorations when we draw our own chrome.
#[derive(Clone, Copy, PartialEq)]
#[repr(C)]
struct MwmHints {
    pub flags: c_ulong,
    pub functions: c_ulong,
    pub decorations: c_ulong,
    pub input_mode: c_long,
    pub status: c_ulong,
}

// _MOTIF_WM_HINTS flag and function bits.
const MWM_HINTS_FUNCTIONS: c_ulong = 1 << 0;
const MWM_HINTS_DECORATIONS: c_ulong = 1 << 1;
const MWM_FUNC_ALL: c_ulong = 1 << 0;
const MWM_FUNC_RESIZE: c_ulong = 1 << 1;
const MWM_FUNC_MOVE: c_ulong = 1 << 2;
const MWM_FUNC_MINIMIZE: c_ulong = 1 << 3;
const MWM_FUNC_MAXIMIZE: c_ulong = 1 << 4;
const MWM_FUNC_CLOSE: c_ulong = 1 << 5;
// EWMH _NET_WM_MOVERESIZE directions.
const _NET_WM_MOVERESIZE_SIZE_TOPLEFT: c_long = 0;
const _NET_WM_MOVERESIZE_SIZE_TOP: c_long = 1;
const _NET_WM_MOVERESIZE_SIZE_TOPRIGHT: c_long = 2;
const _NET_WM_MOVERESIZE_SIZE_RIGHT: c_long = 3;
const _NET_WM_MOVERESIZE_SIZE_BOTTOMRIGHT: c_long = 4;
const _NET_WM_MOVERESIZE_SIZE_BOTTOM: c_long = 5;
const _NET_WM_MOVERESIZE_SIZE_BOTTOMLEFT: c_long = 6;
const _NET_WM_MOVERESIZE_SIZE_LEFT: c_long = 7;
const _NET_WM_MOVERESIZE_MOVE: c_long = 8; /* movement only */
const _NET_WM_MOVERESIZE_SIZE_KEYBOARD: c_long = 9; /* size via keyboard */
const _NET_WM_MOVERESIZE_MOVE_KEYBOARD: c_long = 10; /* move via keyboard */
// EWMH _NET_WM_STATE actions.
const _NET_WM_STATE_REMOVE: c_long = 0; /* remove/unset property */
const _NET_WM_STATE_ADD: c_long = 1; /* add/set property */
const _NET_WM_STATE_TOGGLE: c_long = 2; /* toggle property */

/// XDnd (X drag-and-drop) protocol state for the application.
pub struct Dnd {
    atoms: DndAtoms,
    display: *mut X11_sys::Display,
    // Types offered by the current drag source; None when no drag is active.
    type_list: Option<Vec<X11_sys::Atom>>,
    // Converted selection data; None until a drop has been converted.
    selection: Option<CString>,
}

impl Dnd {
    unsafe fn new(display: *mut X11_sys::Display) -> Dnd {
        Dnd {
            atoms: DndAtoms::new(display),
            display,
            type_list: None,
            selection: None,
        }
    }

    /// Enables drag-and-drop for the given window.
    unsafe fn enable_for_window(&mut self, window: X11_sys::Window) {
        // To enable drag-and-drop for a window, we need to set the XDndAware property of the window
        // to the version of XDnd we support.
        // I took this value from the Winit source code. Apparently, this is the latest version, and
        // hasn't changed since 2002.
        let version = 5 as c_ulong;
        X11_sys::XChangeProperty(
            self.display,
            window,
            self.atoms.aware,
            4, // XA_ATOM
            32,
            X11_sys::PropModeReplace as c_int,
            &version as *const c_ulong as *const c_uchar,
            1
        );
    }

    /// Handles a XDndEnter event.
    unsafe fn handle_enter_event(&mut self, event: &X11_sys::XClientMessageEvent) {
        // The XDndEnter event is sent by the source window when a drag begins. That is, the mouse
        // enters the client rectangle of the target window. The target window is supposed to
        // respond to this by requesting the list of types supported by the source.
        let source_window = event.data.l[0] as X11_sys::Window;
        let has_more_types = event.data.l[1] & (1 << 0) != 0;
        // If the has_more_types flags is set, we have to obtain the list of supported types from
        // the XDndTypeList property. Otherwise, we can obtain the list of supported types from the
        // event itself.
        // NOTE(review): the XDND spec carries up to three inline types in l[2], l[3] and l[4];
        // this slice only reads l[2] and l[3] — confirm whether dropping l[4] is intentional.
        self.type_list = Some(if has_more_types {
            self.get_type_list_property(source_window)
        } else {
            event.data.l[2..4]
                .iter()
                .map( | &l | l as X11_sys::Atom)
                .filter( | &atom | atom != X11_sys::None as X11_sys::Atom)
                .collect()
        });
    }

    /// Handles a XDndDrop event.
    unsafe fn handle_drop_event(&mut self, event: &X11_sys::XClientMessageEvent) {
        // The XDndDrop event is sent by the source window when a drag is confirmed. That is, the
        // mouse button is released while the mouse is inside the client rectangle of the target
        // window. The target window is supposed to respond to this by requesting that the selection
        // representing the thing being dragged is converted to the appropriate data type (in our
        // case, a URI list). The source window, in turn, is supposed to respond this by sending a
        // selection event containing the data to the source window.
        let target_window = event.window as X11_sys::Window;
        self.convert_selection(target_window);
        self.type_list = None;
    }

    /// Handles a XDndLeave event.
    unsafe fn handle_leave_event(&mut self, _event: &X11_sys::XClientMessageEvent) {
        // The XDndLeave event is sent by the source window when a drag is canceled. That is, the
        // mouse leaves the client rectangle of the target window. The target window is supposed to
        // respond to this by pretending the drag never happened.
        self.type_list = None;
    }

    /// Handles a XDndPosition event.
    unsafe fn handle_position_event(&mut self, event: &X11_sys::XClientMessageEvent) {
        // The XDndPosition event is sent by the source window after the XDndEnter event, every time
        // the mouse is moved. The target window is supposed to respond to this by sending a status
        // event to the source window notifying whether it can accept the drag at this position.
        let target_window = event.window as X11_sys::Window;
        let source_window = event.data.l[0] as X11_sys::Window;
        // For now we accept the drag if and only if the list of types supported by the source
        // includes a uri list.
        //
        // TODO: Extend this test by taking into account the position of the mouse as well.
        let accepted = self.type_list.as_ref().map_or(false, | type_list | type_list.contains(&self.atoms.uri_list));
        // Notify the source window whether we can accept the drag at this position.
        self.send_status_event(source_window, target_window, accepted);
        // If this is the first time we've accepted the drag, request that the drag-and-drop
        // selection be converted to a URI list. The target window is supposed to respond to this by
        // sending a XSelectionEvent containing the URI list.
        // Since this is an asynchronous operation, its possible for another XDndPosition event to
        // come in before the response to the first conversion request has been received. In this
        // case, a second conversion request will be sent, the response to which will be ignored.
        if accepted && self.selection.is_none() { }
    }

    /// Handles a XSelectionEvent.
    unsafe fn handle_selection_event(&mut self, _event: &X11_sys::XSelectionEvent) {
        // The XSelectionEvent is sent by the source window in response to a request by the source
        // window to convert the selection representing the thing being dragged to the appropriate
        // data type. This request is always sent in response to a XDndDrop event, so this event
        // should only be received after a drop operation has completed.
        //let source_window = event.requestor;
        //let selection = CString::new(self.get_selection_property(source_window)).unwrap();
        // TODO: Actually use the selection
    }

    /// Gets the XDndSelection property from the source window.
    unsafe fn get_selection_property(&mut self, source_window: X11_sys::Window) -> Vec<c_uchar> {
        let mut selection = Vec::new();
        let mut offset = 0;
        let length = 1024;
        let mut actual_type = 0;
        let mut actual_format = 0;
        let mut nitems = 0;
        let mut bytes_after = 0;
        let mut prop = ptr::null_mut();
        // Read the property in fixed-size chunks until nothing remains.
        loop {
            X11_sys::XGetWindowProperty(
                self.display,
                source_window,
                self.atoms.selection,
                offset,
                length,
                X11_sys::False as c_int,
                self.atoms.uri_list,
                &mut actual_type,
                &mut actual_format,
                &mut nitems,
                &mut bytes_after,
                &mut prop,
            );
            selection.extend_from_slice(slice::from_raw_parts(prop as *mut c_uchar, nitems as usize));
            X11_sys::XFree(prop as *mut c_void);
            if bytes_after == 0 {
                break;
            }
            offset += length;
        };
        selection
    }

    /// Gets the XDndTypeList property from the source window.
    unsafe fn get_type_list_property(&mut self, source_window: X11_sys::Window) -> Vec<X11_sys::Atom> {
        let mut type_list = Vec::new();
        let mut offset = 0;
        let length = 1024;
        let mut actual_type = 0;
        let mut actual_format = 0;
        let mut nitems = 0;
        let mut bytes_after = 0;
        let mut prop = ptr::null_mut();
        // Read the property in fixed-size chunks until nothing remains.
        loop {
            X11_sys::XGetWindowProperty(
                self.display,
                source_window,
                self.atoms.type_list,
                offset,
                length,
                X11_sys::False as c_int,
                4, // XA_ATOM,
                &mut actual_type,
                &mut actual_format,
                &mut nitems,
                &mut bytes_after,
                &mut prop,
            );
            type_list.extend_from_slice(slice::from_raw_parts(prop as *mut X11_sys::Atom, nitems as usize));
            X11_sys::XFree(prop as *mut c_void);
            if bytes_after == 0 {
                break;
            }
            offset += length;
        };
        type_list
    }

    /// Sends a XDndStatus event to the target window.
    unsafe fn send_status_event(&mut self, source_window: X11_sys::Window, target_window: X11_sys::Window, accepted: bool) {
        X11_sys::XSendEvent(
            self.display,
            source_window,
            X11_sys::False as c_int,
            X11_sys::NoEventMask as c_long,
            &mut X11_sys::XClientMessageEvent {
                type_: X11_sys::ClientMessage as c_int,
                serial: 0,
                send_event: 0,
                display: self.display,
                window: source_window,
                message_type: self.atoms.status,
                format: 32,
                data: {
                    let mut data = mem::zeroed::<X11_sys::XClientMessageEvent__bindgen_ty_1>();
                    data.l[0] = target_window as c_long;
                    data.l[1] = if accepted {1 << 0} else {0};
                    data.l[2] = 0;
                    data.l[3] = 0;
                    data.l[4] = if accepted {self.atoms.action_private} else {self.atoms.none} as c_long;
                    data
                }
            } as *mut X11_sys::XClientMessageEvent as *mut X11_sys::XEvent
        );
        X11_sys::XFlush(self.display);
    }

    // Requests that the selection representing the thing being dragged is converted to the
    // appropriate data type (in our case, a URI list).
    unsafe fn convert_selection(&self, target_window: X11_sys::Window) {
        X11_sys::XConvertSelection(
            self.display,
            self.atoms.selection,
            self.atoms.uri_list,
            self.atoms.selection,
            target_window,
            X11_sys::CurrentTime as X11_sys::Time,
        );
    }
}

/// Interned X atoms used by the XDnd protocol implementation above.
struct DndAtoms {
    action_private: X11_sys::Atom,
    aware: X11_sys::Atom,
    drop: X11_sys::Atom,
    enter: X11_sys::Atom,
    leave: X11_sys::Atom,
    none: X11_sys::Atom,
    position: X11_sys::Atom,
    selection: X11_sys::Atom,
    status: X11_sys::Atom,
    type_list: X11_sys::Atom,
    uri_list: X11_sys::Atom,
}

impl DndAtoms {
    /// Interns every XDnd-related atom once up front.
    unsafe fn new(display: *mut X11_sys::Display) -> DndAtoms {
        DndAtoms {
            action_private: X11_sys::XInternAtom(display, CString::new("XdndActionPrivate").unwrap().as_ptr(), 0),
            aware: X11_sys::XInternAtom(display, CString::new("XdndAware").unwrap().as_ptr(), 0),
            drop: X11_sys::XInternAtom(display, CString::new("XdndDrop").unwrap().as_ptr(), 0),
            enter: X11_sys::XInternAtom(display, CString::new("XdndEnter").unwrap().as_ptr(), 0),
            leave: X11_sys::XInternAtom(display, CString::new("XdndLeave").unwrap().as_ptr(), 0),
            none: X11_sys::XInternAtom(display, CString::new("None").unwrap().as_ptr(), 0),
            position: X11_sys::XInternAtom(display, CString::new("XdndPosition").unwrap().as_ptr(), 0),
            selection: X11_sys::XInternAtom(display, CString::new("XdndSelection").unwrap().as_ptr(), 0),
            status: X11_sys::XInternAtom(display, CString::new("XdndStatus").unwrap().as_ptr(), 0),
            type_list: X11_sys::XInternAtom(display, CString::new("XdndTypeList").unwrap().as_ptr(), 0),
            uri_list: X11_sys::XInternAtom(display, CString::new("text/uri-list").unwrap().as_ptr(), 0),
        }
    }
}
46.294731
410
0.473481
8f77114744c38064f77476dab7ce2f8177674735
5,734
use crate::{RegisterBits, Register}; use core::marker; /// A 8-bit timer. pub trait Timer8 : Sized { /// The first compare register. /// For example, OCR0A. type CompareA: Register<T=u8>; /// The second compare register. /// For example, OCR0B. type CompareB: Register<T=u8>; /// The counter register. /// /// For example, TCNT0. type Counter: Register<T=u8>; /// The first control register. /// /// For example, TCCR0A. type ControlA: Register<T=u8>; /// The second control register. /// /// For example, TCCR0B. type ControlB: Register<T=u8>; /// The interrupt mask register. /// /// For example, TIMSK0. type InterruptMask: Register<T=u8>; /// The interrupt flag register. /// /// For example, TIFR0. type InterruptFlag: Register<T=u8>; /// Bit 0 of the clock select mask. const CS0: RegisterBits<Self::ControlB>; /// Bit 1 of the clock select mask. const CS1: RegisterBits<Self::ControlB>; /// Bit 2 of the clock select mask. const CS2: RegisterBits<Self::ControlB>; /// Bit 0 of the waveform generation mode mask. const WGM0: RegisterBits<Self::ControlA>; /// Bit 1 of the waveform generation mode mask. const WGM1: RegisterBits<Self::ControlA>; /// Bit 2 of the waveform generation mode mask. const WGM2: RegisterBits<Self::ControlB>; /// Output compare interrupt enable flag. 
const OCIEA: RegisterBits<Self::InterruptMask>; } pub enum ClockSource { None, Prescale1, Prescale8, Prescale64, Prescale256, Prescale1024, ExternalFalling, ExternalRising, } impl ClockSource { fn bits<T: Timer8>(&self) -> RegisterBits<T::ControlB> { use self::ClockSource::*; match *self { None => RegisterBits::zero() | RegisterBits::zero() | RegisterBits::zero(), Prescale1 => RegisterBits::zero() | RegisterBits::zero() | T::CS0, Prescale8 => RegisterBits::zero() | T::CS1 | RegisterBits::zero(), Prescale64 => RegisterBits::zero() | T::CS1 | T::CS0, Prescale256 => T::CS2 | RegisterBits::zero() | RegisterBits::zero(), Prescale1024 => T::CS2 | RegisterBits::zero() | T::CS0, ExternalFalling => T::CS2 | T::CS1 | RegisterBits::zero(), ExternalRising => T::CS2 | T::CS1 | T::CS0, } } #[inline] fn mask<T: Timer8>() -> RegisterBits<T::ControlB> { !(T::CS2 | T::CS1 | T::CS0) } } pub enum WaveformGenerationMode { Normal, PwmPhaseCorrect, ClearOnTimerMatchOutputCompare, FastPwm , PwmPhaseCorrectOutputCompare, FastPwmOutputCompare, } impl WaveformGenerationMode { /// Returns bits for TCCR0A, TCCR0B #[inline] fn bits<T: Timer8>(&self) -> (RegisterBits<T::ControlA>, RegisterBits<T::ControlB>) { use self::WaveformGenerationMode::*; // It makes more sense to return bytes (A,B), but the manual // lists the table as (B,A). We match the manual here for // inspection purposes and flip the values for sanity // purposes. 
let (b, a) = match *self { Normal => (RegisterBits::zero(), RegisterBits::zero() | RegisterBits::zero()), PwmPhaseCorrect => (RegisterBits::zero(), RegisterBits::zero() | T::WGM0), ClearOnTimerMatchOutputCompare => (RegisterBits::zero(), T::WGM1 | RegisterBits::zero()), FastPwm => (RegisterBits::zero(), T::WGM1 | T::WGM0), // Reserved => (T::WGM2, RegisterBits::zero() | RegisterBits::zero()), PwmPhaseCorrectOutputCompare => (T::WGM2, RegisterBits::zero() | T::WGM0), // Reserved => (T::WGM2, T::WGM1 | RegisterBits::zero())), FastPwmOutputCompare => (T::WGM2, T::WGM1 | T::WGM0), }; (a, b) } #[inline] fn mask<T: Timer8>() -> (RegisterBits<T::ControlA>, RegisterBits<T::ControlB>) { (!(T::WGM0 | T::WGM1), !(T::WGM2)) } } pub struct Timer8Setup<T: Timer8> { a: RegisterBits<T::ControlA>, b: RegisterBits<T::ControlB>, output_compare_1: Option<u8>, _phantom: marker::PhantomData<T>, } impl<T: Timer8> Timer8Setup<T> { #[inline] pub fn new() -> Self { Timer8Setup { a: RegisterBits::zero(), b: RegisterBits::zero(), output_compare_1: None, _phantom: marker::PhantomData, } } #[inline] pub fn clock_source(mut self, source: ClockSource) -> Self { self.b &= ClockSource::mask::<T>(); self.b |= source.bits::<T>(); self } #[inline] pub fn waveform_generation_mode(mut self, mode: WaveformGenerationMode) -> Self { let (a, b) = WaveformGenerationMode::mask::<T>(); self.a &= a; self.b &= b; let (a, b) = mode.bits::<T>(); self.a |= a; self.b |= b; self } #[inline] pub fn output_compare_1(mut self, value: Option<u8>) -> Self { self.output_compare_1 = value; self } #[inline] pub fn configure(self) { T::ControlA::write(self.a); T::ControlB::write(self.b); // Reset counter to zero T::Counter::write(0); if let Some(v) = self.output_compare_1 { // Set the match T::CompareA::write(v); // Enable compare interrupt T::InterruptMask::set(T::OCIEA); } } }
30.178947
114
0.550227
e638a46240a739c45b0051a41c66606ebf3877c1
8,585
#![cfg(feature = "full")]

use {
    crate::{
        ed25519_instruction::verify_signatures,
        hash::Hash,
        message::{v0, MappedAddresses, MappedMessage, SanitizedMessage, VersionedMessage},
        nonce::NONCED_TX_MARKER_IX_INDEX,
        program_utils::limited_deserialize,
        pubkey::Pubkey,
        sanitize::Sanitize,
        secp256k1_instruction::verify_eth_addresses,
        secp256k1_program,
        signature::Signature,
        solana_sdk::feature_set,
        transaction::{Result, Transaction, TransactionError, VersionedTransaction},
    },
    solana_program::{system_instruction::SystemInstruction, system_program},
    std::convert::TryFrom,
    std::sync::Arc,
};

/// Sanitized transaction and the hash of its message
#[derive(Debug, Clone)]
pub struct SanitizedTransaction {
    // The sanitized message: either a legacy message or a v0 message
    // paired with its mapped addresses.
    message: SanitizedMessage,
    // Hash of the message, captured once at construction time.
    message_hash: Hash,
    // Signatures over the message bytes.
    signatures: Vec<Signature>,
}

/// Set of accounts that must be locked for safe transaction processing
#[derive(Debug, Clone, Default)]
pub struct TransactionAccountLocks<'a> {
    /// List of readonly account key locks
    pub readonly: Vec<&'a Pubkey>,
    /// List of writable account key locks
    pub writable: Vec<&'a Pubkey>,
}

// Fallible conversion for legacy `Transaction`s; v0 transactions with
// address maps are instead handled by `try_create`.
impl TryFrom<Transaction> for SanitizedTransaction {
    type Error = TransactionError;
    fn try_from(tx: Transaction) -> Result<Self> {
        // Propagate any sanitization failure before doing more work.
        tx.sanitize()?;

        // A message that loads the same account twice cannot be locked
        // safely, so it is rejected here.
        if tx.message.has_duplicates() {
            return Err(TransactionError::AccountLoadedTwice);
        }

        Ok(Self {
            // Hash is computed before the message is moved into the wrapper.
            message_hash: tx.message.hash(),
            message: SanitizedMessage::Legacy(tx.message),
            signatures: tx.signatures,
        })
    }
}

impl SanitizedTransaction {
    /// Create a sanitized transaction from an unsanitized transaction.
    /// If the input transaction uses address maps, attempt to map the
    /// transaction keys to full addresses.
pub fn try_create( tx: VersionedTransaction, message_hash: Hash, address_mapper: impl Fn(&v0::Message) -> Result<MappedAddresses>, ) -> Result<Self> { tx.sanitize()?; let signatures = tx.signatures; let message = match tx.message { VersionedMessage::Legacy(message) => SanitizedMessage::Legacy(message), VersionedMessage::V0(message) => SanitizedMessage::V0(MappedMessage { mapped_addresses: address_mapper(&message)?, message, }), }; if message.has_duplicates() { return Err(TransactionError::AccountLoadedTwice); } Ok(Self { message, message_hash, signatures, }) } /// Return the first signature for this transaction. /// /// Notes: /// /// Sanitized transactions must have at least one signature because the /// number of signatures must be greater than or equal to the message header /// value `num_required_signatures` which must be greater than 0 itself. pub fn signature(&self) -> &Signature { &self.signatures[0] } /// Return the list of signatures for this transaction pub fn signatures(&self) -> &[Signature] { &self.signatures } /// Return the signed message pub fn message(&self) -> &SanitizedMessage { &self.message } /// Return the hash of the signed message pub fn message_hash(&self) -> &Hash { &self.message_hash } /// Convert this sanitized transaction into a versioned transaction for /// recording in the ledger. pub fn to_versioned_transaction(&self) -> VersionedTransaction { let signatures = self.signatures.clone(); match &self.message { SanitizedMessage::V0(mapped_msg) => VersionedTransaction { signatures, message: VersionedMessage::V0(mapped_msg.message.clone()), }, SanitizedMessage::Legacy(message) => VersionedTransaction { signatures, message: VersionedMessage::Legacy(message.clone()), }, } } /// Return the list of accounts that must be locked during processing this transaction. 
    pub fn get_account_locks(&self, demote_program_write_locks: bool) -> TransactionAccountLocks {
        let message = &self.message;
        let num_readonly_accounts = message.num_readonly_accounts();
        // Every key that is not readonly is writable, so the two counts
        // partition the full account-key list.
        let num_writable_accounts = message
            .account_keys_len()
            .saturating_sub(num_readonly_accounts);
        let mut account_locks = TransactionAccountLocks {
            writable: Vec::with_capacity(num_writable_accounts),
            readonly: Vec::with_capacity(num_readonly_accounts),
        };

        // Classify each key by the message's writability rules;
        // `demote_program_write_locks` is forwarded to `is_writable`.
        for (i, key) in message.account_keys_iter().enumerate() {
            if message.is_writable(i, demote_program_write_locks) {
                account_locks.writable.push(key);
            } else {
                account_locks.readonly.push(key);
            }
        }

        account_locks
    }

    /// If the transaction uses a durable nonce, return the pubkey of the nonce account
    pub fn get_durable_nonce(&self) -> Option<&Pubkey> {
        self.message
            .instructions()
            // Durable-nonce transactions carry the marker instruction at a
            // fixed index.
            .get(NONCED_TX_MARKER_IX_INDEX as usize)
            // The instruction must target the system program...
            .filter(
                |ix| match self.message.get_account_key(ix.program_id_index as usize) {
                    Some(program_id) => system_program::check_id(program_id),
                    _ => false,
                },
            )
            // ...and must deserialize to `AdvanceNonceAccount`.
            .filter(|ix| {
                matches!(
                    limited_deserialize(&ix.data),
                    Ok(SystemInstruction::AdvanceNonceAccount)
                )
            })
            // The nonce account is the instruction's first account.
            .and_then(|ix| {
                ix.accounts.get(0).and_then(|idx| {
                    let idx = *idx as usize;
                    self.message.get_account_key(idx)
                })
            })
    }

    /// Return the serialized message data to sign.
    fn message_data(&self) -> Vec<u8> {
        // Both variants serialize the inner (unsanitized) message; this is
        // the byte string the signatures were produced over.
        match &self.message {
            SanitizedMessage::Legacy(message) => message.serialize(),
            SanitizedMessage::V0(mapped_msg) => mapped_msg.message.serialize(),
        }
    }

    /// Verify the length of signatures matches the value in the message header
    pub fn verify_signatures_len(&self) -> bool {
        self.signatures.len() == self.message.header().num_required_signatures as usize
    }

    /// Verify the transaction signatures
    pub fn verify(&self) -> Result<()> {
        let message_bytes = self.message_data();
        // Pair each signature with its corresponding account key (via
        // `zip`) and fail if any single signature does not verify.
        if self
            .signatures
            .iter()
            .zip(self.message.account_keys_iter())
            .map(|(signature, pubkey)| signature.verify(pubkey.as_ref(), &message_bytes))
            .any(|verified| !verified)
        {
            Err(TransactionError::SignatureFailure)
        } else {
            Ok(())
        }
    }

    /// Verify the encoded secp256k1 signatures in this transaction
    pub fn verify_precompiles(&self, feature_set: &Arc<feature_set::FeatureSet>) -> Result<()> {
        for (program_id, instruction) in self.message.program_instructions_iter() {
            if secp256k1_program::check_id(program_id) {
                // The verifier is handed every instruction's raw data, since
                // precompile inputs may reference sibling instructions.
                let instruction_datas: Vec<_> = self
                    .message
                    .instructions()
                    .iter()
                    .map(|instruction| instruction.data.as_ref())
                    .collect();
                let data = &instruction.data;
                let e = verify_eth_addresses(
                    data,
                    &instruction_datas,
                    feature_set.is_active(&feature_set::libsecp256k1_0_5_upgrade_enabled::id()),
                    feature_set.is_active(&feature_set::libsecp256k1_fail_on_bad_count::id()),
                );
                // Any precompile failure is collapsed into this error kind.
                e.map_err(|_| TransactionError::InvalidAccountIndex)?;
            } else if crate::ed25519_program::check_id(program_id)
                // The ed25519 path is additionally gated behind a feature flag.
                && feature_set.is_active(&feature_set::ed25519_program_enabled::id())
            {
                let instruction_datas: Vec<_> = self
                    .message()
                    .instructions()
                    .iter()
                    .map(|instruction| instruction.data.as_ref())
                    .collect();
                let data = &instruction.data;
                let e = verify_signatures(data, &instruction_datas);
                e.map_err(|_| TransactionError::InvalidAccountIndex)?;
            }
        }
        Ok(())
    }
}
35.329218
98
0.587187
d6ae7b4735e05e513bdec5512cf7bc3d75a63c3b
736
// Copyright 2019 Bitwise IO, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use actix_web::{HttpRequest, HttpResponse}; pub fn index(_req: &HttpRequest) -> HttpResponse { HttpResponse::Ok().body("Hello world!") }
36.8
75
0.735054
3812e6d59f85bc7011399b777ad7f6bf2b0951e1
52,767
use std::cell::RefCell; use std::ops::Deref; use std::rc::Rc; use std::sync::Arc; use std::{mem, result}; use oxide_parser::expr::{ Assignment, Binary, BoolLiteral, Call, CallStruct, Expr, FloatLiteral, GetProp, GetStaticProp, Grouping, IntLiteral, Lambda, Match, NilLiteral, SelfStatic, Self_, SetIndex, SetProp, StrLiteral, TypeCast, Unary, Variable, VecIndex, Vec_, }; use oxide_parser::stmt::{ Block, ConstDecl, EnumDecl, FnDecl, FnSignatureDecl, ForIn, If, ImplDecl, Loop, Return, Stmt, StructDecl, TraitDecl, VarDecl, }; use oxide_parser::valtype::{TYPE_BOOL, TYPE_FLOAT, TYPE_FN, TYPE_INT, TYPE_STRUCT, TYPE_VEC}; use oxide_parser::{Ast, Token, TokenType, ValType}; use crate::env::{construct_static_name, Env, EnvVal, NameTarget, ResolvableName, ValuableName}; use crate::error::RuntimeError; use crate::val::{ try_vtype_from_val, vtype_conforms_val, Callable, Function, PropFuncVal, StmtVal, StructCallable, StructInstance, Val, VecInstance, }; use crate::{env, StreamProvider}; pub type InterpretedResult<T> = result::Result<T, RuntimeError>; #[derive(Clone)] enum Mode { /// Any statements allowed on the top-level. TopLevel, /// Only item declaration allowed on the top-level. /// Execution starts from "main". EntryPoint(Option<Box<env::Function>>), } pub struct Interpreter { streams: Box<dyn StreamProvider>, env: Rc<RefCell<Env>>, mode: Mode, args: Vec<Val>, /// Superglobals. Not used atm. #[allow(dead_code)] glob: Rc<RefCell<Env>>, } impl Interpreter { const ENTRY_POINT: &'static str = "main"; /// Returns an interpreter instance. pub(crate) fn new(stdlib: Env, streams: Box<dyn StreamProvider>, argv: &[String]) -> Self { let mut args = Vec::<Val>::with_capacity(argv.len()); for arg in argv { args.push(Val::Str(arg.clone())); } let glob = Rc::new(RefCell::new(stdlib)); let env = Rc::clone(&glob); Self { streams, glob, env, args, mode: Mode::EntryPoint(None), } } /// Interpret statements. 
pub fn interpret(&mut self, ast: &Ast) -> InterpretedResult<Val> { if ast.top_level() { self.mode = Mode::TopLevel; } for stmt in ast.tree() { self.evaluate_stmt(stmt)?; } match self.mode.clone() { Mode::EntryPoint(f) => { let f = f.map(|f| *f); match f { Some(f) => { self.mode = Mode::TopLevel; // FIXME: improve return value support let result = self.call_expr(f.val(), &[])?; Ok(result) } _ => Err(RuntimeError::Script( None, String::from(&format!( "No entry-point \"{}\" function found", Self::ENTRY_POINT )), )), } } Mode::TopLevel => Ok(Val::Nil), } } fn eval_enum_stmt(&mut self, stmt: &EnumDecl) -> InterpretedResult<StmtVal> { self.check_name(stmt.name())?; for (val, name) in stmt.vals().iter().enumerate() { let val_name_t = name.clone(); let name_t = stmt.name().clone(); let name = name_t.lexeme.clone(); let val_name = val_name_t.lexeme.clone(); self.env.borrow_mut().define_enum_value(env::EnumValue::new( val_name_t, Val::EnumValue(name, val_name, val), NameTarget(name_t.lexeme, true), )); } let enum_ = env::Enum::new(stmt.name().clone(), Val::Enum(stmt.name().clone())); self.env.borrow_mut().define_enum(enum_); Ok(StmtVal::None) } fn eval_struct_stmt(&mut self, stmt: &StructDecl) -> InterpretedResult<StmtVal> { self.check_name(stmt.name())?; let decl = stmt.clone(); let struct_ = env::Struct::new( stmt.name().lexeme.clone(), Val::Struct( stmt.name().clone(), *StructCallable::new_boxed( decl.props().len(), Arc::new(move |inter, args| { let impls = inter.env.borrow_mut().get_impls(decl.name()); let impls = if let Some(impls) = impls { impls } else { vec![] }; let instance = StructInstance::new(decl.clone(), impls); for (prop, param) in args { let mut instance_borrowed = instance.borrow_mut(); if let Some((_, v_type, public)) = instance_borrowed.props().get(&prop.lexeme) { let public = *public; let v_type = v_type.clone(); if vtype_conforms_val(&v_type, param) { instance_borrowed.props_mut().insert( prop.lexeme.clone(), (param.clone(), v_type, public), ); } else { 
return Err(RuntimeError::Type( Some(prop.clone()), format!( "Expected argument \"{}\" of type \"{}\"", v_type, param.get_type() ), )); } } else { return Err(RuntimeError::Definition( Some(prop.clone()), format!("Unknown property name \"{}\"", prop.lexeme), )); } } let instance = Val::StructInstance(instance); Ok(instance) }), ), ), ); self.env.borrow_mut().define_struct(struct_); Ok(StmtVal::None) } fn eval_impl_stmt(&mut self, stmt: &ImplDecl) -> InterpretedResult<StmtVal> { #[inline] fn validate_signature( method: &FnDecl, signature: &FnSignatureDecl, ) -> InterpretedResult<()> { if method.lambda().ret_type() != signature.ret_type() || method.lambda().params() != signature.params() { return Err(RuntimeError::Definition( Some(method.name().clone()), format!( "Mismatched signature of method \"{}\"", method.name().lexeme ), )); } Ok(()) } let decl = stmt.clone(); let (impl_name, trait_name) = if let Some(for_name) = decl.for_name() { let trait_ = self.env.borrow_mut().get(decl.impl_name())?; let trait_ = trait_.borrow_mut().deref().clone(); match trait_ { EnvVal::Trait(t) => { for signature in t.methods() { let mut found = false; for (method, _pub) in decl.methods() { if method.name() == signature.name() { validate_signature(method, signature)?; found = true; } } if !found { return Err(RuntimeError::Definition( Some(signature.name().clone()), format!( "Method \"{}\" must be implemented", signature.name().lexeme ), )); } } } _ => { return Err(RuntimeError::Definition( Some(for_name.clone()), String::from("Expected trait name"), )) } } ( for_name.clone().lexeme, Some(decl.impl_name().lexeme.clone()), ) } else { (decl.impl_name().lexeme.clone(), None) }; for (const_decl, pub_) in decl.consts() { let val = self.evaluate(const_decl.init())?; Self::validate_const_type(const_decl, &val)?; self.env .borrow_mut() .define_constant(env::Constant::with_struct( const_decl.name().clone(), val, NameTarget(stmt.impl_name().lexeme.clone(), *pub_), ))?; } for (fn_, pub_) in 
decl.fns() { let val = self.eval_fn_expr( fn_.lambda(), None, Some(stmt.impl_name().lexeme.clone()), false, ); self.env .borrow_mut() .define_function(env::Function::with_struct( fn_.name().clone(), val, NameTarget(stmt.impl_name().lexeme.clone(), *pub_), ))?; } for (fn_, pub_) in decl.methods() { let val = self.eval_fn_expr( fn_.lambda(), None, Some(stmt.impl_name().lexeme.clone()), true, ); let static_name = if let Some(name) = stmt.for_name().clone() { name.lexeme } else { stmt.impl_name().lexeme.clone() }; self.env .borrow_mut() .define_function(env::Function::with_struct( fn_.name().clone(), val, NameTarget(static_name, *pub_), ))?; } self.env.borrow_mut().define_impl(env::Impl::new( impl_name, trait_name, decl.methods().to_vec(), decl.fns().to_vec(), decl.consts().to_vec(), ))?; Ok(StmtVal::None) } fn eval_trait_stmt(&mut self, stmt: &TraitDecl) -> InterpretedResult<StmtVal> { self.check_name(stmt.name())?; self.env.borrow_mut().define_trait(env::Trait::new( stmt.name().lexeme.clone(), stmt.method_signs().to_vec(), Val::Trait(stmt.name().clone()), )); Ok(StmtVal::None) } fn eval_expr_stmt(&mut self, expr: &Expr) -> InterpretedResult<StmtVal> { self.evaluate(expr)?; Ok(StmtVal::None) } fn eval_var_stmt(&mut self, stmt: &VarDecl) -> InterpretedResult<StmtVal> { let val: Val = match stmt.init() { Some(init) => self.evaluate(init)?, None => Val::Uninit, }; let v_type: ValType; if stmt.v_type().is_some() { v_type = stmt.v_type().clone().unwrap(); if !vtype_conforms_val(&v_type, &val) { return Err(RuntimeError::Type( Some(stmt.name().clone()), format!( "Trying to initialise variable of type \"{}\" with value of type \"{}\"", v_type, val.get_type() ), )); } } else { v_type = match try_vtype_from_val(&val) { Some(v_type) => v_type, None => { return Err(RuntimeError::Type( Some(stmt.name().clone()), format!( "Unrecognised value type in initialisation \"{}\"", val.get_type() ), )); } } } self.env.borrow_mut().define_variable(env::Variable::new( 
stmt.name().lexeme.clone(), val, stmt.mutable(), v_type, )); Ok(StmtVal::None) } fn eval_const_stmt(&mut self, stmt: &ConstDecl) -> InterpretedResult<StmtVal> { let val: Val = self.evaluate(stmt.init())?; Self::validate_const_type(stmt, &val)?; self.env .borrow_mut() .define_constant(env::Constant::without_struct(stmt.name().clone(), val))?; Ok(StmtVal::None) } fn eval_if_stmt(&mut self, stmt: &If) -> InterpretedResult<StmtVal> { if Self::is_true(&self.evaluate(stmt.condition())?)? { self.evaluate_stmt(stmt.then_stmt()) } else { self.evaluate_stmt(stmt.else_stmt()) } } fn eval_loop_stmt(&mut self, stmt: &Loop) -> InterpretedResult<StmtVal> { while Self::is_true(&self.evaluate(stmt.condition())?)? { let v = self.evaluate_stmt(stmt.body())?; match v { StmtVal::None => {} StmtVal::Continue => { self.evaluate(stmt.inc())?; continue; } StmtVal::Break => { return Ok(StmtVal::None); } stmt_val @ StmtVal::Return(_) => { return Ok(stmt_val); } } self.evaluate(stmt.inc())?; } Ok(StmtVal::None) } fn eval_for_in_stmt(&mut self, stmt: &ForIn) -> InterpretedResult<StmtVal> { let iter = &self.evaluate(stmt.iter())?; match iter { Val::VecInstance(v) => { let env = Rc::new(RefCell::new(Env::with_enclosing(self.env.clone()))); env.borrow_mut().define_variable(env::Variable::new( stmt.iter_value().lexeme.clone(), Val::Uninit, true, v.borrow().val_type().clone(), )); if stmt.index_value().is_some() { env.borrow_mut().define_variable(env::Variable::new( stmt.index_value().clone().unwrap().lexeme, Val::Uninit, true, ValType::Int, )); } for (pos, val) in v.borrow().vals().iter().enumerate() { env.borrow_mut().assign(stmt.iter_value().clone(), val)?; if stmt.index_value().is_some() { env.borrow_mut() .assign(stmt.index_value().clone().unwrap(), &Val::Int(pos as isize))?; } let v = self.evaluate_block(stmt.body(), Some(env.clone()))?; match v { StmtVal::None => {} StmtVal::Continue => { continue; } StmtVal::Break => { return Ok(StmtVal::None); } stmt_val @ StmtVal::Return(_) => { return 
Ok(stmt_val); } } } } v => { return Err(RuntimeError::Type( Some(stmt.iter_value().clone()), format!( "Trying to iterate over a non-iterable value of type \"{}\"", v.get_type() ), )) } } Ok(StmtVal::None) } fn eval_match_expr(&mut self, expr: &Match) -> InterpretedResult<Val> { let cond: Val = self.evaluate(expr.expr())?; for arm in expr.arms() { let br_cond: Val = self.evaluate(arm.expr())?; if let Val::Bool(true) = Val::equal(&cond, &br_cond, expr.keyword())? { return self.evaluate(arm.body()); } } Err(RuntimeError::Runtime( expr.keyword().clone(), String::from("Match expression must be exhaustive"), )) } fn eval_var_expr(&mut self, expr: &Variable) -> InterpretedResult<Val> { let env_val = self.env.borrow_mut().get(expr.name())?; let env_val = env_val.borrow_mut(); use EnvVal::*; match env_val.deref() { Function(f) => Ok(f.val().clone()), Constant(c) => Ok(c.val().clone()), Variable(v) => { let val = v.val(); if let Val::Uninit = val { Err(RuntimeError::Runtime( expr.name().clone(), format!( "Trying to access an uninitialized variable \"{}\"", expr.name().lexeme ), )) } else { Ok(val) } } Enum(e) => Ok(e.val().clone()), EnumValue(e) => Ok(e.val().clone()), Struct(s) => Ok(s.val().clone()), Trait(t) => Ok(t.val().clone()), } } fn eval_assign_expr(&mut self, expr: &Assignment) -> InterpretedResult<Val> { let val = self.evaluate(expr.expr())?; let val = match expr.operator().token_type { TokenType::Equal => val, TokenType::PlusEqual | TokenType::MinusEqual | TokenType::AsteriskEqual | TokenType::SlashEqual | TokenType::ModulusEqual | TokenType::BitwiseAndEqual | TokenType::BitwiseOrEqual | TokenType::BitwiseXorEqual => { let env_val = self.env.borrow_mut().get(expr.name())?; let env_val = env_val.borrow_mut(); match env_val.deref() { EnvVal::Variable(v) => { Self::evaluate_two_operands(expr.operator(), &v.val(), &val)? 
} _ => { return Err(RuntimeError::Operator( expr.name().clone(), format!( "Operator \"{}\" can be used only with a variables", expr.operator().lexeme ), )) } } } _ => { return Err(RuntimeError::Operator( expr.operator().clone(), str::to_string("Unrecognised token in an assignment expression"), )) } }; self.env.borrow_mut().assign(expr.name().clone(), &val)?; Ok(val) } fn eval_fn_stmt(&mut self, fn_decl: &FnDecl) -> InterpretedResult<StmtVal> { let fn_val = self.eval_fn_expr(fn_decl.lambda(), None, None, false); let func: env::Function = env::Function::without_struct(fn_decl.name().clone(), fn_val); if fn_decl.name().lexeme == Self::ENTRY_POINT { if let Mode::EntryPoint(entry_point) = &self.mode { match entry_point { None => self.mode = Mode::EntryPoint(Some(Box::new(func))), Some(_) => { return Err(RuntimeError::Script( Some(fn_decl.name().clone()), format!( "Entry-point function \"{}\" cannot be declared twice.", Self::ENTRY_POINT ), )) } } } } else { self.env.borrow_mut().define_function(func)?; } Ok(StmtVal::None) } fn eval_fn_expr( &mut self, expr: &Lambda, self_: Option<Rc<RefCell<StructInstance>>>, self_static: Option<String>, self_argument: bool, ) -> Val { let copy = Rc::clone(&self.env); let func = Function::new( expr.clone(), Rc::new(RefCell::new(Env::with_enclosing(copy))), ); let ret_type = func.lambda().ret_type().clone(); let mut param_types: Vec<ValType> = func .lambda() .params() .to_vec() .into_iter() .map(|(_, vt, _)| vt) .collect(); let mut self_type = None; if self_argument { assert!( !self_static.is_none(), "Function cannot have \"self\" as an argument without \"self_static\" being set" ); self_type = Some(ValType::Instance(self_static.clone().unwrap())); param_types.insert(0, self_type.clone().unwrap()); } Val::Callable(*Callable::new_boxed( param_types, ret_type, Arc::new(move |inter, args| { let copy = Rc::clone(func.env()); let glob = Rc::new(RefCell::new(Env::with_enclosing(copy))); let mut env = Env::with_enclosing(glob); if 
self_.is_some() { let cur_instance = self_.clone().unwrap(); env.define_static_bind(cur_instance.borrow().struct_name().to_string()); env.define_self(cur_instance); } if self_static.is_some() { let static_bind = self_static.clone().unwrap(); env.define_static_bind(static_bind); } if self_argument { assert!(!(self_static.is_none() || self_type.is_none()), "Function cannot have \"self\" as an argument without \"self_static\" or \"self_type\" being set"); let cur_instance = args[0].clone(); let self_type = self_type.as_ref().unwrap(); if !vtype_conforms_val(self_type, &cur_instance) { return Err(RuntimeError::Type( None, format!( "Expected argument \"{}\" of type \"{}\", got \"{}\"", 0, self_type, cur_instance.get_type() ), )); } let cur_instance = if let Val::StructInstance(cur_instance) = cur_instance { cur_instance } else { panic!("Expected to have StructInstance as a current instance value."); }; env.define_static_bind(cur_instance.borrow().struct_name().to_string()); env.define_self(cur_instance); } for (i, param) in func.lambda().params().iter().enumerate() { let arg_index = if self_argument { i + 1 } else { i }; let arg = args[arg_index].clone(); if !vtype_conforms_val(&param.1, &arg) { return Err(RuntimeError::Type( Some(param.0.clone()), format!( "Expected argument \"{}\" of type \"{}\", got \"{}\"", arg_index, param.1, arg.get_type() ), )); } let var = env::Variable::new( param.0.lexeme.clone(), args[arg_index].clone(), param.2, param.1.clone(), ); env.define_variable(var); } let new_env = Rc::new(RefCell::new(env)); let stmt_val = inter.evaluate_block(func.lambda().body(), Some(new_env))?; let val = match stmt_val { StmtVal::None => Val::Nil, StmtVal::Return(val) => val, _ => { return Err(RuntimeError::Script( None, str::to_string("Unknown statement value"), )) } }; if vtype_conforms_val(func.lambda().ret_type(), &val) { Ok(val) } else { Err(RuntimeError::Type( None, format!( "Function must return \"{}\", got \"{}\"", func.lambda().ret_type(), 
val.get_type() ), )) } }), )) } fn eval_block_stmt(&mut self, stmt: &Block) -> InterpretedResult<StmtVal> { self.evaluate_block(stmt, None) } fn eval_break_stmt(&mut self) -> StmtVal { StmtVal::Break } fn eval_continue_stmt(&mut self) -> StmtVal { StmtVal::Continue } fn eval_return_stmt(&mut self, expr: &Return) -> InterpretedResult<StmtVal> { let val = match expr.expr() { Expr::EmptyExpr => Val::Nil, expr => self.evaluate(expr)?, }; Ok(StmtVal::Return(val)) } fn eval_unary_expr(&mut self, expr: &Unary) -> InterpretedResult<Val> { let un_expr: Val = self.evaluate(expr.expr())?; let val = match expr.operator().token_type { TokenType::Bang => match un_expr { Val::Bool(b) => Val::Bool(!b), val => { return Err(RuntimeError::Type( Some(expr.operator().clone()), format!( "Expected \"{}\" value, got \"{}\"", TYPE_BOOL, val.get_type() ), )) } }, TokenType::Minus => match un_expr { Val::Float(n) => Val::Float(-n), Val::Int(n) => Val::Int(-n), val => { return Err(RuntimeError::Type( Some(expr.operator().clone()), format!( "Expected \"{}\" or \"{}\" value, got \"{}\"", TYPE_INT, TYPE_FLOAT, val.get_type() ), )) } }, _ => { return Err(RuntimeError::Runtime( expr.operator().clone(), format!("Unknown unary \"{}\" operator", expr.operator().lexeme), )) } }; Ok(val) } fn eval_call_expr(&mut self, expr: &Call) -> InterpretedResult<Val> { let callee = self.evaluate(expr.callee())?; self.call_expr(&callee, expr.args()) } fn call_expr(&mut self, callee: &Val, args: &[Expr]) -> InterpretedResult<Val> { match callee { Val::Callable(callee) => { let mut eval_args = vec![]; for arg in args { eval_args.push(self.evaluate(arg)?); } if eval_args.len() != callee.arity() { return Err(RuntimeError::Definition( None, format!( "Expected {} arguments but got {}", callee.arity(), eval_args.len() ), )); } (callee.call())(self, &eval_args) } _ => Err(RuntimeError::Type( None, format!( "Callable value must be of type \"{}\", got \"{}\"", TYPE_FN, callee.get_type() ), )), } } fn 
eval_self_static_expr(&mut self, expr: &SelfStatic) -> InterpretedResult<Val> { let self_static = self.env.borrow_mut().get_static_bind(); match self_static { Some(s) => { let self_token = Token::from_token(expr.self_static(), s); let struct_ = self.env.borrow_mut().get(&self_token)?; let struct_ = struct_.borrow_mut().deref().clone(); match struct_ { EnvVal::Struct(s) => Ok(s.val().clone()), EnvVal::Enum(e) => Ok(e.val().clone()), _ => Err(RuntimeError::Runtime( expr.self_static().clone(), str::to_string("Wrong static bind target"), )), } } None => Err(RuntimeError::Runtime( expr.self_static().clone(), str::to_string("Value \"Self\" can be used in methods only"), )), } } fn eval_self_expr(&mut self, expr: &Self_) -> InterpretedResult<Val> { let self_ = self.env.borrow_mut().get_self(); let self_ = match self_ { Some(s) => s, None => { return Err(RuntimeError::Runtime( expr.self_().clone(), str::to_string("Value \"self\" can be used in non-static methods only"), )) } }; Ok(Val::StructInstance(self_)) } fn eval_call_struct_expr(&mut self, expr: &CallStruct) -> InterpretedResult<Val> { let callee = self.evaluate(expr.callee())?; match callee { Val::Struct(token, callee) => { let mut args = vec![]; for (token, arg) in expr.args() { args.push((token.clone(), self.evaluate(arg)?)); } if args.len() != callee.arity() { return Err(RuntimeError::Definition( Some(token), format!( "Expected {} arguments but got {}", callee.arity(), args.len() ), )); } (callee.call())(self, &args) } _ => Err(RuntimeError::Type( None, format!( "Callable value must be of type \"{}\", got \"{}\"", TYPE_STRUCT, callee.get_type() ), )), } } fn eval_vec_expr(&mut self, expr: &Vec_) -> InterpretedResult<Val> { let mut values = vec![]; let val_type = if expr.vals().is_empty() { expr.val_type().clone().unwrap_or(ValType::Any) } else if expr.val_type().is_some() { let val_type = expr.val_type().clone().unwrap(); for val_expr in expr.vals() { let val = self.evaluate(val_expr)?; if 
!vtype_conforms_val(&val_type, &val) { return Err(RuntimeError::Type( Some(expr.token().clone()), format!( "Expected values of type \"{}\", got \"{}\"", val_type, try_vtype_from_val(&val).unwrap() ), )); } values.push(val); } val_type } else { let mut val_type = None; for val_expr in expr.vals() { let val = self.evaluate(val_expr)?; if val_type.is_none() { val_type = try_vtype_from_val(&val); } else if !vtype_conforms_val(&val_type.clone().unwrap(), &val) { val_type = Some(ValType::Any); } values.push(val); } val_type.unwrap() }; let vec_val = Val::VecInstance(Rc::new(RefCell::new(VecInstance::new(values, val_type)))); Ok(vec_val) } fn eval_vec_index(&mut self, expr: &VecIndex) -> InterpretedResult<Val> { let val = self.evaluate(expr.callee())?; let pos = self.evaluate(expr.index())?; let pos = if let Val::Int(int) = pos { int as usize } else { return Err(RuntimeError::Type( None, format!( "Values of type \"{}\" can have indices of type \"{}\", got \"{}\"", TYPE_VEC, TYPE_INT, pos.get_type() ), )); }; match val { Val::VecInstance(vec) => vec.borrow().get(pos), _ => Err(RuntimeError::Type( None, format!( "Indexing works with values of type \"{}\", got \"{}\"", TYPE_VEC, val.get_type() ), )), } } fn eval_get_static_prop_expr(&mut self, expr: &GetStaticProp) -> InterpretedResult<Val> { fn extract_static_value( name: &(impl ResolvableName + ValuableName), public_access: bool, token: Token, static_name: &str, ) -> InterpretedResult<Val> { if let Some(NameTarget(_name, pub_)) = name.for_target() { if StructInstance::can_access(*pub_, public_access) { Ok(name.val().clone()) } else { Err(RuntimeError::Definition( Some(token), format!("Cannot access private static member \"{}\"", static_name), )) } } else { panic!("Static access value must not be a standalone one."); } } let static_caller = self.evaluate(expr.name())?; match static_caller { Val::Struct(token, _) | Val::Enum(token) | Val::Trait(token) => { let static_name = construct_static_name(&token.lexeme, 
&expr.prop_name().lexeme); let public_access = self.is_public_static_access(token.lexeme.clone()); let static_val = self .env .borrow_mut() .get(&Token::from_token(&token, static_name.clone()))?; let env_val = static_val.borrow_mut(); match env_val.deref() { EnvVal::EnumValue(e) => Ok(e.val().clone()), EnvVal::Constant(c) => { extract_static_value(c, public_access, token, &static_name) } EnvVal::Function(f) => { extract_static_value(f, public_access, token, &static_name) } _ => Err(RuntimeError::Definition( Some(token), String::from("Unknown static access value"), )), } } _ => Err(RuntimeError::Type( None, format!( "Unknown static callee type \"{}\"", static_caller.get_type() ), )), } } fn eval_get_prop_expr(&mut self, expr: &GetProp) -> InterpretedResult<Val> { let instance = self.evaluate(expr.name())?; match instance { Val::StructInstance(i) => { let struct_name = i.borrow().struct_name().to_string(); let public_access = self.is_public_access(struct_name); let val = i.borrow_mut().get_prop(expr.prop_name(), public_access)?; match val { PropFuncVal::Prop(val) => Ok(val), PropFuncVal::Func((func, self_, _pub)) => { Ok(self.eval_fn_expr(&func, Some(self_), None, false)) } } } Val::VecInstance(vec) => VecInstance::get_method(expr.prop_name(), vec), // FIXME: add instance methods for enums _ => Err(RuntimeError::Type( None, format!("Must be a struct instance, got \"{}\"", instance.get_type()), )), } } fn eval_set_prop_expr(&mut self, expr: &SetProp) -> InterpretedResult<Val> { let instance = self.evaluate(expr.name())?; let instance = if let Val::StructInstance(i) = instance { i } else { return Err(RuntimeError::Type( None, str::to_string("Must be a struct instance"), )); }; let val = self.evaluate(expr.expr())?; let val = match expr.operator().token_type { TokenType::Equal => val, TokenType::PlusEqual | TokenType::MinusEqual | TokenType::AsteriskEqual | TokenType::SlashEqual | TokenType::ModulusEqual | TokenType::BitwiseAndEqual | TokenType::BitwiseOrEqual | 
TokenType::BitwiseXorEqual => { let struct_name = instance.borrow().struct_name().to_string(); let public_access = self.is_public_access(struct_name); let r_val = instance .borrow_mut() .get_prop(expr.prop_name(), public_access)?; let r_val = match r_val { PropFuncVal::Prop(val) => val, _ => { return Err(RuntimeError::Definition( Some(expr.operator().clone()), str::to_string("Must be a property"), )) } }; Self::evaluate_two_operands(expr.operator(), &val, &r_val)? } _ => { return Err(RuntimeError::Operator( expr.operator().clone(), str::to_string("Unrecognised token in an assignment expression"), )) } }; let struct_name = instance.borrow().struct_name().to_string(); let public_access = self.is_public_access(struct_name); let mut instance = instance.borrow_mut(); instance.set_prop(expr.prop_name(), val.clone(), public_access)?; Ok(val) } fn eval_set_index_expr(&mut self, expr: &SetIndex) -> InterpretedResult<Val> { let vec = self.evaluate(expr.name())?; let vec = if let Val::VecInstance(v) = vec { v } else if let Val::Uninit = vec { return Err(RuntimeError::Runtime( expr.operator().clone(), str::to_string("Out of bounds"), )); } else { return Err(RuntimeError::Type( None, format!("Must be a vec instance, got \"{}\"", vec), )); }; let index = self.evaluate(expr.index())?; let index = if let Val::Int(int) = index { int as usize } else { return Err(RuntimeError::Type( Some(expr.operator().clone()), format!( "Values of type \"{}\" can have indices of type \"{}\", got \"{}\"", TYPE_VEC, TYPE_INT, index.get_type() ), )); }; let val = self.evaluate(expr.expr())?; let mut vec = vec.borrow_mut(); if !vtype_conforms_val(vec.val_type(), &val) { return Err(RuntimeError::Type( Some(expr.operator().clone()), format!( "Cannot assign value of type \"{}\" to a vector of type \"{}\"", val.get_type(), vec.val_type() ), )); } let val = match expr.operator().token_type { TokenType::Equal => val, TokenType::PlusEqual | TokenType::MinusEqual | TokenType::AsteriskEqual | 
TokenType::SlashEqual | TokenType::ModulusEqual | TokenType::BitwiseAndEqual | TokenType::BitwiseOrEqual | TokenType::BitwiseXorEqual => { let l_val = vec.get(index)?; Self::evaluate_two_operands(expr.operator(), &l_val, &val)? } _ => { return Err(RuntimeError::Operator( expr.operator().clone(), str::to_string("Unrecognised token in an assignment expression"), )) } }; vec.set(index, val.clone())?; Ok(val) } fn eval_logical_binary_expr(&mut self, expr: &Binary) -> InterpretedResult<Val> { let left = self.evaluate(expr.left())?; if let Val::Bool(l_val) = left { if expr.operator().token_type == TokenType::LogicOr { if l_val { return Ok(left); } } else if !l_val { return Ok(left); } } else { return Err(RuntimeError::Type( Some(expr.operator().clone()), format!( "Only boolean values can be used in logical expressions, got \"{}\"", left.get_type() ), )); } let right = self.evaluate(expr.right())?; if let Val::Bool(_) = right { Ok(right) } else { Err(RuntimeError::Type( Some(expr.operator().clone()), format!( "Only boolean values can be used in logical expressions, got \"{}\"", right.get_type() ), )) } } fn eval_type_cast_expr(&mut self, expr: &TypeCast) -> InterpretedResult<Val> { let left = self.evaluate(expr.left())?; let cast = left.cast_to(expr.to_type(), expr.operator())?; Ok(cast) } fn eval_binary_expr(&mut self, expr: &Binary) -> InterpretedResult<Val> { let left = self.evaluate(expr.left())?; let right = self.evaluate(expr.right())?; let val = Self::evaluate_two_operands(expr.operator(), &left, &right)?; Ok(val) } fn evaluate_two_operands(operator: &Token, lhs: &Val, rhs: &Val) -> InterpretedResult<Val> { match operator.token_type { //equality TokenType::EqualEqual => Val::equal(lhs, rhs, operator), TokenType::BangEqual => Val::not_equal(lhs, rhs, operator), // comparison TokenType::Greater => Val::greater(lhs, rhs, operator), TokenType::GreaterEqual => Val::greater_equal(lhs, rhs, operator), TokenType::Less => Val::less(lhs, rhs, operator), TokenType::LessEqual 
=> Val::less_equal(lhs, rhs, operator), // math TokenType::Minus | TokenType::MinusEqual => Val::subtract(lhs, rhs, operator), TokenType::Plus | TokenType::PlusEqual => Val::add(lhs, rhs, operator), TokenType::Slash | TokenType::SlashEqual => Val::divide(lhs, rhs, operator), TokenType::Modulus | TokenType::ModulusEqual => Val::modulus(lhs, rhs, operator), TokenType::Asterisk | TokenType::AsteriskEqual => Val::multiply(lhs, rhs, operator), // bitwise TokenType::BitwiseAnd | TokenType::BitwiseAndEqual => { Val::bitwise_and(lhs, rhs, operator) } TokenType::BitwiseOr | TokenType::BitwiseOrEqual => Val::bitwise_or(lhs, rhs, operator), TokenType::BitwiseXor | TokenType::BitwiseXorEqual => { Val::bitwise_xor(lhs, rhs, operator) } TokenType::DotDot => Val::range(lhs, rhs, operator), TokenType::DotDotEqual => Val::range_equal(lhs, rhs, operator), _ => Err(RuntimeError::Operator( operator.clone(), format!("Unknown binary operator \"{}\"", operator.lexeme), )), } } fn eval_grouping_expr(&mut self, expr: &Grouping) -> InterpretedResult<Val> { self.evaluate(expr.expr()) } fn eval_nil_literal(&self, _expr: &NilLiteral) -> Val { Val::Nil } fn eval_bool_literal(&self, expr: &BoolLiteral) -> Val { Val::Bool(expr.0) } fn eval_int_literal(&self, expr: &IntLiteral) -> Val { Val::Int(expr.0) } fn eval_float_literal(&self, expr: &FloatLiteral) -> Val { Val::Float(expr.0) } fn eval_str_literal(&self, expr: &StrLiteral) -> Val { Val::Str(expr.0.clone()) } fn evaluate(&mut self, expr: &Expr) -> InterpretedResult<Val> { use Expr::*; let val = match expr { EmptyExpr => Val::Nil, NilLiteralExpr(literal) => self.eval_nil_literal(literal), BoolLiteralExpr(literal) => self.eval_bool_literal(literal), IntLiteralExpr(literal) => self.eval_int_literal(literal), FloatLiteralExpr(literal) => self.eval_float_literal(literal), StrLiteralExpr(literal) => self.eval_str_literal(literal), UnaryExpr(unary) => self.eval_unary_expr(unary)?, SelfStaticExpr(self_static) => 
self.eval_self_static_expr(self_static)?, SelfExpr(self_) => self.eval_self_expr(self_)?, CallExpr(call) => self.eval_call_expr(call)?, CallStructExpr(call_struct) => self.eval_call_struct_expr(call_struct)?, VecExpr(call_vec) => self.eval_vec_expr(call_vec)?, VecIndexExpr(vec_index) => self.eval_vec_index(vec_index)?, GetStaticExpr(get_static_prop) => self.eval_get_static_prop_expr(get_static_prop)?, GetPropExpr(get_prop) => self.eval_get_prop_expr(get_prop)?, SetPropExpr(set_prop) => self.eval_set_prop_expr(set_prop)?, SetIndexExpr(set_index) => self.eval_set_index_expr(set_index)?, BinaryExpr(binary) => self.eval_binary_expr(binary)?, LogicalBinaryExpr(l_binary) => self.eval_logical_binary_expr(l_binary)?, TypeCastExpr(type_cast) => self.eval_type_cast_expr(type_cast)?, GroupingExpr(grouping) => self.eval_grouping_expr(grouping)?, VariableExpr(variable) => self.eval_var_expr(variable)?, AssignmentExpr(assignment) => self.eval_assign_expr(assignment)?, FnExpr(lambda) => self.eval_fn_expr(lambda, None, None, false), MatchExpr(match_expr) => self.eval_match_expr(match_expr)?, }; Ok(val) } fn evaluate_stmt(&mut self, stmt: &Stmt) -> InterpretedResult<StmtVal> { match (&self.mode, stmt) { (Mode::EntryPoint(_), Stmt::Expr(_)) | (Mode::EntryPoint(_), Stmt::Let(_)) | (Mode::EntryPoint(_), Stmt::Break) | (Mode::EntryPoint(_), Stmt::Continue) | (Mode::EntryPoint(_), Stmt::Return(_)) | (Mode::EntryPoint(_), Stmt::BlockStmt(_)) | (Mode::EntryPoint(_), Stmt::IfStmt(_)) | (Mode::EntryPoint(_), Stmt::LoopStmt(_)) | (Mode::EntryPoint(_), Stmt::ForInStmt(_)) => Err(RuntimeError::Script( None, String::from( "Only item (\"const\", \"impl\", \"struct\", \"fn\", \"enum\", \ \"trait\") declarations are allowed on the top-level", ), )), (Mode::TopLevel, Stmt::Expr(expr_stmt)) => self.eval_expr_stmt(expr_stmt), (Mode::TopLevel, Stmt::Let(var_decl)) => self.eval_var_stmt(var_decl), (Mode::TopLevel, Stmt::Break) => Ok(self.eval_break_stmt()), (Mode::TopLevel, Stmt::Continue) => 
Ok(self.eval_continue_stmt()), (Mode::TopLevel, Stmt::Return(return_stmt)) => self.eval_return_stmt(return_stmt), (Mode::TopLevel, Stmt::BlockStmt(block)) => self.eval_block_stmt(block), (Mode::TopLevel, Stmt::IfStmt(if_stmt)) => self.eval_if_stmt(if_stmt), (Mode::TopLevel, Stmt::LoopStmt(loop_stmt)) => self.eval_loop_stmt(loop_stmt), (Mode::TopLevel, Stmt::ForInStmt(for_in_stmt)) => self.eval_for_in_stmt(for_in_stmt), (_, Stmt::Const(const_decl)) => self.eval_const_stmt(const_decl), (_, Stmt::Fn(f_decl)) => self.eval_fn_stmt(f_decl), (_, Stmt::Enum(enum_decl)) => self.eval_enum_stmt(enum_decl), (_, Stmt::Struct(struct_decl)) => self.eval_struct_stmt(struct_decl), (_, Stmt::Impl(impl_decl)) => self.eval_impl_stmt(impl_decl), (_, Stmt::Trait(trait_decl)) => self.eval_trait_stmt(trait_decl), } } fn evaluate_block( &mut self, block: &Block, env: Option<Rc<RefCell<Env>>>, ) -> InterpretedResult<StmtVal> { let new_env = if let Some(env) = env { env } else { Rc::new(RefCell::new(Env::with_enclosing(Rc::clone(&self.env)))) }; let old_env = mem::replace(&mut self.env, new_env); for stmt in block.stmts() { let stmt_val = self.evaluate_stmt(stmt)?; match &stmt_val { StmtVal::None => {} StmtVal::Break | StmtVal::Continue | StmtVal::Return(_) => { self.env = old_env; return Ok(stmt_val); } } } self.env = old_env; Ok(StmtVal::None) } fn is_public_access(&self, struct_name: String) -> bool { if let Some(self_) = self.env.borrow().get_self() { self_.borrow().struct_name() != struct_name } else { self.is_public_static_access(struct_name) } } fn is_public_static_access(&self, struct_name: String) -> bool { if let Some(static_bind) = self.env.borrow().get_static_bind() { static_bind != struct_name } else { true } } fn check_name(&self, name: &Token) -> InterpretedResult<()> { if self.env.borrow().has_definition(&name.lexeme) { Err(RuntimeError::Definition( Some(name.clone()), format!("Name \"{}\" is already in use", &name.lexeme), )) } else { Ok(()) } } fn is_true(val: &Val) -> 
InterpretedResult<bool> { match val { Val::Bool(true) => Ok(true), Val::Bool(false) => Ok(false), _ => Err(RuntimeError::Type( None, format!( "Trying to evaluate value of type \"{}\" as boolean", val.get_type() ), )), } } fn validate_const_type(const_decl: &ConstDecl, val: &Val) -> InterpretedResult<()> { if let Some(v_type) = const_decl.v_type() { if !vtype_conforms_val(v_type, val) { return Err(RuntimeError::Type( Some(const_decl.name().clone()), format!( "Constant type \"{}\" and init value type \"{}\" mismatch", v_type, val.get_type() ), )); } } Ok(()) } pub(crate) fn args(&self) -> &[Val] { &self.args } #[allow(clippy::borrowed_box)] pub(crate) fn streams(&self) -> &Box<dyn StreamProvider> { &self.streams } }
36.01843
176
0.453408
f715119e052f43f0417bf9461459228b7723f111
4,454
use self::kmeansclustering::KMeansProblem; use super::zle::ZleSymbol; mod kmeansclustering; #[derive(Clone, Copy)] #[non_exhaustive] /// Strategy for using Huffman Tables /// /// * [EncodingStrategy::Single] - every block of 900k uses a single huffman code table /// * [EncodingStrategy::BlockWise] - usage of code tables in 50 byte chunks is optimized using Lloyd's algorithm with given parameters pub enum EncodingStrategy { BlockWise { num_clusters: usize, num_iterations: usize, }, Single, } pub(crate) struct SinglePropabilityMap { frequencies: Vec<usize>, symbol_count: usize, } impl SinglePropabilityMap { pub(crate) fn create(size: usize) -> Self { SinglePropabilityMap { frequencies: vec![0; size + 1], symbol_count: 0, } } } impl SymbolReporter for SinglePropabilityMap { fn report_symbol(&mut self, symbol: &ZleSymbol) { self.symbol_count += 1; match symbol { ZleSymbol::RunA => { self.frequencies[0] += 1; } ZleSymbol::RunB => { self.frequencies[1] += 1; } ZleSymbol::Number(i) => { self.frequencies[*i as usize + 1] += 1; } } } fn finalize(&mut self) -> ReportedSymbols { let table = IntoFrequencyTable { frequencies: self.frequencies.clone(), }; ReportedSymbols { reported_frequencies: vec![table.clone(), table], selectors: vec![0; (self.symbol_count as f32 / 50.0).ceil() as usize], } } } pub(crate) struct BlockWisePropabilityMap { current_frequencies: Vec<u8>, pub(crate) maps: Vec<Vec<u8>>, pub(crate) size: usize, counter: usize, num_iterations: usize, num_clusters: usize, } impl BlockWisePropabilityMap { pub(crate) fn create(size: usize, num_clusters: usize, num_iterations: usize) -> Self { Self { current_frequencies: vec![0; size + 1], maps: vec![], counter: 0, size, num_clusters, num_iterations, } } } impl SymbolReporter for BlockWisePropabilityMap { fn report_symbol(&mut self, symbol: &ZleSymbol) { match symbol { ZleSymbol::RunA => { self.current_frequencies[0] += 1; } ZleSymbol::RunB => { self.current_frequencies[1] += 1; } ZleSymbol::Number(i) => { 
self.current_frequencies[*i as usize + 1] += 1; } } self.counter += 1; if self.counter >= 50 { self.maps.push(std::mem::replace( &mut self.current_frequencies, vec![0; self.size + 1], )); self.counter = 0; } } fn finalize(&mut self) -> ReportedSymbols { self.maps.push(std::mem::replace( &mut self.current_frequencies, vec![0; self.size + 1], )); let p = KMeansProblem { dimension: self.size + 1, data: &self.maps, num_iterations: self.num_iterations, num_clusters: self.num_clusters, }; let tables = p.solve(); ReportedSymbols { reported_frequencies: tables .means .iter() .map(|x| IntoFrequencyTable { frequencies: x.iter().map(|x| *x as usize).collect::<Vec<_>>(), }) .collect::<Vec<_>>(), selectors: tables.assignments, } } } pub(crate) trait SymbolReporter { fn report_symbol(&mut self, symbol: &ZleSymbol); fn finalize(&mut self) -> ReportedSymbols; } pub(crate) struct ReportedSymbols { pub(crate) reported_frequencies: Vec<IntoFrequencyTable>, pub(crate) selectors: Vec<u8>, } #[derive(Clone)] pub(crate) struct IntoFrequencyTable { pub(crate) frequencies: Vec<usize>, } impl IntoFrequencyTable { pub(crate) fn iterate(self) -> impl Iterator<Item = (ZleSymbol, usize)> { self.frequencies .into_iter() .enumerate() .map(|(symbol, frequency)| match symbol { 0 => (ZleSymbol::RunA, frequency), 1 => (ZleSymbol::RunB, frequency), x => (ZleSymbol::Number((x - 1) as u8), frequency), }) } }
27.8375
135
0.545577
675de74f9c23f28b8cef2366e1b7e00989126def
3,045
// This does practically the same thing that TryFrom<&str> does. // Additionally, upon implementing FromStr, you can use the `parse` method // on strings to generate an object of the implementor type. // You can read more about it at https://doc.rust-lang.org/std/str/trait.FromStr.html use std::error; use std::str::FromStr; #[derive(Debug)] struct Person { name: String, age: usize, } // Steps: // 1. If the length of the provided string is 0, an error should be returned // 2. Split the given string on the commas present in it // 3. Only 2 elements should be returned from the split, otherwise return an error // 4. Extract the first element from the split operation and use it as the name // 5. Extract the other element from the split operation and parse it into a `usize` as the age // with something like `"4".parse::<usize>()` // 5. If while extracting the name and the age something goes wrong, an error should be returned // If everything goes well, then return a Result of a Person object impl FromStr for Person { type Err = Box<dyn error::Error>; fn from_str(s: &str) -> Result<Person, Self::Err> { if s.len() == 0 { Err("Argument can not be empty")? } else { let data = s.split(',').collect::<Vec<&str>>(); let name = data[0]; if data.len() != 2 || name.len() == 0 { Err("There should be two values separates by a ','")? } else { let parsedAge = data[1].parse::<usize>(); match parsedAge { Ok(age) => Ok(Person { name: name.to_string(), age: age }), _ => { Err("Age could not be parsed")? 
} } } } } } fn main() { let p = "Mark,20".parse::<Person>().unwrap(); println!("{:?}", p); } #[cfg(test)] mod tests { use super::*; #[test] fn empty_input() { assert!("".parse::<Person>().is_err()); } #[test] fn good_input() { let p = "John,32".parse::<Person>(); assert!(p.is_ok()); let p = p.unwrap(); assert_eq!(p.name, "John"); assert_eq!(p.age, 32); } #[test] fn missing_age() { assert!("John,".parse::<Person>().is_err()); } #[test] fn invalid_age() { assert!("John,twenty".parse::<Person>().is_err()); } #[test] fn missing_comma_and_age() { assert!("John".parse::<Person>().is_err()); } #[test] fn missing_name() { assert!(",1".parse::<Person>().is_err()); } #[test] fn missing_name_and_age() { assert!(",".parse::<Person>().is_err()); } #[test] fn missing_name_and_invalid_age() { assert!(",one".parse::<Person>().is_err()); } #[test] fn trailing_comma() { assert!("John,32,".parse::<Person>().is_err()); } #[test] fn trailing_comma_and_some_string() { assert!("John,32,man".parse::<Person>().is_err()); } }
27.93578
96
0.546141
7576b44d322eb95476438f33c152c1fd89230209
2,483
use glutin::dpi::*; use glutin::event_loop::EventLoop; use glutin::window::WindowBuilder; use glutin::ContextBuilder; use femtovg::{renderer::OpenGl, Canvas, Color}; use tuix_core::WindowDescription; pub struct Window { pub handle: glutin::WindowedContext<glutin::PossiblyCurrent>, pub canvas: Canvas<OpenGl>, } impl Window { pub fn new(events_loop: &EventLoop<()>, window_description: &WindowDescription) -> Self { //Windows COM doesn't play nicely with winit's drag and drop right now #[cfg(target_os = "windows")] let mut window_builder = { use glutin::platform::windows::WindowBuilderExtWindows; WindowBuilder::new() .with_drag_and_drop(false) }; #[cfg(not(target_os = "windows"))] let mut window_builder = WindowBuilder::new(); window_builder = window_builder.with_title(&window_description.title) .with_inner_size(PhysicalSize::new( window_description.inner_size.width, window_description.inner_size.height, )) .with_min_inner_size(PhysicalSize::new( window_description.min_inner_size.width, window_description.min_inner_size.height, )) .with_window_icon(if let Some(icon) = &window_description.icon { Some( glutin::window::Icon::from_rgba( icon.clone(), window_description.icon_width, window_description.icon_height, ) .unwrap(), ) } else { None }); let handle = ContextBuilder::new() .with_vsync(true) // .with_srgb(true) .build_windowed(window_builder, &events_loop) .expect("Window context creation failed!"); let handle = unsafe { handle.make_current().unwrap() }; let renderer = OpenGl::new(|s| handle.context().get_proc_address(s) as *const _) .expect("Cannot create renderer"); let mut canvas = Canvas::new(renderer).expect("Cannot create canvas"); let dpi_factor = handle.window().scale_factor(); let size = handle.window().inner_size(); canvas.set_size(size.width as u32, size.height as u32, dpi_factor as f32); canvas.clear_rect( 0, 0, size.width as u32, size.height as u32, Color::rgb(255, 80, 80), ); // let height = size.height as f32; // let width = size.width as f32; Window { 
handle, canvas } } }
31.43038
93
0.618607
71537d4a9f07f146e67265de61b570be7dde2032
5,082
use crate::utils::{attr_by_name, in_macro, match_path_ast, span_lint_and_help}; use rustc_lint::{EarlyContext, EarlyLintPass}; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::Span; use syntax::ast::{AssocItemKind, Extern, FnSig, Item, ItemKind, Ty, TyKind}; use std::convert::TryInto; declare_clippy_lint! { /// **What it does:** Checks for excessive /// use of bools in structs. /// /// **Why is this bad?** Excessive bools in a struct /// is often a sign that it's used as a state machine, /// which is much better implemented as an enum. /// If it's not the case, excessive bools usually benefit /// from refactoring into two-variant enums for better /// readability and API. /// /// **Known problems:** None. /// /// **Example:** /// Bad: /// ```rust /// struct S { /// is_pending: bool, /// is_processing: bool, /// is_finished: bool, /// } /// ``` /// /// Good: /// ```rust /// enum S { /// Pending, /// Processing, /// Finished, /// } /// ``` pub STRUCT_EXCESSIVE_BOOLS, pedantic, "using too many bools in a struct" } declare_clippy_lint! { /// **What it does:** Checks for excessive use of /// bools in function definitions. /// /// **Why is this bad?** Calls to such functions /// are confusing and error prone, because it's /// hard to remember argument order and you have /// no type system support to back you up. Using /// two-variant enums instead of bools often makes /// API easier to use. /// /// **Known problems:** None. /// /// **Example:** /// Bad: /// ```rust,ignore /// fn f(is_round: bool, is_hot: bool) { ... } /// ``` /// /// Good: /// ```rust,ignore /// enum Shape { /// Round, /// Spiky, /// } /// /// enum Temperature { /// Hot, /// IceCold, /// } /// /// fn f(shape: Shape, temperature: Temperature) { ... 
} /// ``` pub FN_PARAMS_EXCESSIVE_BOOLS, pedantic, "using too many bools in function parameters" } pub struct ExcessiveBools { max_struct_bools: u64, max_fn_params_bools: u64, } impl ExcessiveBools { #[must_use] pub fn new(max_struct_bools: u64, max_fn_params_bools: u64) -> Self { Self { max_struct_bools, max_fn_params_bools, } } fn check_fn_sig(&self, cx: &EarlyContext<'_>, fn_sig: &FnSig, span: Span) { match fn_sig.header.ext { Extern::Implicit | Extern::Explicit(_) => return, Extern::None => (), } let fn_sig_bools = fn_sig .decl .inputs .iter() .filter(|param| is_bool_ty(&param.ty)) .count() .try_into() .unwrap(); if self.max_fn_params_bools < fn_sig_bools { span_lint_and_help( cx, FN_PARAMS_EXCESSIVE_BOOLS, span, &format!("more than {} bools in function parameters", self.max_fn_params_bools), "consider refactoring bools into two-variant enums", ); } } } impl_lint_pass!(ExcessiveBools => [STRUCT_EXCESSIVE_BOOLS, FN_PARAMS_EXCESSIVE_BOOLS]); fn is_bool_ty(ty: &Ty) -> bool { if let TyKind::Path(None, path) = &ty.kind { return match_path_ast(path, &["bool"]); } false } impl EarlyLintPass for ExcessiveBools { fn check_item(&mut self, cx: &EarlyContext<'_>, item: &Item) { if in_macro(item.span) { return; } match &item.kind { ItemKind::Struct(variant_data, _) => { if attr_by_name(&item.attrs, "repr").is_some() { return; } let struct_bools = variant_data .fields() .iter() .filter(|field| is_bool_ty(&field.ty)) .count() .try_into() .unwrap(); if self.max_struct_bools < struct_bools { span_lint_and_help( cx, STRUCT_EXCESSIVE_BOOLS, item.span, &format!("more than {} bools in a struct", self.max_struct_bools), "consider using a state machine or refactoring bools into two-variant enums", ); } }, ItemKind::Impl { of_trait: None, items, .. 
} | ItemKind::Trait(_, _, _, _, items) => { for item in items { if let AssocItemKind::Fn(_, fn_sig, _, _) = &item.kind { self.check_fn_sig(cx, fn_sig, item.span); } } }, ItemKind::Fn(_, fn_sig, _, _) => self.check_fn_sig(cx, fn_sig, item.span), _ => (), } } }
29.04
101
0.506494
21ce3d71b14f725f428d878ab0925d924c42bb78
531
extern crate piston_window; use piston_window::*; fn main() { let mut window: PistonWindow = WindowSettings::new("Hello world!", [640, 480]) .exit_on_esc(true).build().unwrap(); while let Some(event) = window.next() { window.draw_2d(&event, |context, graphics, _device| { clear([1.0; 4], graphics); rectangle([0.0, 1.0, 0.5, 1.0], [0.0, 0.0, 100.0, 100.0], context.transform, graphics); }); } }
27.947368
61
0.499058
67b8a2195b7f6377f6422828e7f6a438363093fb
3,945
// Copyright 2020-2021 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 use multiaddr::{Multiaddr, Protocol}; use serde::Deserialize; use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; const DEFAULT_SESSION_TIMEOUT: u64 = 86400; const DEFAULT_USER: &str = "admin"; const DEFAULT_PASSWORD_SALT: &str = "0000000000000000000000000000000000000000000000000000000000000000"; const DEFAULT_PASSWORD_HASH: &str = "0000000000000000000000000000000000000000000000000000000000000000"; const DEFAULT_BIND_ADDRESS: &str = "/ip4/0.0.0.0/tcp/8081"; #[derive(Default, Deserialize)] pub struct DashboardAuthConfigBuilder { session_timeout: Option<u64>, user: Option<String>, password_salt: Option<String>, password_hash: Option<String>, } impl DashboardAuthConfigBuilder { pub fn new() -> Self { Self::default() } pub fn finish(self) -> DashboardAuthConfig { DashboardAuthConfig { session_timeout: self.session_timeout.unwrap_or(DEFAULT_SESSION_TIMEOUT), user: self.user.unwrap_or_else(|| DEFAULT_USER.to_owned()), password_salt: self.password_salt.unwrap_or_else(|| DEFAULT_PASSWORD_SALT.to_owned()), password_hash: self.password_hash.unwrap_or_else(|| DEFAULT_PASSWORD_HASH.to_owned()), } } } #[derive(Clone)] pub struct DashboardAuthConfig { session_timeout: u64, user: String, password_salt: String, password_hash: String, } impl DashboardAuthConfig { pub fn build() -> DashboardAuthConfigBuilder { DashboardAuthConfigBuilder::new() } pub fn session_timeout(&self) -> u64 { self.session_timeout } pub fn user(&self) -> &str { &self.user } pub fn password_salt(&self) -> &str { &self.password_salt } pub fn password_hash(&self) -> &str { &self.password_hash } } #[derive(Default, Deserialize)] pub struct DashboardConfigBuilder { bind_address: Option<Multiaddr>, auth: Option<DashboardAuthConfigBuilder>, } impl DashboardConfigBuilder { pub fn new() -> Self { Self::default() } pub fn finish(self) -> DashboardConfig { let multi_addr = self .bind_address // We made sure that the default value is valid and 
therefore parseable. .unwrap_or_else(|| DEFAULT_BIND_ADDRESS.parse().unwrap()); let address = multi_addr .iter() .find_map(|x| match x { Protocol::Dns(address) => Some( (address.to_string(), 0) .to_socket_addrs() .unwrap_or_else(|error| panic!("error resolving '{}':{}", address, error)) .nth(0) // Unwrapping here is fine, because to_socket-addrs() didn't return an error, // thus we can be sure that the iterator contains at least 1 element. .unwrap() .ip(), ), Protocol::Ip4(ip) => Some(IpAddr::V4(ip)), Protocol::Ip6(ip) => Some(IpAddr::V6(ip)), _ => None, }) .expect("Unsupported address"); let port = multi_addr .iter() .find_map(|x| if let Protocol::Tcp(port) = x { Some(port) } else { None }) .expect("Unsupported protocol"); DashboardConfig { bind_socket_addr: SocketAddr::new(address, port), auth: self.auth.unwrap_or_default().finish(), } } } #[derive(Clone)] pub struct DashboardConfig { bind_socket_addr: SocketAddr, auth: DashboardAuthConfig, } impl DashboardConfig { pub fn build() -> DashboardConfigBuilder { DashboardConfigBuilder::new() } pub fn bind_socket_addr(&self) -> SocketAddr { self.bind_socket_addr } pub fn auth(&self) -> &DashboardAuthConfig { &self.auth } }
29.440299
103
0.610393
e25fe343410fd620757a24533ee9fa5c47cbad75
1,951
use color_eyre::{eyre, Report}; use structopt::StructOpt; use tsunami::Tsunami; #[derive(Debug)] enum Providers { AWS, Azure, } impl std::str::FromStr for Providers { type Err = Report; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(match s { "aws" => Providers::AWS, "azure" => Providers::Azure, x => eyre::bail!("unknown provider {:?}", x), }) } } #[derive(StructOpt)] struct Opt { #[structopt(short = "p", long = "provider")] provider: Providers, #[structopt(short = "r", long = "region")] region: String, } fn wait_for_continue() { eprintln!("pausing for manual instance inspection, press enter to continue"); use std::io::prelude::*; let stdin = std::io::stdin(); let mut iterator = stdin.lock().lines(); iterator.next().unwrap().unwrap(); } // just launch an instance in the specified region and wait. #[tokio::main] async fn main() -> Result<(), Report> { let opt = Opt::from_args(); tracing_subscriber::fmt::init(); color_eyre::install()?; match opt.provider { Providers::AWS => { let mut l: tsunami::providers::aws::Launcher<_> = Default::default(); l.open_ports(); let m = tsunami::providers::aws::Setup::default() .region_with_ubuntu_ami(opt.region.parse()?) .await? .instance_type("t3.medium"); l.spawn(vec![(String::from(""), m)], None).await?; wait_for_continue(); l.terminate_all().await?; } Providers::Azure => { let mut l: tsunami::providers::azure::Launcher = Default::default(); let m = tsunami::providers::azure::Setup::default().region(opt.region.parse()?); l.spawn(vec![(String::from(""), m)], None).await?; wait_for_continue(); l.terminate_all().await?; } } Ok(()) }
27.097222
92
0.554075
e20fa72b61aff5341a51041aa2efc70f5638bb0a
2,438
extern crate tcod; mod entity; mod render; mod tile; mod map; use tcod::console::{Root, Console}; use tcod::FontLayout; use tcod::FontType; use tcod::colors; use tcod::input::Key; use tcod::input::KeyCode; use entity::Entity; use map::GameMap; enum Action { MovePlayer(i32, i32), Fullscreen, Exit, } fn handle_keys(key: Option<Key>) -> Option<Action> { match key { Some(Key { code: KeyCode::Left, .. }) => Some(Action::MovePlayer(-1, 0)), Some(Key { code: KeyCode::Right, .. }) => Some(Action::MovePlayer(1, 0)), Some(Key { code: KeyCode::Up, .. }) => Some(Action::MovePlayer(0, -1)), Some(Key { code: KeyCode::Down, .. }) => Some(Action::MovePlayer(0, 1)), Some(Key { code: KeyCode::Escape, .. }) => Some(Action::Exit), Some(Key { code: KeyCode::Enter, alt: true, .. }) => Some(Action::Fullscreen), _ => None } } fn main() { let screen_width = 80; let screen_height = 50; let map_width = 80; let map_height = 45; let mut entities = vec![ Entity::new(screen_width / 2, screen_height / 2, '@', colors::WHITE), Entity::new(screen_width / 2 - 5, screen_height / 2, '@', colors::YELLOW), ]; let player_entity_index: usize = 0; let mut root = Root::initializer() .size(screen_width, screen_height) .title("/r/roguelikedev Tutorial Part2") .font("arial10x10.png", FontLayout::Tcod) .font_type(FontType::Greyscale) .init(); root.set_default_foreground(colors::WHITE); let mut map = GameMap::new(map_width, map_height); while !root.window_closed() { ::render::render_all(&entities, &map,&mut root, screen_width, screen_height); root.flush(); ::render::clear_all(&entities, &mut root); let action = handle_keys(root.check_for_keypress(tcod::input::KEY_PRESSED)); match action { Some(Action::Exit) => break, Some(Action::Fullscreen) => { let is_fullscreen = root.is_fullscreen(); root.set_fullscreen(!is_fullscreen) } Some(Action::MovePlayer(move_x, move_y)) => { let mut player = &mut entities[player_entity_index]; if !map.is_move_blocked(player.pos.0 + move_x, player.pos.1 + move_y) { player.mv((move_x, 
move_y)) } } _ => () } } }
29.731707
87
0.576702
9c831a649551c7578b6d26f9497651e4e66e2790
132,379
// NOTE(review): machine-generated model types (AutoRust 0.1.0), reformatted for
// readability. All field names, `rename` strings and `skip_serializing_if`
// predicates are part of the wire format — do not edit them by hand; regenerate
// from the service specification instead. Type names (BigDataPool, Workspace,
// SqlPool) suggest these mirror the Azure Synapse resource-provider REST API —
// confirm against the upstream spec before relying on that.
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};

// ---------------------------------------------------------------------------
// Big Data (Spark) pool models
// ---------------------------------------------------------------------------

// Paged collection of Big Data pools; `next_link` points at the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigDataPoolResourceInfoListResult {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<BigDataPoolResourceInfo>,
}

// PATCH payload: only tags are updatable through this shape.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigDataPoolPatchInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}

// Full pool resource: common tracked-resource envelope + pool properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigDataPoolResourceInfo {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<BigDataPoolResourceProperties>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigDataPoolResourceProperties {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "autoScale", default, skip_serializing_if = "Option::is_none")]
    pub auto_scale: Option<AutoScaleProperties>,
    // Timestamps are kept as strings (presumably ISO-8601) — no date type here.
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
    #[serde(rename = "autoPause", default, skip_serializing_if = "Option::is_none")]
    pub auto_pause: Option<AutoPauseProperties>,
    #[serde(rename = "isComputeIsolationEnabled", default, skip_serializing_if = "Option::is_none")]
    pub is_compute_isolation_enabled: Option<bool>,
    #[serde(rename = "sessionLevelPackagesEnabled", default, skip_serializing_if = "Option::is_none")]
    pub session_level_packages_enabled: Option<bool>,
    #[serde(rename = "cacheSize", default, skip_serializing_if = "Option::is_none")]
    pub cache_size: Option<i32>,
    #[serde(rename = "dynamicExecutorAllocation", default, skip_serializing_if = "Option::is_none")]
    pub dynamic_executor_allocation: Option<DynamicExecutorAllocation>,
    #[serde(rename = "sparkEventsFolder", default, skip_serializing_if = "Option::is_none")]
    pub spark_events_folder: Option<String>,
    #[serde(rename = "nodeCount", default, skip_serializing_if = "Option::is_none")]
    pub node_count: Option<i32>,
    #[serde(rename = "libraryRequirements", default, skip_serializing_if = "Option::is_none")]
    pub library_requirements: Option<LibraryRequirements>,
    #[serde(rename = "customLibraries", default, skip_serializing_if = "Vec::is_empty")]
    pub custom_libraries: Vec<LibraryInfo>,
    // Note: reuses LibraryRequirements as a generic "time + content + filename" bag.
    #[serde(rename = "sparkConfigProperties", default, skip_serializing_if = "Option::is_none")]
    pub spark_config_properties: Option<LibraryRequirements>,
    #[serde(rename = "sparkVersion", default, skip_serializing_if = "Option::is_none")]
    pub spark_version: Option<String>,
    #[serde(rename = "defaultSparkLogFolder", default, skip_serializing_if = "Option::is_none")]
    pub default_spark_log_folder: Option<String>,
    #[serde(rename = "nodeSize", default, skip_serializing_if = "Option::is_none")]
    pub node_size: Option<big_data_pool_resource_properties::NodeSize>,
    #[serde(rename = "nodeSizeFamily", default, skip_serializing_if = "Option::is_none")]
    pub node_size_family: Option<big_data_pool_resource_properties::NodeSizeFamily>,
    #[serde(rename = "lastSucceededTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub last_succeeded_timestamp: Option<String>,
}

// Enums whose wire spellings differ from Rust naming carry explicit renames.
pub mod big_data_pool_resource_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum NodeSize {
        None,
        Small,
        Medium,
        Large,
        XLarge,
        #[serde(rename = "XXLarge")]
        XxLarge,
        #[serde(rename = "XXXLarge")]
        XxxLarge,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum NodeSizeFamily {
        None,
        MemoryOptimized,
        #[serde(rename = "HardwareAcceleratedFPGA")]
        HardwareAcceleratedFpga,
        #[serde(rename = "HardwareAcceleratedGPU")]
        HardwareAcceleratedGpu,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutoScaleProperties {
    #[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub min_node_count: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(rename = "maxNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub max_node_count: Option<i32>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutoPauseProperties {
    #[serde(rename = "delayInMinutes", default, skip_serializing_if = "Option::is_none")]
    pub delay_in_minutes: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicExecutorAllocation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}

// Metadata for a custom library uploaded to a pool.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LibraryInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    #[serde(rename = "containerName", default, skip_serializing_if = "Option::is_none")]
    pub container_name: Option<String>,
    #[serde(rename = "uploadedTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub uploaded_timestamp: Option<String>,
    // `type` is a Rust keyword, hence the trailing-underscore field name.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(rename = "provisioningStatus", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_status: Option<String>,
    #[serde(rename = "creatorId", default, skip_serializing_if = "Option::is_none")]
    pub creator_id: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LibraryRequirements {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub filename: Option<String>,
}

// ---------------------------------------------------------------------------
// Workspace models
// ---------------------------------------------------------------------------

// Azure AD administrator assignment properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AadAdminProperties {
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub login: Option<String>,
    #[serde(rename = "administratorType", default, skip_serializing_if = "Option::is_none")]
    pub administrator_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sid: Option<String>,
}

// Paged collection of workspaces.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceInfoListResult {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Workspace>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataLakeStorageAccountDetails {
    #[serde(rename = "accountUrl", default, skip_serializing_if = "Option::is_none")]
    pub account_url: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub filesystem: Option<String>,
}

// Encryption settings: optional customer-managed key (cmk) details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionDetails {
    #[serde(rename = "doubleEncryptionEnabled", default, skip_serializing_if = "Option::is_none")]
    pub double_encryption_enabled: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cmk: Option<CustomerManagedKeyDetails>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomerManagedKeyDetails {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub key: Option<WorkspaceKeyDetails>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceKeyDetails {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "keyVaultUrl", default, skip_serializing_if = "Option::is_none")]
    pub key_vault_url: Option<String>,
}

// Managed service identity attached to a resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIdentity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<managed_identity::Type>,
}
pub mod managed_identity {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        None,
        SystemAssigned,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkProfile {
    #[serde(rename = "computeSubnetId", default, skip_serializing_if = "Option::is_none")]
    pub compute_subnet_id: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedVirtualNetworkSettings {
    #[serde(rename = "preventDataExfiltration", default, skip_serializing_if = "Option::is_none")]
    pub prevent_data_exfiltration: Option<bool>,
    #[serde(rename = "linkedAccessCheckOnTargetResource", default, skip_serializing_if = "Option::is_none")]
    pub linked_access_check_on_target_resource: Option<bool>,
    #[serde(rename = "allowedAadTenantIdsForLinking", default, skip_serializing_if = "Vec::is_empty")]
    pub allowed_aad_tenant_ids_for_linking: Vec<String>,
}

// Git/repository integration settings for a workspace.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceRepositoryConfiguration {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<String>,
    #[serde(rename = "accountName", default, skip_serializing_if = "Option::is_none")]
    pub account_name: Option<String>,
    #[serde(rename = "projectName", default, skip_serializing_if = "Option::is_none")]
    pub project_name: Option<String>,
    #[serde(rename = "repositoryName", default, skip_serializing_if = "Option::is_none")]
    pub repository_name: Option<String>,
    #[serde(rename = "collaborationBranch", default, skip_serializing_if = "Option::is_none")]
    pub collaboration_branch: Option<String>,
    #[serde(rename = "rootFolder", default, skip_serializing_if = "Option::is_none")]
    pub root_folder: Option<String>,
    #[serde(rename = "lastCommitId", default, skip_serializing_if = "Option::is_none")]
    pub last_commit_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PurviewConfiguration {
    #[serde(rename = "purviewResourceId", default, skip_serializing_if = "Option::is_none")]
    pub purview_resource_id: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceAadAdminInfo {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AadAdminProperties>,
}

// Full workspace resource: tracked-resource envelope + properties + identity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Workspace {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspaceProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<ManagedIdentity>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceProperties {
    #[serde(rename = "defaultDataLakeStorage", default, skip_serializing_if = "Option::is_none")]
    pub default_data_lake_storage: Option<DataLakeStorageAccountDetails>,
    #[serde(rename = "sqlAdministratorLoginPassword", default, skip_serializing_if = "Option::is_none")]
    pub sql_administrator_login_password: Option<String>,
    #[serde(rename = "managedResourceGroupName", default, skip_serializing_if = "Option::is_none")]
    pub managed_resource_group_name: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "sqlAdministratorLogin", default, skip_serializing_if = "Option::is_none")]
    pub sql_administrator_login: Option<String>,
    #[serde(rename = "virtualNetworkProfile", default, skip_serializing_if = "Option::is_none")]
    pub virtual_network_profile: Option<VirtualNetworkProfile>,
    // Free-form maps from the service are kept as raw JSON values.
    #[serde(rename = "connectivityEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub connectivity_endpoints: Option<serde_json::Value>,
    #[serde(rename = "managedVirtualNetwork", default, skip_serializing_if = "Option::is_none")]
    pub managed_virtual_network: Option<String>,
    #[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
    pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub encryption: Option<EncryptionDetails>,
    // Wire name uses "UID" capitalization, hence the explicit rename.
    #[serde(rename = "workspaceUID", default, skip_serializing_if = "Option::is_none")]
    pub workspace_uid: Option<String>,
    #[serde(rename = "extraProperties", default, skip_serializing_if = "Option::is_none")]
    pub extra_properties: Option<serde_json::Value>,
    #[serde(rename = "managedVirtualNetworkSettings", default, skip_serializing_if = "Option::is_none")]
    pub managed_virtual_network_settings: Option<ManagedVirtualNetworkSettings>,
    #[serde(rename = "workspaceRepositoryConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub workspace_repository_configuration: Option<WorkspaceRepositoryConfiguration>,
    #[serde(rename = "purviewConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub purview_configuration: Option<PurviewConfiguration>,
    #[serde(rename = "adlaResourceId", default, skip_serializing_if = "Option::is_none")]
    pub adla_resource_id: Option<String>,
}

// PATCH payload for a workspace: subset of the full properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspacePatchInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<ManagedIdentity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspacePatchProperties>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspacePatchProperties {
    #[serde(rename = "sqlAdministratorLoginPassword", default, skip_serializing_if = "Option::is_none")]
    pub sql_administrator_login_password: Option<String>,
    #[serde(rename = "managedVirtualNetworkSettings", default, skip_serializing_if = "Option::is_none")]
    pub managed_virtual_network_settings: Option<ManagedVirtualNetworkSettings>,
    #[serde(rename = "workspaceRepositoryConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub workspace_repository_configuration: Option<WorkspaceRepositoryConfiguration>,
    #[serde(rename = "purviewConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub purview_configuration: Option<PurviewConfiguration>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub encryption: Option<EncryptionDetails>,
}

// Settings model for granting SQL control to the workspace managed identity.
// Generated nesting mirrors the JSON structure: properties -> grant setting
// -> desired/actual state enums.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIdentitySqlControlSettingsModel {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<managed_identity_sql_control_settings_model::Properties>,
}
pub mod managed_identity_sql_control_settings_model {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Properties {
        #[serde(rename = "grantSqlControlToManagedIdentity", default, skip_serializing_if = "Option::is_none")]
        pub grant_sql_control_to_managed_identity: Option<properties::GrantSqlControlToManagedIdentity>,
    }
    pub mod properties {
        use super::*;
        #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
        pub struct GrantSqlControlToManagedIdentity {
            #[serde(rename = "desiredState", default, skip_serializing_if = "Option::is_none")]
            pub desired_state: Option<grant_sql_control_to_managed_identity::DesiredState>,
            #[serde(rename = "actualState", default, skip_serializing_if = "Option::is_none")]
            pub actual_state: Option<grant_sql_control_to_managed_identity::ActualState>,
        }
        pub mod grant_sql_control_to_managed_identity {
            use super::*;
            #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
            pub enum DesiredState {
                Enabled,
                Disabled,
            }
            #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
            pub enum ActualState {
                Enabling,
                Enabled,
                Disabling,
                Disabled,
                Unknown,
            }
        }
    }
}

// ---------------------------------------------------------------------------
// SQL pool / restore models
// ---------------------------------------------------------------------------

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorableDroppedSqlPoolProperties {
    #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")]
    pub database_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<String>,
    // Sizes and dates arrive as strings on the wire; no numeric parsing here.
    #[serde(rename = "maxSizeBytes", default, skip_serializing_if = "Option::is_none")]
    pub max_size_bytes: Option<String>,
    #[serde(rename = "serviceLevelObjective", default, skip_serializing_if = "Option::is_none")]
    pub service_level_objective: Option<String>,
    #[serde(rename = "elasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub elastic_pool_name: Option<String>,
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
    #[serde(rename = "deletionDate", default, skip_serializing_if = "Option::is_none")]
    pub deletion_date: Option<String>,
    #[serde(rename = "earliestRestoreDate", default, skip_serializing_if = "Option::is_none")]
    pub earliest_restore_date: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorableDroppedSqlPool {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RestorableDroppedSqlPoolProperties>,
}

// Unlike most list results here, `value` is required (no default/skip attrs).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorableDroppedSqlPoolListResult {
    pub value: Vec<RestorableDroppedSqlPool>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub available: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

// ---------------------------------------------------------------------------
// IP firewall rule models
// ---------------------------------------------------------------------------

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpFirewallRuleInfo {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IpFirewallRuleProperties>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReplaceAllIpFirewallRulesRequest {
    // Map of rule name -> rule body, kept as raw JSON.
    #[serde(rename = "ipFirewallRules", default, skip_serializing_if = "Option::is_none")]
    pub ip_firewall_rules: Option<serde_json::Value>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpFirewallRuleProperties {
    #[serde(rename = "endIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub end_ip_address: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<ip_firewall_rule_properties::ProvisioningState>,
    #[serde(rename = "startIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub start_ip_address: Option<String>,
}
pub mod ip_firewall_rule_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Provisioning,
        Succeeded,
        Deleting,
        Failed,
        DeleteError,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpFirewallRuleInfoListResult {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IpFirewallRuleInfo>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReplaceAllFirewallRulesOperationResponse {
    #[serde(rename = "operationId", default, skip_serializing_if = "Option::is_none")]
    pub operation_id: Option<String>,
}

// ---------------------------------------------------------------------------
// Resource-provider operation metadata
// ---------------------------------------------------------------------------

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableRpOperation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<AvailableRpOperationDisplayInfo>,
    // NOTE(review): the service models this flag as a string, not a bool —
    // preserved as generated; verify against the upstream API spec.
    #[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")]
    pub is_data_action: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationMetaPropertyInfo>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
}

// Status of a long-running operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<operation_resource::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")]
    pub percent_complete: Option<f64>,
}
pub mod operation_resource {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        InProgress,
        Succeeded,
        Failed,
        Canceled,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableRpOperationDisplayInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaPropertyInfo {
    #[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")]
    pub service_specification: Option<OperationMetaServiceSpecification>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaServiceSpecification {
    #[serde(rename = "metricSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub metric_specifications: Vec<OperationMetaMetricSpecification>,
    #[serde(rename = "logSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub log_specifications: Vec<OperationMetaLogSpecification>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaMetricSpecification {
    #[serde(rename = "sourceMdmNamespace", default, skip_serializing_if = "Option::is_none")]
    pub source_mdm_namespace: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "aggregationType", default, skip_serializing_if = "Option::is_none")]
    pub aggregation_type: Option<String>,
    #[serde(rename = "displayDescription", default, skip_serializing_if = "Option::is_none")]
    pub display_description: Option<String>,
    #[serde(rename = "sourceMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub source_mdm_account: Option<String>,
    #[serde(rename = "enableRegionalMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub enable_regional_mdm_account: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub dimensions: Vec<OperationMetaMetricDimensionSpecification>,
    #[serde(rename = "supportsInstanceLevelAggregation", default, skip_serializing_if = "Option::is_none")]
    pub supports_instance_level_aggregation: Option<bool>,
    #[serde(rename = "metricFilterPattern", default, skip_serializing_if = "Option::is_none")]
    pub metric_filter_pattern: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaLogSpecification {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")]
    pub blob_duration: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaMetricDimensionSpecification {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "toBeExportedForShoebox", default, skip_serializing_if = "Option::is_none")]
    pub to_be_exported_for_shoebox: Option<bool>,
}

// ---------------------------------------------------------------------------
// SQL pool models
// ---------------------------------------------------------------------------

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolInfoListResult {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPool>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPool {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolResourceProperties>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolPatchInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolResourceProperties>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capacity: Option<i32>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolResourceProperties {
    // Numeric here (i64), unlike the string maxSizeBytes on dropped pools.
    #[serde(rename = "maxSizeBytes", default, skip_serializing_if = "Option::is_none")]
    pub max_size_bytes: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub collation: Option<String>,
    #[serde(rename = "sourceDatabaseId", default, skip_serializing_if = "Option::is_none")]
    pub source_database_id: Option<String>,
    #[serde(rename = "recoverableDatabaseId", default, skip_serializing_if = "Option::is_none")]
    pub recoverable_database_id: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "restorePointInTime", default, skip_serializing_if = "Option::is_none")]
    pub restore_point_in_time: Option<String>,
    #[serde(rename = "createMode", default, skip_serializing_if = "Option::is_none")]
    pub create_mode: Option<sql_pool_resource_properties::CreateMode>,
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
    #[serde(rename = "storageAccountType", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_type: Option<sql_pool_resource_properties::StorageAccountType>,
}
pub mod sql_pool_resource_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreateMode {
        Default,
        PointInTimeRestore,
        Recovery,
        Restore,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum StorageAccountType {
        #[serde(rename = "GRS")]
        Grs,
        #[serde(rename = "LRS")]
        Lrs,
        #[serde(rename = "ZRS")]
        Zrs,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetadataSyncConfig {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<metadata_sync_config::Properties>,
}
pub mod metadata_sync_config {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Properties {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub enabled: Option<bool>,
        #[serde(rename = "syncIntervalInMinutes", default, skip_serializing_if = "Option::is_none")]
        pub sync_interval_in_minutes: Option<i32>,
    }
}

// `state` is required on the wire (no default/skip attrs).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoBackupPolicyProperties {
    pub state: geo_backup_policy_properties::State,
    #[serde(rename = "storageType", default, skip_serializing_if = "Option::is_none")]
    pub storage_type: Option<String>,
}
pub mod geo_backup_policy_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoBackupPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    pub properties: GeoBackupPolicyProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoBackupPolicyListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<GeoBackupPolicy>,
}

// ---------------------------------------------------------------------------
// Query-statistics models
// ---------------------------------------------------------------------------

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TopQueries {
    #[serde(rename = "aggregationFunction", default, skip_serializing_if = "Option::is_none")]
    pub aggregation_function: Option<top_queries::AggregationFunction>,
    #[serde(rename = "executionType", default, skip_serializing_if = "Option::is_none")]
    pub execution_type: Option<top_queries::ExecutionType>,
    #[serde(rename = "intervalType", default, skip_serializing_if = "Option::is_none")]
    pub interval_type: Option<String>,
    #[serde(rename = "numberOfTopQueries", default, skip_serializing_if = "Option::is_none")]
    pub number_of_top_queries: Option<f64>,
    #[serde(rename = "observationStartTime", default, skip_serializing_if = "Option::is_none")]
    pub observation_start_time: Option<String>,
    #[serde(rename = "observationEndTime", default, skip_serializing_if = "Option::is_none")]
    pub observation_end_time: Option<String>,
    #[serde(rename = "observedMetric", default, skip_serializing_if = "Option::is_none")]
    pub observed_metric: Option<top_queries::ObservedMetric>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub queries: Vec<QueryStatistic>,
}
// These enums use lowercase wire spellings, hence per-variant renames.
pub mod top_queries {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AggregationFunction {
        #[serde(rename = "min")]
        Min,
        #[serde(rename = "max")]
        Max,
        #[serde(rename = "avg")]
        Avg,
        #[serde(rename = "sum")]
        Sum,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ExecutionType {
        #[serde(rename = "any")]
        Any,
        #[serde(rename = "regular")]
        Regular,
        #[serde(rename = "irregular")]
        Irregular,
        #[serde(rename = "aborted")]
        Aborted,
        #[serde(rename = "exception")]
        Exception,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ObservedMetric {
        #[serde(rename = "cpu")]
        Cpu,
        #[serde(rename = "io")]
        Io,
        #[serde(rename = "logio")]
        Logio,
        #[serde(rename = "duration")]
        Duration,
        #[serde(rename = "executionCount")]
        ExecutionCount,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryStatistic {
    #[serde(rename = "queryId", default, skip_serializing_if = "Option::is_none")]
    pub query_id: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub intervals: Vec<QueryInterval>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryInterval {
    #[serde(rename = "intervalStartTime", default, skip_serializing_if = "Option::is_none")]
    pub interval_start_time: Option<String>,
    #[serde(rename = "executionCount", default, skip_serializing_if = "Option::is_none")]
    pub execution_count: Option<f64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub metrics: Vec<QueryMetric>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryMetric {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<query_metric::Unit>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<f64>,
}
pub mod query_metric {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        #[serde(rename = "percentage")]
        Percentage,
        #[serde(rename = "KB")]
        Kb,
        #[serde(rename = "microseconds")]
        Microseconds,
    }
}

// Required `value` — no default/skip attrs, unlike the paged list results.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TopQueriesListResult {
    pub value: Vec<TopQueries>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataWarehouseUserActivitiesProperties {
    #[serde(rename = "activeQueriesCount", default, skip_serializing_if = "Option::is_none")]
    pub active_queries_count: Option<i32>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataWarehouseUserActivities {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DataWarehouseUserActivitiesProperties>,
}

// ---------------------------------------------------------------------------
// Restore point models
// ---------------------------------------------------------------------------

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<RestorePoint>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointProperties {
    #[serde(rename = "restorePointType", default, skip_serializing_if = "Option::is_none")]
    pub restore_point_type: Option<restore_point_properties::RestorePointType>,
    #[serde(rename = "earliestRestoreDate", default, skip_serializing_if = "Option::is_none")]
    pub earliest_restore_date: Option<String>,
    #[serde(rename = "restorePointCreationDate", default, skip_serializing_if = "Option::is_none")]
    pub restore_point_creation_date: Option<String>,
    #[serde(rename = "restorePointLabel", default, skip_serializing_if = "Option::is_none")]
    pub restore_point_label: Option<String>,
}
pub mod restore_point_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RestorePointType {
        #[serde(rename = "CONTINUOUS")]
        Continuous,
        #[serde(rename = "DISCRETE")]
        Discrete,
    }
}

// Definition continues in the next chunk of the file (attribute intentionally
// left open at this view boundary).
#[derive(Clone, Debug, PartialEq, Serialize,
Deserialize)] pub struct RestorePoint { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<RestorePointProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReplicationLinkProperties { #[serde(rename = "isTerminationAllowed", default, skip_serializing_if = "Option::is_none")] pub is_termination_allowed: Option<bool>, #[serde(rename = "replicationMode", default, skip_serializing_if = "Option::is_none")] pub replication_mode: Option<String>, #[serde(rename = "partnerServer", default, skip_serializing_if = "Option::is_none")] pub partner_server: Option<String>, #[serde(rename = "partnerDatabase", default, skip_serializing_if = "Option::is_none")] pub partner_database: Option<String>, #[serde(rename = "partnerLocation", default, skip_serializing_if = "Option::is_none")] pub partner_location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub role: Option<replication_link_properties::Role>, #[serde(rename = "partnerRole", default, skip_serializing_if = "Option::is_none")] pub partner_role: Option<replication_link_properties::PartnerRole>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")] pub percent_complete: Option<i32>, #[serde(rename = "replicationState", default, skip_serializing_if = "Option::is_none")] pub replication_state: Option<replication_link_properties::ReplicationState>, } pub mod replication_link_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Role { Primary, Secondary, NonReadableSecondary, Source, Copy, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum PartnerRole { Primary, Secondary, NonReadableSecondary, 
Source, Copy, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ReplicationState { #[serde(rename = "PENDING")] Pending, #[serde(rename = "SEEDING")] Seeding, #[serde(rename = "CATCH_UP")] CatchUp, #[serde(rename = "SUSPENDED")] Suspended, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReplicationLink { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ReplicationLinkProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReplicationLinkListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ReplicationLink>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MaintenanceWindowOptionsProperties { #[serde(rename = "isEnabled", default, skip_serializing_if = "Option::is_none")] pub is_enabled: Option<bool>, #[serde(rename = "maintenanceWindowCycles", default, skip_serializing_if = "Vec::is_empty")] pub maintenance_window_cycles: Vec<MaintenanceWindowTimeRange>, #[serde(rename = "minDurationInMinutes", default, skip_serializing_if = "Option::is_none")] pub min_duration_in_minutes: Option<i32>, #[serde(rename = "defaultDurationInMinutes", default, skip_serializing_if = "Option::is_none")] pub default_duration_in_minutes: Option<i32>, #[serde(rename = "minCycles", default, skip_serializing_if = "Option::is_none")] pub min_cycles: Option<i32>, #[serde(rename = "timeGranularityInMinutes", default, skip_serializing_if = "Option::is_none")] pub time_granularity_in_minutes: Option<i32>, #[serde( rename = "allowMultipleMaintenanceWindowsPerCycle", default, 
skip_serializing_if = "Option::is_none" )] pub allow_multiple_maintenance_windows_per_cycle: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MaintenanceWindowOptions { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<MaintenanceWindowOptionsProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MaintenanceWindowsProperties { #[serde(rename = "timeRanges", default, skip_serializing_if = "Vec::is_empty")] pub time_ranges: Vec<MaintenanceWindowTimeRange>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MaintenanceWindowTimeRange { #[serde(rename = "dayOfWeek", default, skip_serializing_if = "Option::is_none")] pub day_of_week: Option<maintenance_window_time_range::DayOfWeek>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, } pub mod maintenance_window_time_range { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DayOfWeek { Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MaintenanceWindows { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<MaintenanceWindowsProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TransparentDataEncryptionProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<transparent_data_encryption_properties::Status>, } pub mod transparent_data_encryption_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Enabled, Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct 
TransparentDataEncryption { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<TransparentDataEncryptionProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TransparentDataEncryptionListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<TransparentDataEncryption>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolBlobAuditingPolicyProperties { pub state: sql_pool_blob_auditing_policy_properties::State, #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")] pub storage_endpoint: Option<String>, #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")] pub storage_account_access_key: Option<String>, #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")] pub retention_days: Option<i32>, #[serde(rename = "auditActionsAndGroups", default, skip_serializing_if = "Vec::is_empty")] pub audit_actions_and_groups: Vec<String>, #[serde(rename = "storageAccountSubscriptionId", default, skip_serializing_if = "Option::is_none")] pub storage_account_subscription_id: Option<String>, #[serde(rename = "isStorageSecondaryKeyInUse", default, skip_serializing_if = "Option::is_none")] pub is_storage_secondary_key_in_use: Option<bool>, #[serde(rename = "isAzureMonitorTargetEnabled", default, skip_serializing_if = "Option::is_none")] pub is_azure_monitor_target_enabled: Option<bool>, } pub mod sql_pool_blob_auditing_policy_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { Enabled, Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct 
SqlPoolBlobAuditingPolicy { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<SqlPoolBlobAuditingPolicyProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolBlobAuditingPolicySqlPoolOperationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<SqlPoolOperation>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolOperationProperties { #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")] pub database_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(rename = "operationFriendlyName", default, skip_serializing_if = "Option::is_none")] pub operation_friendly_name: Option<String>, #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")] pub percent_complete: Option<i32>, #[serde(rename = "serverName", default, skip_serializing_if = "Option::is_none")] pub server_name: Option<String>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub state: Option<sql_pool_operation_properties::State>, #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")] pub error_code: Option<i32>, #[serde(rename = "errorDescription", default, skip_serializing_if = "Option::is_none")] pub error_description: Option<String>, #[serde(rename = "errorSeverity", default, skip_serializing_if = "Option::is_none")] pub error_severity: Option<i32>, #[serde(rename = "isUserError", default, skip_serializing_if = "Option::is_none")] pub is_user_error: Option<bool>, 
#[serde(rename = "estimatedCompletionTime", default, skip_serializing_if = "Option::is_none")] pub estimated_completion_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "isCancellable", default, skip_serializing_if = "Option::is_none")] pub is_cancellable: Option<bool>, } pub mod sql_pool_operation_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { Pending, InProgress, Succeeded, Failed, CancelInProgress, Cancelled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolOperation { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<SqlPoolOperationProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolUsage { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "resourceName", default, skip_serializing_if = "Option::is_none")] pub resource_name: Option<String>, #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")] pub current_value: Option<f64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub limit: Option<f64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option<String>, #[serde(rename = "nextResetTime", default, skip_serializing_if = "Option::is_none")] pub next_reset_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolUsageListResult { pub value: Vec<SqlPoolUsage>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SensitivityLabelProperties { #[serde(rename = "schemaName", 
default, skip_serializing_if = "Option::is_none")] pub schema_name: Option<String>, #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")] pub table_name: Option<String>, #[serde(rename = "columnName", default, skip_serializing_if = "Option::is_none")] pub column_name: Option<String>, #[serde(rename = "labelName", default, skip_serializing_if = "Option::is_none")] pub label_name: Option<String>, #[serde(rename = "labelId", default, skip_serializing_if = "Option::is_none")] pub label_id: Option<String>, #[serde(rename = "informationType", default, skip_serializing_if = "Option::is_none")] pub information_type: Option<String>, #[serde(rename = "informationTypeId", default, skip_serializing_if = "Option::is_none")] pub information_type_id: Option<String>, #[serde(rename = "isDisabled", default, skip_serializing_if = "Option::is_none")] pub is_disabled: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub rank: Option<sensitivity_label_properties::Rank>, } pub mod sensitivity_label_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Rank { None, Low, Medium, High, Critical, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SensitivityLabel { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<SensitivityLabelProperties>, #[serde(rename = "managedBy", default, skip_serializing_if = "Option::is_none")] pub managed_by: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SensitivityLabelListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<SensitivityLabel>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlPoolSchema { #[serde(flatten)] pub proxy_resource: ProxyResource, } 
/// Paged list of SQL pool schemas.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolSchemaListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolSchema>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A table inside a SQL pool; carries only the shared proxy-resource fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolTable {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
}
/// Paged list of SQL pool tables.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolTableListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolTable>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a column in a SQL pool table.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolColumnProperties {
    #[serde(rename = "columnType", default, skip_serializing_if = "Option::is_none")]
    pub column_type: Option<sql_pool_column_properties::ColumnType>,
    #[serde(rename = "isComputed", default, skip_serializing_if = "Option::is_none")]
    pub is_computed: Option<bool>,
}
/// T-SQL column data types; wire names match the T-SQL spelling.
pub mod sql_pool_column_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ColumnType {
        #[serde(rename = "image")]
        Image,
        #[serde(rename = "text")]
        Text,
        #[serde(rename = "uniqueidentifier")]
        Uniqueidentifier,
        #[serde(rename = "date")]
        Date,
        #[serde(rename = "time")]
        Time,
        #[serde(rename = "datetime2")]
        Datetime2,
        #[serde(rename = "datetimeoffset")]
        Datetimeoffset,
        #[serde(rename = "tinyint")]
        Tinyint,
        #[serde(rename = "smallint")]
        Smallint,
        #[serde(rename = "int")]
        Int,
        #[serde(rename = "smalldatetime")]
        Smalldatetime,
        #[serde(rename = "real")]
        Real,
        #[serde(rename = "money")]
        Money,
        #[serde(rename = "datetime")]
        Datetime,
        #[serde(rename = "float")]
        Float,
        #[serde(rename = "sql_variant")]
        SqlVariant,
        #[serde(rename = "ntext")]
        Ntext,
        #[serde(rename = "bit")]
        Bit,
        #[serde(rename = "decimal")]
        Decimal,
        #[serde(rename = "numeric")]
        Numeric,
        #[serde(rename = "smallmoney")]
        Smallmoney,
        #[serde(rename = "bigint")]
        Bigint,
        #[serde(rename = "hierarchyid")]
        Hierarchyid,
        #[serde(rename = "geometry")]
        Geometry,
        #[serde(rename = "geography")]
        Geography,
        #[serde(rename = "varbinary")]
        Varbinary,
        #[serde(rename = "varchar")]
        Varchar,
        #[serde(rename = "binary")]
        Binary,
        #[serde(rename = "char")]
        Char,
        #[serde(rename = "timestamp")]
        Timestamp,
        #[serde(rename = "nvarchar")]
        Nvarchar,
        #[serde(rename = "nchar")]
        Nchar,
        #[serde(rename = "xml")]
        Xml,
        #[serde(rename = "sysname")]
        Sysname,
    }
}
/// A column resource inside a SQL pool table.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolColumn {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolColumnProperties>,
}
/// Paged list of SQL pool columns.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolColumnListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolColumn>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Connection-policy settings; all values are free-form strings on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolConnectionPolicyProperties {
    #[serde(rename = "securityEnabledAccess", default, skip_serializing_if = "Option::is_none")]
    pub security_enabled_access: Option<String>,
    #[serde(rename = "proxyDnsName", default, skip_serializing_if = "Option::is_none")]
    pub proxy_dns_name: Option<String>,
    #[serde(rename = "proxyPort", default, skip_serializing_if = "Option::is_none")]
    pub proxy_port: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub visibility: Option<String>,
    #[serde(rename = "useServerDefault", default, skip_serializing_if = "Option::is_none")]
    pub use_server_default: Option<String>,
    #[serde(rename = "redirectionState", default, skip_serializing_if = "Option::is_none")]
    pub redirection_state: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
}
/// Connection policy resource for a SQL pool.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolConnectionPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolConnectionPolicyProperties>,
}
/// Vulnerability-assessment configuration (storage target + recurring scans).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentProperties {
    #[serde(rename = "storageContainerPath", default, skip_serializing_if = "Option::is_none")]
    pub storage_container_path: Option<String>,
    #[serde(rename = "storageContainerSasKey", default, skip_serializing_if = "Option::is_none")]
    pub storage_container_sas_key: Option<String>,
    #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_access_key: Option<String>,
    #[serde(rename = "recurringScans", default, skip_serializing_if = "Option::is_none")]
    pub recurring_scans: Option<VulnerabilityAssessmentRecurringScansProperties>,
}
/// Recurring-scan schedule and notification settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VulnerabilityAssessmentRecurringScansProperties {
    #[serde(rename = "isEnabled", default, skip_serializing_if = "Option::is_none")]
    pub is_enabled: Option<bool>,
    #[serde(rename = "emailSubscriptionAdmins", default, skip_serializing_if = "Option::is_none")]
    pub email_subscription_admins: Option<bool>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub emails: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessment {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolVulnerabilityAssessmentProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolVulnerabilityAssessment>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VulnerabilityAssessmentScanRecordListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VulnerabilityAssessmentScanRecord>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Result record of one vulnerability-assessment scan.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VulnerabilityAssessmentScanRecord {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VulnerabilityAssessmentScanRecordProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VulnerabilityAssessmentScanRecordProperties {
    #[serde(rename = "scanId", default, skip_serializing_if = "Option::is_none")]
    pub scan_id: Option<String>,
    #[serde(rename = "triggerType", default, skip_serializing_if = "Option::is_none")]
    pub trigger_type: Option<vulnerability_assessment_scan_record_properties::TriggerType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<vulnerability_assessment_scan_record_properties::State>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub errors: Vec<VulnerabilityAssessmentScanError>,
    #[serde(rename = "storageContainerPath", default, skip_serializing_if = "Option::is_none")]
    pub storage_container_path: Option<String>,
    #[serde(rename = "numberOfFailedSecurityChecks", default, skip_serializing_if = "Option::is_none")]
    pub number_of_failed_security_checks: Option<i32>,
}
pub mod vulnerability_assessment_scan_record_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum TriggerType {
        OnDemand,
        Recurring,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Passed,
        Failed,
        FailedToRun,
        InProgress,
    }
}
/// Error detail emitted by a vulnerability-assessment scan.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VulnerabilityAssessmentScanError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// Security-alert (threat detection) policy properties; `state` is mandatory.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecurityAlertPolicyProperties {
    pub state: security_alert_policy_properties::State,
    #[serde(rename = "disabledAlerts", default, skip_serializing_if = "Vec::is_empty")]
    pub disabled_alerts: Vec<String>,
    #[serde(rename = "emailAddresses", default, skip_serializing_if = "Vec::is_empty")]
    pub email_addresses: Vec<String>,
    #[serde(rename = "emailAccountAdmins", default, skip_serializing_if = "Option::is_none")]
    pub email_account_admins: Option<bool>,
    #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub storage_endpoint: Option<String>,
    #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_access_key: Option<String>,
    #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")]
    pub retention_days: Option<i32>,
    #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<String>,
}
pub mod security_alert_policy_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        New,
        Enabled,
        Disabled,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolSecurityAlertPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SecurityAlertPolicyProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListSqlPoolSecurityAlertPolicies {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolSecurityAlertPolicy>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Baseline for one vulnerability-assessment rule; `baselineResults` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentRuleBaselineProperties {
    #[serde(rename = "baselineResults")]
    pub baseline_results: Vec<SqlPoolVulnerabilityAssessmentRuleBaselineItem>,
}
/// One row of expected results within a rule baseline.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentRuleBaselineItem {
    pub result: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentRuleBaseline {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolVulnerabilityAssessmentRuleBaselineProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentScanExportProperties {
    #[serde(rename = "exportedReportLocation", default, skip_serializing_if = "Option::is_none")]
    pub exported_report_location: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolVulnerabilityAssessmentScansExport {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolVulnerabilityAssessmentScanExportProperties>,
}
/// Target description for a resource-move request; `id` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceMoveDefinition {
    pub id: String,
}
/// Request body for creating a restore point; the label is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateSqlPoolRestorePointDefinition {
    #[serde(rename = "restorePointLabel")]
    pub restore_point_label: String,
}
/// Workload-group resource limits; the three min/max percentages are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkloadGroupProperties {
    #[serde(rename = "minResourcePercent")]
    pub min_resource_percent: i32,
    #[serde(rename = "maxResourcePercent")]
    pub max_resource_percent: i32,
    #[serde(rename = "minResourcePercentPerRequest")]
    pub min_resource_percent_per_request: f64,
    #[serde(rename = "maxResourcePercentPerRequest", default, skip_serializing_if = "Option::is_none")]
    pub max_resource_percent_per_request: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub importance: Option<String>,
    #[serde(rename = "queryExecutionTimeout", default, skip_serializing_if = "Option::is_none")]
    pub query_execution_timeout: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkloadGroup {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkloadGroupProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkloadGroupListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<WorkloadGroup>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Workload-classifier rule; `memberName` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkloadClassifierProperties {
    #[serde(rename = "memberName")]
    pub member_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub label: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub context: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub importance: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkloadClassifier {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkloadClassifierProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkloadClassifierListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<WorkloadClassifier>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Data-masking policy properties; `dataMaskingState` is mandatory.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingPolicyProperties {
    #[serde(rename = "dataMaskingState")]
    pub data_masking_state: data_masking_policy_properties::DataMaskingState,
    #[serde(rename = "exemptPrincipals", default, skip_serializing_if = "Option::is_none")]
    pub exempt_principals: Option<String>,
    #[serde(rename = "applicationPrincipals", default, skip_serializing_if = "Option::is_none")]
    pub application_principals: Option<String>,
    #[serde(rename = "maskingLevel", default, skip_serializing_if = "Option::is_none")]
    pub masking_level: Option<String>,
}
pub mod data_masking_policy_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataMaskingState {
        Disabled,
        Enabled,
    }
}
/// A data-masking policy resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DataMaskingPolicyProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(rename = "managedBy", default, skip_serializing_if = "Option::is_none")]
    pub managed_by: Option<String>,
}
/// Extended blob-auditing policy resource (adds predicate/queue-delay support).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExtendedSqlPoolBlobAuditingPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ExtendedSqlPoolBlobAuditingPolicyProperties>,
}
/// Settings of an extended SQL-pool blob auditing policy.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExtendedSqlPoolBlobAuditingPolicyProperties {
    #[serde(rename = "predicateExpression", default, skip_serializing_if = "Option::is_none")]
    pub predicate_expression: Option<String>,
    /// Policy state; the only required field on the wire.
    pub state: extended_sql_pool_blob_auditing_policy_properties::State,
    #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub storage_endpoint: Option<String>,
    #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_access_key: Option<String>,
    #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")]
    pub retention_days: Option<i32>,
    #[serde(rename = "auditActionsAndGroups", default, skip_serializing_if = "Vec::is_empty")]
    pub audit_actions_and_groups: Vec<String>,
    #[serde(rename = "storageAccountSubscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_subscription_id: Option<String>,
    #[serde(rename = "isStorageSecondaryKeyInUse", default, skip_serializing_if = "Option::is_none")]
    pub is_storage_secondary_key_in_use: Option<bool>,
    #[serde(rename = "isAzureMonitorTargetEnabled", default, skip_serializing_if = "Option::is_none")]
    pub is_azure_monitor_target_enabled: Option<bool>,
    #[serde(rename = "queueDelayMs", default, skip_serializing_if = "Option::is_none")]
    pub queue_delay_ms: Option<i32>,
}
/// Enum namespace for [`ExtendedSqlPoolBlobAuditingPolicyProperties`].
pub mod extended_sql_pool_blob_auditing_policy_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Enabled,
        Disabled,
    }
}
/// One page of extended SQL-pool blob auditing policies.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExtendedSqlPoolBlobAuditingPolicyListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExtendedSqlPoolBlobAuditingPolicy>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
// The struct name and body continue on the next source line.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct 
DataMaskingRule {
    /// Common proxy-resource fields, flattened.
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DataMaskingRuleProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
}
/// Settings of a single data-masking rule.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingRuleProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "aliasName", default, skip_serializing_if = "Option::is_none")]
    pub alias_name: Option<String>,
    #[serde(rename = "ruleState", default, skip_serializing_if = "Option::is_none")]
    pub rule_state: Option<data_masking_rule_properties::RuleState>,
    /// Schema, table and column the rule targets — all required on the wire.
    #[serde(rename = "schemaName")]
    pub schema_name: String,
    #[serde(rename = "tableName")]
    pub table_name: String,
    #[serde(rename = "columnName")]
    pub column_name: String,
    /// Which masking function to apply; required.
    #[serde(rename = "maskingFunction")]
    pub masking_function: data_masking_rule_properties::MaskingFunction,
    #[serde(rename = "numberFrom", default, skip_serializing_if = "Option::is_none")]
    pub number_from: Option<String>,
    #[serde(rename = "numberTo", default, skip_serializing_if = "Option::is_none")]
    pub number_to: Option<String>,
    #[serde(rename = "prefixSize", default, skip_serializing_if = "Option::is_none")]
    pub prefix_size: Option<String>,
    #[serde(rename = "suffixSize", default, skip_serializing_if = "Option::is_none")]
    pub suffix_size: Option<String>,
    #[serde(rename = "replacementString", default, skip_serializing_if = "Option::is_none")]
    pub replacement_string: Option<String>,
}
/// Enum namespace for [`DataMaskingRuleProperties`].
pub mod data_masking_rule_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RuleState {
        Disabled,
        Enabled,
    }
    /// Masking function; acronym variants carry explicit wire renames.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum MaskingFunction {
        Default,
        #[serde(rename = "CCN")]
        Ccn,
        Email,
        Number,
        #[serde(rename = "SSN")]
        Ssn,
        Text,
    }
}
/// List of data-masking rules (not paged: no `nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingRuleListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataMaskingRule>,
}
/// One page of SQL-pool blob auditing policies.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolBlobAuditingPolicyListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolBlobAuditingPolicy>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// One sensitivity-label patch operation (set or remove on a column).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SensitivityLabelUpdateProperties {
    /// Operation kind; required, together with the column coordinates below.
    pub op: sensitivity_label_update_properties::Op,
    pub schema: String,
    pub table: String,
    pub column: String,
    #[serde(rename = "sensitivityLabel", default, skip_serializing_if = "Option::is_none")]
    pub sensitivity_label: Option<SensitivityLabel>,
}
/// Enum namespace for [`SensitivityLabelUpdateProperties`].
pub mod sensitivity_label_update_properties {
    use super::*;
    /// Wire values are lowercase, hence the explicit renames.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Op {
        #[serde(rename = "set")]
        Set,
        #[serde(rename = "remove")]
        Remove,
    }
}
/// A sensitivity-label update operation wrapped as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SensitivityLabelUpdate {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SensitivityLabelUpdateProperties>,
}
/// Batch of sensitivity-label update operations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SensitivityLabelUpdateList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub operations: Vec<SensitivityLabelUpdate>,
}
/// One recommended-sensitivity-label operation (enable/disable on a column).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecommendedSensitivityLabelUpdateProperties {
    pub op: recommended_sensitivity_label_update_properties::Op,
    pub schema: String,
    pub table: String,
    pub column: String,
}
/// Enum namespace for [`RecommendedSensitivityLabelUpdateProperties`].
pub mod recommended_sensitivity_label_update_properties {
    use super::*;
    /// Wire values are lowercase, hence the explicit renames.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Op {
        #[serde(rename = "enable")]
        Enable,
        #[serde(rename = "disable")]
        Disable,
    }
}
/// A recommended sensitivity-label update wrapped as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecommendedSensitivityLabelUpdate {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RecommendedSensitivityLabelUpdateProperties>,
}
/// Batch of recommended sensitivity-label operations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecommendedSensitivityLabelUpdateList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub operations: Vec<RecommendedSensitivityLabelUpdate>,
}
/// Azure Synapse nested sub-resource (id/name/type/etag via the flattened base).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResource {
    #[serde(flatten)]
    pub azure_entity_resource: AzureEntityResource,
}
/// Base type for all integration runtimes; concrete kinds flatten this in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntime {
    /// Discriminator: `Managed` or `SelfHosted`. `type` is a Rust keyword,
    /// hence the `type_` field name with a serde rename.
    #[serde(rename = "type")]
    pub type_: IntegrationRuntimeType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Integration-runtime kind discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntegrationRuntimeType {
    Managed,
    SelfHosted,
}
/// Azure-managed integration runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntime {
    /// Shared base fields (`type`, `description`), flattened.
    #[serde(flatten)]
    pub integration_runtime: IntegrationRuntime,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<IntegrationRuntimeState>,
    /// Kind-specific settings; required for the managed variant.
    #[serde(rename = "typeProperties")]
    pub type_properties: ManagedIntegrationRuntimeTypeProperties,
}
/// Settings specific to a managed integration runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeTypeProperties {
    #[serde(rename = "computeProperties", default, skip_serializing_if = "Option::is_none")]
    pub compute_properties: Option<IntegrationRuntimeComputeProperties>,
    #[serde(rename = "ssisProperties", default, skip_serializing_if = "Option::is_none")]
    pub ssis_properties: Option<IntegrationRuntimeSsisProperties>,
}
/// Compute sizing/placement for a managed runtime.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeComputeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: 
Option<String>,
    #[serde(rename = "nodeSize", default, skip_serializing_if = "Option::is_none")]
    pub node_size: Option<String>,
    #[serde(rename = "numberOfNodes", default, skip_serializing_if = "Option::is_none")]
    pub number_of_nodes: Option<i32>,
    #[serde(rename = "maxParallelExecutionsPerNode", default, skip_serializing_if = "Option::is_none")]
    pub max_parallel_executions_per_node: Option<i32>,
    #[serde(rename = "dataFlowProperties", default, skip_serializing_if = "Option::is_none")]
    pub data_flow_properties: Option<IntegrationRuntimeDataFlowProperties>,
    #[serde(rename = "vNetProperties", default, skip_serializing_if = "Option::is_none")]
    pub v_net_properties: Option<IntegrationRuntimeVNetProperties>,
}
/// Data-flow compute settings of a managed runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeDataFlowProperties {
    #[serde(rename = "computeType", default, skip_serializing_if = "Option::is_none")]
    pub compute_type: Option<integration_runtime_data_flow_properties::ComputeType>,
    #[serde(rename = "coreCount", default, skip_serializing_if = "Option::is_none")]
    pub core_count: Option<i32>,
    #[serde(rename = "timeToLive", default, skip_serializing_if = "Option::is_none")]
    pub time_to_live: Option<i32>,
}
/// Enum namespace for [`IntegrationRuntimeDataFlowProperties`].
pub mod integration_runtime_data_flow_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ComputeType {
        General,
        MemoryOptimized,
        ComputeOptimized,
    }
}
/// VNet integration settings of a managed runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeVNetProperties {
    #[serde(rename = "vNetId", default, skip_serializing_if = "Option::is_none")]
    pub v_net_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<String>,
    // Generated field name (`public_i_ps`) is awkward but part of the public
    // API of this module; the wire name is `publicIPs`.
    #[serde(rename = "publicIPs", default, skip_serializing_if = "Vec::is_empty")]
    pub public_i_ps: Vec<String>,
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
}
// The struct name and body continue on the next source line.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct 
IntegrationRuntimeSsisProperties {
    #[serde(rename = "catalogInfo", default, skip_serializing_if = "Option::is_none")]
    pub catalog_info: Option<IntegrationRuntimeSsisCatalogInfo>,
    #[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
    pub license_type: Option<integration_runtime_ssis_properties::LicenseType>,
    #[serde(rename = "customSetupScriptProperties", default, skip_serializing_if = "Option::is_none")]
    pub custom_setup_script_properties: Option<IntegrationRuntimeCustomSetupScriptProperties>,
    #[serde(rename = "dataProxyProperties", default, skip_serializing_if = "Option::is_none")]
    pub data_proxy_properties: Option<IntegrationRuntimeDataProxyProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<integration_runtime_ssis_properties::Edition>,
    #[serde(rename = "expressCustomSetupProperties", default, skip_serializing_if = "Vec::is_empty")]
    pub express_custom_setup_properties: Vec<CustomSetupBase>,
}
/// Enum namespace for [`IntegrationRuntimeSsisProperties`].
pub mod integration_runtime_ssis_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LicenseType {
        BasePrice,
        LicenseIncluded,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Edition {
        Standard,
        Enterprise,
    }
}
/// Base for secret values; `type` discriminates the concrete secret kind.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecretBase {
    #[serde(rename = "type")]
    pub type_: String,
}
/// A secret carried inline as a plain string value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecureString {
    #[serde(flatten)]
    pub secret_base: SecretBase,
    pub value: String,
}
/// SSIS catalog connection info.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeSsisCatalogInfo {
    #[serde(rename = "catalogServerEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub catalog_server_endpoint: Option<String>,
    #[serde(rename = "catalogAdminUserName", default, skip_serializing_if = "Option::is_none")]
    pub catalog_admin_user_name: Option<String>,
    #[serde(rename = "catalogAdminPassword", default, skip_serializing_if = 
"Option::is_none")]
    pub catalog_admin_password: Option<SecureString>,
    #[serde(rename = "catalogPricingTier", default, skip_serializing_if = "Option::is_none")]
    pub catalog_pricing_tier: Option<integration_runtime_ssis_catalog_info::CatalogPricingTier>,
}
/// Enum namespace for [`IntegrationRuntimeSsisCatalogInfo`].
pub mod integration_runtime_ssis_catalog_info {
    use super::*;
    /// Pricing tier; `PremiumRS` keeps its exact wire casing via a rename.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CatalogPricingTier {
        Basic,
        Standard,
        Premium,
        #[serde(rename = "PremiumRS")]
        PremiumRs,
    }
}
/// Location of a custom setup script for an SSIS runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeCustomSetupScriptProperties {
    #[serde(rename = "blobContainerUri", default, skip_serializing_if = "Option::is_none")]
    pub blob_container_uri: Option<String>,
    #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")]
    pub sas_token: Option<SecureString>,
}
/// Data-proxy settings of a managed SSIS runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeDataProxyProperties {
    #[serde(rename = "connectVia", default, skip_serializing_if = "Option::is_none")]
    pub connect_via: Option<EntityReference>,
    #[serde(rename = "stagingLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub staging_linked_service: Option<EntityReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
}
/// Base for express custom-setup entries; `type` discriminates the kind.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomSetupBase {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Custom setup that runs `cmdkey` to register credentials on the runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CmdkeySetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: CmdkeySetupTypeProperties,
}
/// Arguments for [`CmdkeySetup`]; target/user are free-form JSON on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CmdkeySetupTypeProperties {
    #[serde(rename = "targetName")]
    pub target_name: serde_json::Value,
    #[serde(rename = "userName")]
    pub user_name: serde_json::Value,
    pub password: SecretBase,
}
// The struct name and body continue on the next source line.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct 
EnvironmentVariableSetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: EnvironmentVariableSetupTypeProperties,
}
/// Name/value pair for [`EnvironmentVariableSetup`]; both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentVariableSetupTypeProperties {
    #[serde(rename = "variableName")]
    pub variable_name: String,
    #[serde(rename = "variableValue")]
    pub variable_value: String,
}
/// Custom setup that installs a licensed component on the runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComponentSetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: LicensedComponentSetupTypeProperties,
}
/// Component name plus optional license key for [`ComponentSetup`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LicensedComponentSetupTypeProperties {
    #[serde(rename = "componentName")]
    pub component_name: String,
    #[serde(rename = "licenseKey", default, skip_serializing_if = "Option::is_none")]
    pub license_key: Option<SecretBase>,
}
/// Reference to another entity (integration runtime or linked service) by name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EntityReference {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<entity_reference::Type>,
    #[serde(rename = "referenceName", default, skip_serializing_if = "Option::is_none")]
    pub reference_name: Option<String>,
}
/// Enum namespace for [`EntityReference`].
pub mod entity_reference {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        IntegrationRuntimeReference,
        LinkedServiceReference,
    }
}
/// Self-hosted integration runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntime {
    /// Shared base fields (`type`, `description`), flattened.
    #[serde(flatten)]
    pub integration_runtime: IntegrationRuntime,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SelfHostedIntegrationRuntimeTypeProperties>,
}
/// Settings specific to a self-hosted runtime.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeTypeProperties {
    #[serde(rename = "linkedInfo", default, skip_serializing_if = "Option::is_none")]
    pub linked_info: 
Option<LinkedIntegrationRuntimeType>,
}
/// Base for linked-runtime authorization; `authorizationType` discriminates.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeType {
    #[serde(rename = "authorizationType")]
    pub authorization_type: String,
}
/// Key-based authorization for linking to another runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeKeyAuthorization {
    #[serde(flatten)]
    pub linked_integration_runtime_type: LinkedIntegrationRuntimeType,
    pub key: SecureString,
}
/// RBAC-based authorization for linking to another runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeRbacAuthorization {
    #[serde(flatten)]
    pub linked_integration_runtime_type: LinkedIntegrationRuntimeType,
    #[serde(rename = "resourceId")]
    pub resource_id: String,
}
/// Base status payload shared by managed and self-hosted runtimes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeStatus {
    #[serde(rename = "type")]
    pub type_: IntegrationRuntimeType,
    #[serde(rename = "dataFactoryName", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<IntegrationRuntimeState>,
}
/// Lifecycle states of an integration runtime; variant names match the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntegrationRuntimeState {
    Initial,
    Stopped,
    Started,
    Starting,
    Stopping,
    NeedRegistration,
    Online,
    Limited,
    Offline,
    AccessDenied,
}
/// Status of a managed runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeStatus {
    #[serde(flatten)]
    pub integration_runtime_status: IntegrationRuntimeStatus,
    #[serde(rename = "typeProperties")]
    pub type_properties: ManagedIntegrationRuntimeStatusTypeProperties,
}
/// Managed-runtime status details.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeStatusTypeProperties {
    #[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
    pub create_time: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub nodes: Vec<ManagedIntegrationRuntimeNode>,
    #[serde(rename = "otherErrors", default, skip_serializing_if = "Vec::is_empty")]
    pub 
other_errors: Vec<ManagedIntegrationRuntimeError>,
    #[serde(rename = "lastOperation", default, skip_serializing_if = "Option::is_none")]
    pub last_operation: Option<ManagedIntegrationRuntimeOperationResult>,
}
/// Result of the last start/stop operation on a managed runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeOperationResult {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub result: Option<String>,
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<String>,
    #[serde(rename = "activityId", default, skip_serializing_if = "Option::is_none")]
    pub activity_id: Option<String>,
}
/// One compute node of a managed runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeNode {
    #[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")]
    pub node_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<managed_integration_runtime_node::Status>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub errors: Vec<ManagedIntegrationRuntimeError>,
}
/// Enum namespace for [`ManagedIntegrationRuntimeNode`].
pub mod managed_integration_runtime_node {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Starting,
        Available,
        Recycling,
        Unavailable,
    }
}
/// Error reported by a managed-runtime node.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: 
Option<String>,
}
/// Status of a self-hosted runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeStatus {
    #[serde(flatten)]
    pub integration_runtime_status: IntegrationRuntimeStatus,
    #[serde(rename = "typeProperties")]
    pub type_properties: SelfHostedIntegrationRuntimeStatusTypeProperties,
}
/// Self-hosted runtime status details.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeStatusTypeProperties {
    #[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
    pub create_time: Option<String>,
    #[serde(rename = "taskQueueId", default, skip_serializing_if = "Option::is_none")]
    pub task_queue_id: Option<String>,
    #[serde(
        rename = "nodeCommunicationChannelEncryptionMode",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub node_communication_channel_encryption_mode: Option<String>,
    #[serde(rename = "internalChannelEncryption", default, skip_serializing_if = "Option::is_none")]
    pub internal_channel_encryption: Option<self_hosted_integration_runtime_status_type_properties::InternalChannelEncryption>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub nodes: Vec<SelfHostedIntegrationRuntimeNode>,
    #[serde(rename = "scheduledUpdateDate", default, skip_serializing_if = "Option::is_none")]
    pub scheduled_update_date: Option<String>,
    #[serde(rename = "updateDelayOffset", default, skip_serializing_if = "Option::is_none")]
    pub update_delay_offset: Option<String>,
    #[serde(rename = "localTimeZoneOffset", default, skip_serializing_if = "Option::is_none")]
    pub local_time_zone_offset: Option<String>,
    /// Free-form capability map; schema not fixed by this client.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capabilities: Option<serde_json::Value>,
    #[serde(rename = "serviceUrls", default, skip_serializing_if = "Vec::is_empty")]
    pub service_urls: Vec<String>,
    #[serde(rename = "autoUpdate", default, skip_serializing_if = "Option::is_none")]
    pub auto_update: 
Option<IntegrationRuntimeAutoUpdate>,
    #[serde(rename = "versionStatus", default, skip_serializing_if = "Option::is_none")]
    pub version_status: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub links: Vec<LinkedIntegrationRuntime>,
    #[serde(rename = "pushedVersion", default, skip_serializing_if = "Option::is_none")]
    pub pushed_version: Option<String>,
    #[serde(rename = "latestVersion", default, skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
    #[serde(rename = "autoUpdateETA", default, skip_serializing_if = "Option::is_none")]
    pub auto_update_eta: Option<String>,
}
/// Enum namespace for [`SelfHostedIntegrationRuntimeStatusTypeProperties`].
pub mod self_hosted_integration_runtime_status_type_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum InternalChannelEncryption {
        NotSet,
        SslEncrypted,
        NotEncrypted,
    }
}
/// Auto-update switch for a self-hosted runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntegrationRuntimeAutoUpdate {
    On,
    Off,
}
/// A runtime linked (shared) from another data factory.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntime {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    #[serde(rename = "dataFactoryName", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_name: Option<String>,
    #[serde(rename = "dataFactoryLocation", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_location: Option<String>,
    #[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
    pub create_time: Option<String>,
}
/// One node of a self-hosted runtime.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeNode {
    #[serde(rename = "nodeName", default, skip_serializing_if = "Option::is_none")]
    pub node_name: Option<String>,
    #[serde(rename = "machineName", default, skip_serializing_if = "Option::is_none")]
    pub machine_name: Option<String>,
    #[serde(rename = "hostServiceUri", 
default, skip_serializing_if = "Option::is_none")]
    pub host_service_uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<self_hosted_integration_runtime_node::Status>,
    /// Free-form capability map; schema not fixed by this client.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capabilities: Option<serde_json::Value>,
    #[serde(rename = "versionStatus", default, skip_serializing_if = "Option::is_none")]
    pub version_status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(rename = "registerTime", default, skip_serializing_if = "Option::is_none")]
    pub register_time: Option<String>,
    #[serde(rename = "lastConnectTime", default, skip_serializing_if = "Option::is_none")]
    pub last_connect_time: Option<String>,
    #[serde(rename = "expiryTime", default, skip_serializing_if = "Option::is_none")]
    pub expiry_time: Option<String>,
    #[serde(rename = "lastStartTime", default, skip_serializing_if = "Option::is_none")]
    pub last_start_time: Option<String>,
    #[serde(rename = "lastStopTime", default, skip_serializing_if = "Option::is_none")]
    pub last_stop_time: Option<String>,
    #[serde(rename = "lastUpdateResult", default, skip_serializing_if = "Option::is_none")]
    pub last_update_result: Option<self_hosted_integration_runtime_node::LastUpdateResult>,
    #[serde(rename = "lastStartUpdateTime", default, skip_serializing_if = "Option::is_none")]
    pub last_start_update_time: Option<String>,
    #[serde(rename = "lastEndUpdateTime", default, skip_serializing_if = "Option::is_none")]
    pub last_end_update_time: Option<String>,
    #[serde(rename = "isActiveDispatcher", default, skip_serializing_if = "Option::is_none")]
    pub is_active_dispatcher: Option<bool>,
    #[serde(rename = "concurrentJobsLimit", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_limit: Option<i32>,
    #[serde(rename = "maxConcurrentJobs", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_jobs: Option<i32>,
}
// The module name and body continue on the next source line.
pub mod 
self_hosted_integration_runtime_node {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        NeedRegistration,
        Online,
        Limited,
        Offline,
        Upgrading,
        Initializing,
        InitializeFailed,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastUpdateResult {
        None,
        Succeed,
        Fail,
    }
}
/// Connection info returned when registering a node with a runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeConnectionInfo {
    #[serde(rename = "serviceToken", default, skip_serializing_if = "Option::is_none")]
    pub service_token: Option<String>,
    #[serde(rename = "identityCertThumbprint", default, skip_serializing_if = "Option::is_none")]
    pub identity_cert_thumbprint: Option<String>,
    #[serde(rename = "hostServiceUri", default, skip_serializing_if = "Option::is_none")]
    pub host_service_uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(rename = "publicKey", default, skip_serializing_if = "Option::is_none")]
    pub public_key: Option<String>,
    // NOTE(review): "Exprired" is a typo carried over from the service's wire
    // contract — do NOT "fix" the rename or the field would stop matching the API.
    #[serde(rename = "isIdentityCertExprired", default, skip_serializing_if = "Option::is_none")]
    pub is_identity_cert_exprired: Option<bool>,
}
/// Request body for regenerating a runtime auth key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeRegenerateKeyParameters {
    #[serde(rename = "keyName", default, skip_serializing_if = "Option::is_none")]
    pub key_name: Option<integration_runtime_regenerate_key_parameters::KeyName>,
}
/// Enum namespace for [`IntegrationRuntimeRegenerateKeyParameters`].
pub mod integration_runtime_regenerate_key_parameters {
    use super::*;
    /// Wire values are camelCase, hence the explicit renames.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum KeyName {
        #[serde(rename = "authKey1")]
        AuthKey1,
        #[serde(rename = "authKey2")]
        AuthKey2,
    }
}
/// The pair of authentication keys of a runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeAuthKeys {
    #[serde(rename = "authKey1", default, skip_serializing_if = "Option::is_none")]
    pub auth_key1: Option<String>,
    #[serde(rename = "authKey2", default, skip_serializing_if = "Option::is_none")]
    pub auth_key2: Option<String>,
}
/// Monitoring snapshot for a runtime and its nodes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeMonitoringData {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub nodes: Vec<IntegrationRuntimeNodeMonitoringData>,
}
/// Per-node monitoring metrics.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeNodeMonitoringData {
    #[serde(rename = "nodeName", default, skip_serializing_if = "Option::is_none")]
    pub node_name: Option<String>,
    #[serde(rename = "availableMemoryInMB", default, skip_serializing_if = "Option::is_none")]
    pub available_memory_in_mb: Option<i32>,
    #[serde(rename = "cpuUtilization", default, skip_serializing_if = "Option::is_none")]
    pub cpu_utilization: Option<i32>,
    #[serde(rename = "concurrentJobsLimit", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_limit: Option<i32>,
    #[serde(rename = "concurrentJobsRunning", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_running: Option<i32>,
    #[serde(rename = "maxConcurrentJobs", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_jobs: Option<i32>,
    #[serde(rename = "sentBytes", default, skip_serializing_if = "Option::is_none")]
    pub sent_bytes: Option<f64>,
    #[serde(rename = "receivedBytes", default, skip_serializing_if = "Option::is_none")]
    pub received_bytes: Option<f64>,
}
/// IP address of a runtime node.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeNodeIpAddress {
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
}
/// One page of SSIS object metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisObjectMetadataListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SsisObjectMetadata>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
// The struct name and body continue on the next source line.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct 
SsisObjectMetadata {
    /// Discriminator for the concrete SSIS metadata kind.
    #[serde(rename = "type")]
    pub type_: SsisObjectMetadataType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// SSIS metadata kind discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SsisObjectMetadataType {
    Folder,
    Project,
    Package,
    Environment,
}
/// SSIS folder metadata (no fields beyond the flattened base).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisFolder {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
}
/// SSIS project metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisProject {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
    #[serde(rename = "folderId", default, skip_serializing_if = "Option::is_none")]
    pub folder_id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<i64>,
    #[serde(rename = "environmentRefs", default, skip_serializing_if = "Vec::is_empty")]
    pub environment_refs: Vec<SsisEnvironmentReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<SsisParameter>,
}
/// SSIS package metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisPackage {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
    #[serde(rename = "folderId", default, skip_serializing_if = "Option::is_none")]
    pub folder_id: Option<i64>,
    #[serde(rename = "projectVersion", default, skip_serializing_if = "Option::is_none")]
    pub project_version: Option<i64>,
    #[serde(rename = "projectId", default, skip_serializing_if = "Option::is_none")]
    pub project_id: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<SsisParameter>,
}
/// SSIS environment metadata.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisEnvironment {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
    #[serde(rename = "folderId", default, skip_serializing_if = 
"Option::is_none")]
    pub folder_id: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub variables: Vec<SsisVariable>,
}
/// SSIS project/package parameter metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisParameter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "dataType", default, skip_serializing_if = "Option::is_none")]
    pub data_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub required: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sensitive: Option<bool>,
    #[serde(rename = "designDefaultValue", default, skip_serializing_if = "Option::is_none")]
    pub design_default_value: Option<String>,
    #[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
    pub default_value: Option<String>,
    #[serde(rename = "sensitiveDefaultValue", default, skip_serializing_if = "Option::is_none")]
    pub sensitive_default_value: Option<String>,
    #[serde(rename = "valueType", default, skip_serializing_if = "Option::is_none")]
    pub value_type: Option<String>,
    #[serde(rename = "valueSet", default, skip_serializing_if = "Option::is_none")]
    pub value_set: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub variable: Option<String>,
}
/// SSIS environment variable metadata.
/// (Struct body continues on the next source line.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisVariable {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "dataType", default, skip_serializing_if = "Option::is_none")]
    pub data_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sensitive: 
Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<String>, #[serde(rename = "sensitiveValue", default, skip_serializing_if = "Option::is_none")] pub sensitive_value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SsisEnvironmentReference { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<i64>, #[serde(rename = "environmentFolderName", default, skip_serializing_if = "Option::is_none")] pub environment_folder_name: Option<String>, #[serde(rename = "environmentName", default, skip_serializing_if = "Option::is_none")] pub environment_name: Option<String>, #[serde(rename = "referenceType", default, skip_serializing_if = "Option::is_none")] pub reference_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UpdateIntegrationRuntimeRequest { #[serde(rename = "autoUpdate", default, skip_serializing_if = "Option::is_none")] pub auto_update: Option<IntegrationRuntimeAutoUpdate>, #[serde(rename = "updateDelayOffset", default, skip_serializing_if = "Option::is_none")] pub update_delay_offset: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeResource { #[serde(flatten)] pub sub_resource: SubResource, pub properties: IntegrationRuntime, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetSsisObjectMetadataRequest { #[serde(rename = "metadataPath", default, skip_serializing_if = "Option::is_none")] pub metadata_path: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UpdateIntegrationRuntimeNodeRequest { #[serde(rename = "concurrentJobsLimit", default, skip_serializing_if = "Option::is_none")] pub concurrent_jobs_limit: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeListResponse { pub value: Vec<IntegrationRuntimeResource>, #[serde(rename = "nextLink", default, 
skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeStatusResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, pub properties: IntegrationRuntimeStatus, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SsisObjectMetadataStatusResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeOutboundNetworkDependenciesEndpointsResponse { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<IntegrationRuntimeOutboundNetworkDependenciesCategoryEndpoint>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeOutboundNetworkDependenciesCategoryEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] pub category: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub endpoints: Vec<IntegrationRuntimeOutboundNetworkDependenciesEndpoint>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeOutboundNetworkDependenciesEndpoint { #[serde(rename = "domainName", default, skip_serializing_if = "Option::is_none")] pub domain_name: Option<String>, #[serde(rename = "endpointDetails", default, skip_serializing_if = "Vec::is_empty")] pub endpoint_details: Vec<IntegrationRuntimeOutboundNetworkDependenciesEndpointDetails>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IntegrationRuntimeOutboundNetworkDependenciesEndpointDetails { #[serde(default, skip_serializing_if = 
"Option::is_none")] pub port: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResourceListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<PrivateLinkResource>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResource { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PrivateLinkResourceProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResourceProperties { #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")] pub group_id: Option<String>, #[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")] pub required_members: Vec<String>, #[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")] pub required_zone_names: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkHubInfoListResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<PrivateLinkHub>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkHub { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PrivateLinkHubProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkHubPatchInfo { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkHubProperties { #[serde(rename = "provisioningState", default, skip_serializing_if = 
"Option::is_none")] pub provisioning_state: Option<String>, #[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")] pub private_endpoint_connections: Vec<PrivateEndpointConnectionForPrivateLinkHubBasic>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionForPrivateLinkHubBasic { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PrivateEndpointConnectionProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerBlobAuditingPolicyProperties { pub state: server_blob_auditing_policy_properties::State, #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")] pub storage_endpoint: Option<String>, #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")] pub storage_account_access_key: Option<String>, #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")] pub retention_days: Option<i32>, #[serde(rename = "auditActionsAndGroups", default, skip_serializing_if = "Vec::is_empty")] pub audit_actions_and_groups: Vec<String>, #[serde(rename = "storageAccountSubscriptionId", default, skip_serializing_if = "Option::is_none")] pub storage_account_subscription_id: Option<String>, #[serde(rename = "isStorageSecondaryKeyInUse", default, skip_serializing_if = "Option::is_none")] pub is_storage_secondary_key_in_use: Option<bool>, #[serde(rename = "isAzureMonitorTargetEnabled", default, skip_serializing_if = "Option::is_none")] pub is_azure_monitor_target_enabled: Option<bool>, #[serde(rename = "queueDelayMs", default, skip_serializing_if = "Option::is_none")] pub queue_delay_ms: Option<i32>, } pub mod server_blob_auditing_policy_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { Enabled, Disabled, } } 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerBlobAuditingPolicy { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ServerBlobAuditingPolicyProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerBlobAuditingPolicyListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ServerBlobAuditingPolicy>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ExtendedServerBlobAuditingPolicyProperties { #[serde(rename = "predicateExpression", default, skip_serializing_if = "Option::is_none")] pub predicate_expression: Option<String>, pub state: extended_server_blob_auditing_policy_properties::State, #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")] pub storage_endpoint: Option<String>, #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")] pub storage_account_access_key: Option<String>, #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")] pub retention_days: Option<i32>, #[serde(rename = "auditActionsAndGroups", default, skip_serializing_if = "Vec::is_empty")] pub audit_actions_and_groups: Vec<String>, #[serde(rename = "storageAccountSubscriptionId", default, skip_serializing_if = "Option::is_none")] pub storage_account_subscription_id: Option<String>, #[serde(rename = "isStorageSecondaryKeyInUse", default, skip_serializing_if = "Option::is_none")] pub is_storage_secondary_key_in_use: Option<bool>, #[serde(rename = "isAzureMonitorTargetEnabled", default, skip_serializing_if = "Option::is_none")] pub is_azure_monitor_target_enabled: Option<bool>, #[serde(rename = "queueDelayMs", default, skip_serializing_if = "Option::is_none")] pub queue_delay_ms: Option<i32>, } pub 
mod extended_server_blob_auditing_policy_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { Enabled, Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ExtendedServerBlobAuditingPolicy { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ExtendedServerBlobAuditingPolicyProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ExtendedServerBlobAuditingPolicyListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ExtendedServerBlobAuditingPolicy>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerSecurityAlertPolicyProperties { pub state: server_security_alert_policy_properties::State, #[serde(rename = "disabledAlerts", default, skip_serializing_if = "Vec::is_empty")] pub disabled_alerts: Vec<String>, #[serde(rename = "emailAddresses", default, skip_serializing_if = "Vec::is_empty")] pub email_addresses: Vec<String>, #[serde(rename = "emailAccountAdmins", default, skip_serializing_if = "Option::is_none")] pub email_account_admins: Option<bool>, #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")] pub storage_endpoint: Option<String>, #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")] pub storage_account_access_key: Option<String>, #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")] pub retention_days: Option<i32>, #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")] pub creation_time: Option<String>, } pub mod server_security_alert_policy_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { New, Enabled, Disabled, } } 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerSecurityAlertPolicy { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ServerSecurityAlertPolicyProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerSecurityAlertPolicyListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ServerSecurityAlertPolicy>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerVulnerabilityAssessmentProperties { #[serde(rename = "storageContainerPath")] pub storage_container_path: String, #[serde(rename = "storageContainerSasKey", default, skip_serializing_if = "Option::is_none")] pub storage_container_sas_key: Option<String>, #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")] pub storage_account_access_key: Option<String>, #[serde(rename = "recurringScans", default, skip_serializing_if = "Option::is_none")] pub recurring_scans: Option<VulnerabilityAssessmentRecurringScansProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerVulnerabilityAssessment { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ServerVulnerabilityAssessmentProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerVulnerabilityAssessmentListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ServerVulnerabilityAssessment>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EncryptionProtectorListResult { #[serde(default, skip_serializing_if = 
"Vec::is_empty")] pub value: Vec<EncryptionProtector>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EncryptionProtectorProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub subregion: Option<String>, #[serde(rename = "serverKeyName", default, skip_serializing_if = "Option::is_none")] pub server_key_name: Option<String>, #[serde(rename = "serverKeyType")] pub server_key_type: encryption_protector_properties::ServerKeyType, #[serde(default, skip_serializing_if = "Option::is_none")] pub uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub thumbprint: Option<String>, } pub mod encryption_protector_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ServerKeyType { ServiceManaged, AzureKeyVault, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EncryptionProtector { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EncryptionProtectorProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerUsage { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "resourceName", default, skip_serializing_if = "Option::is_none")] pub resource_name: Option<String>, #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")] pub current_value: Option<f64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub limit: Option<f64>, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub unit: Option<String>, #[serde(rename = "nextResetTime", default, skip_serializing_if = "Option::is_none")] pub next_reset_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServerUsageListResult { pub value: Vec<ServerUsage>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoverableSqlPoolProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub edition: Option<String>, #[serde(rename = "serviceLevelObjective", default, skip_serializing_if = "Option::is_none")] pub service_level_objective: Option<String>, #[serde(rename = "elasticPoolName", default, skip_serializing_if = "Option::is_none")] pub elastic_pool_name: Option<String>, #[serde(rename = "lastAvailableBackupDate", default, skip_serializing_if = "Option::is_none")] pub last_available_backup_date: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoverableSqlPool { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<RecoverableSqlPoolProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoverableSqlPoolListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<RecoverableSqlPool>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KeyInfoListResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Key>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Key { #[serde(flatten)] pub proxy_resource: 
ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<KeyProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KeyProperties { #[serde(rename = "isActiveCMK", default, skip_serializing_if = "Option::is_none")] pub is_active_cmk: Option<bool>, #[serde(rename = "keyVaultUrl", default, skip_serializing_if = "Option::is_none")] pub key_vault_url: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LibraryListResponse { pub value: Vec<LibraryResource>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LibraryResource { #[serde(flatten)] pub sub_resource: SubResource, pub properties: LibraryInfo, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<ErrorDetail>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorDetail { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub details: Vec<ErrorDetail>, #[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")] pub additional_info: Vec<ErrorAdditionalInfo>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorAdditionalInfo { #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub info: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TrackedResource { #[serde(flatten)] pub resource: 
Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, pub location: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProxyResource { #[serde(flatten)] pub resource: Resource, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnection { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PrivateEndpointConnectionProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionProperties { #[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")] pub private_endpoint: Option<PrivateEndpoint>, #[serde(rename = "privateLinkServiceConnectionState", default, skip_serializing_if = "Option::is_none")] pub private_link_service_connection_state: Option<PrivateLinkServiceConnectionState>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkServiceConnectionState { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")] pub 
actions_required: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureEntityResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub etag: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionList { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<PrivateEndpointConnection>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionForPrivateLinkHubResourceCollectionResponse { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<PrivateEndpointConnectionForPrivateLinkHub>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionForPrivateLinkHub { #[serde(flatten)] pub private_endpoint_connection_for_private_link_hub_basic: PrivateEndpointConnectionForPrivateLinkHubBasic, #[serde(flatten)] pub serde_json_value: serde_json::Value, }
45.837604
127
0.719986
61e47d08040d9373518484d9076bfe90bd87cd8f
1,420
extern crate peg;

// Grammar exercising several rust-peg features: templated rules (generic over
// another rule), `##`-style method invocation, and bounded repetition.
peg::parser!( grammar ra() for str {
    use peg::ParseLiteral;

    // One or more ASCII digits, parsed into an i64.
    rule number() -> i64 = n:$(['0'..='9']+) { n.parse().unwrap() }

    // Template: comma-separated `x`, allowing one optional trailing comma.
    rule commasep<T>(x: rule<T>) -> Vec<T> = v:(x() ** ",") ","? {v}

    // Template: `x` enclosed in square brackets.
    rule bracketed<T>(x: rule<T>) -> T = "[" v:x() "]" {v}

    pub rule list() -> Vec<i64> = commasep(<number()>)

    pub rule array() -> Vec<i64> = bracketed(<commasep(<number()>)>)

    // Matches `id` as a whole word: the literal must not be immediately
    // followed by an identifier character.
    rule keyword(id: &'static str) = ##parse_string_literal(id) !['0'..='9' | 'a'..='z' | 'A'..='Z' | '_']

    rule ident() = ['a'..='z']+

    // Optional run of spaces (conventional whitespace rule).
    rule _() = [' ']*

    pub rule ifelse() = keyword("if") _ ident() _ keyword("then") _ ident() _ keyword("else") _ ident()

    // Exactly `i` repetitions of the character 'a'.
    pub rule repeated_a(i: usize) = ['a']*<{i}>

    // Case-insensitive literal: consume `literal.len()` characters, then
    // accept only if they equal `literal` ignoring ASCII case.
    rule i(literal: &'static str)
        = input:$([_]*<{literal.len()}>)
        {? if input.eq_ignore_ascii_case(literal) { Ok(()) } else { Err(literal) } }

    pub rule test_i() = i("foo") i("bar")
});

use ra::*;

// Smoke tests for each public rule of the grammar above.
fn main() {
    assert_eq!(list("1,2,3,4"), Ok(vec![1,2,3,4]));
    assert_eq!(array("[1,1,2,3,5,]"), Ok(vec![1,1,2,3,5]));
    assert!(ifelse("if foo then x else y").is_ok());
    assert!(ifelse("iffoothenxelsey").is_err());
    assert!(repeated_a("aa", 2).is_ok());
    assert!(repeated_a("aaa", 2).is_err());
    assert!(repeated_a("aaaaa", 5).is_ok());
    assert!(test_i("fOoBaR").is_ok());
    assert!(test_i("fOoBaZ").is_err());
    assert!(test_i("fOoX").is_err());
}
33.023256
143
0.526056
d925c6dd3549aa5da8b01d7a80858bd1dccaf8d1
34,339
//! AST walker. Each overridden visit method has full control over what //! happens with its node, it can do its own traversal of the node's children, //! call `visit::walk_*` to apply the default traversal algorithm, or prevent //! deeper traversal by doing nothing. //! //! Note: it is an important invariant that the default visitor walks the body //! of a function in "execution order" (more concretely, reverse post-order //! with respect to the CFG implied by the AST), meaning that if AST node A may //! execute before AST node B, then A is visited first. The borrow checker in //! particular relies on this property. //! //! Note: walking an AST before macro expansion is probably a bad idea. For //! instance, a walker looking for item names in a module will miss all of //! those that are created by the expansion of a macro. use crate::ast::*; use crate::token; use rustc_span::symbol::{Ident, Symbol}; use rustc_span::Span; #[derive(Copy, Clone, Debug, PartialEq)] pub enum AssocCtxt { Trait, Impl, } #[derive(Copy, Clone, Debug, PartialEq)] pub enum FnCtxt { Free, Foreign, Assoc(AssocCtxt), } #[derive(Copy, Clone, Debug)] pub enum FnKind<'a> { /// E.g., `fn foo()`, `fn foo(&self)`, or `extern "Abi" fn foo()`. Fn(FnCtxt, Ident, &'a FnSig, &'a Visibility, &'a Generics, Option<&'a Block>), /// E.g., `|x, y| body`. Closure(&'a FnDecl, &'a Expr), } impl<'a> FnKind<'a> { pub fn header(&self) -> Option<&'a FnHeader> { match *self { FnKind::Fn(_, _, sig, _, _, _) => Some(&sig.header), FnKind::Closure(_, _) => None, } } pub fn ident(&self) -> Option<&Ident> { match self { FnKind::Fn(_, ident, ..) => Some(ident), _ => None, } } pub fn decl(&self) -> &'a FnDecl { match self { FnKind::Fn(_, _, sig, _, _, _) => &sig.decl, FnKind::Closure(decl, _) => decl, } } pub fn ctxt(&self) -> Option<FnCtxt> { match self { FnKind::Fn(ctxt, ..) => Some(*ctxt), FnKind::Closure(..) => None, } } } /// Each method of the `Visitor` trait is a hook to be potentially /// overridden. 
Each method's default implementation recursively visits /// the substructure of the input via the corresponding `walk` method; /// e.g., the `visit_item` method by default calls `visit::walk_item`. /// /// If you want to ensure that your code handles every variant /// explicitly, you need to override each method. (And you also need /// to monitor future changes to `Visitor` in case a new method with a /// new default implementation gets introduced.) pub trait Visitor<'ast>: Sized { fn visit_name(&mut self, _span: Span, _name: Symbol) { // Nothing to do. } fn visit_ident(&mut self, ident: Ident) { walk_ident(self, ident); } fn visit_foreign_item(&mut self, i: &'ast ForeignItem) { walk_foreign_item(self, i) } fn visit_item(&mut self, i: &'ast Item) { walk_item(self, i) } fn visit_local(&mut self, l: &'ast Local) { walk_local(self, l) } fn visit_block(&mut self, b: &'ast Block) { walk_block(self, b) } fn visit_stmt(&mut self, s: &'ast Stmt) { walk_stmt(self, s) } fn visit_param(&mut self, param: &'ast Param) { walk_param(self, param) } fn visit_arm(&mut self, a: &'ast Arm) { walk_arm(self, a) } fn visit_pat(&mut self, p: &'ast Pat) { walk_pat(self, p) } fn visit_anon_const(&mut self, c: &'ast AnonConst) { walk_anon_const(self, c) } fn visit_expr(&mut self, ex: &'ast Expr) { walk_expr(self, ex) } fn visit_expr_post(&mut self, _ex: &'ast Expr) {} fn visit_ty(&mut self, t: &'ast Ty) { walk_ty(self, t) } fn visit_generic_param(&mut self, param: &'ast GenericParam) { walk_generic_param(self, param) } fn visit_generics(&mut self, g: &'ast Generics) { walk_generics(self, g) } fn visit_where_predicate(&mut self, p: &'ast WherePredicate) { walk_where_predicate(self, p) } fn visit_fn(&mut self, fk: FnKind<'ast>, s: Span, _: NodeId) { walk_fn(self, fk, s) } fn visit_assoc_item(&mut self, i: &'ast AssocItem, ctxt: AssocCtxt) { walk_assoc_item(self, i, ctxt) } fn visit_trait_ref(&mut self, t: &'ast TraitRef) { walk_trait_ref(self, t) } fn visit_param_bound(&mut self, bounds: &'ast 
GenericBound) { walk_param_bound(self, bounds) } fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) { walk_poly_trait_ref(self, t, m) } fn visit_variant_data(&mut self, s: &'ast VariantData) { walk_struct_def(self, s) } fn visit_field_def(&mut self, s: &'ast FieldDef) { walk_field_def(self, s) } fn visit_enum_def( &mut self, enum_definition: &'ast EnumDef, generics: &'ast Generics, item_id: NodeId, _: Span, ) { walk_enum_def(self, enum_definition, generics, item_id) } fn visit_variant(&mut self, v: &'ast Variant) { walk_variant(self, v) } fn visit_label(&mut self, label: &'ast Label) { walk_label(self, label) } fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) { walk_lifetime(self, lifetime) } fn visit_mac_call(&mut self, mac: &'ast MacCall) { walk_mac(self, mac) } fn visit_mac_def(&mut self, _mac: &'ast MacroDef, _id: NodeId) { // Nothing to do } fn visit_path(&mut self, path: &'ast Path, _id: NodeId) { walk_path(self, path) } fn visit_use_tree(&mut self, use_tree: &'ast UseTree, id: NodeId, _nested: bool) { walk_use_tree(self, use_tree, id) } fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) { walk_path_segment(self, path_span, path_segment) } fn visit_generic_args(&mut self, path_span: Span, generic_args: &'ast GenericArgs) { walk_generic_args(self, path_span, generic_args) } fn visit_generic_arg(&mut self, generic_arg: &'ast GenericArg) { walk_generic_arg(self, generic_arg) } fn visit_assoc_constraint(&mut self, constraint: &'ast AssocConstraint) { walk_assoc_constraint(self, constraint) } fn visit_attribute(&mut self, attr: &'ast Attribute) { walk_attribute(self, attr) } fn visit_vis(&mut self, vis: &'ast Visibility) { walk_vis(self, vis) } fn visit_fn_ret_ty(&mut self, ret_ty: &'ast FnRetTy) { walk_fn_ret_ty(self, ret_ty) } fn visit_fn_header(&mut self, _header: &'ast FnHeader) { // Nothing to do } fn visit_expr_field(&mut self, f: &'ast ExprField) { walk_expr_field(self, f) } fn 
visit_pat_field(&mut self, fp: &'ast PatField) { walk_pat_field(self, fp) } fn visit_crate(&mut self, krate: &'ast Crate) { walk_crate(self, krate) } fn visit_inline_asm(&mut self, asm: &'ast InlineAsm) { walk_inline_asm(self, asm) } fn visit_inline_asm_sym(&mut self, sym: &'ast InlineAsmSym) { walk_inline_asm_sym(self, sym) } } #[macro_export] macro_rules! walk_list { ($visitor: expr, $method: ident, $list: expr) => { for elem in $list { $visitor.$method(elem) } }; ($visitor: expr, $method: ident, $list: expr, $($extra_args: expr),*) => { for elem in $list { $visitor.$method(elem, $($extra_args,)*) } } } pub fn walk_ident<'a, V: Visitor<'a>>(visitor: &mut V, ident: Ident) { visitor.visit_name(ident.span, ident.name); } pub fn walk_crate<'a, V: Visitor<'a>>(visitor: &mut V, krate: &'a Crate) { walk_list!(visitor, visit_item, &krate.items); walk_list!(visitor, visit_attribute, &krate.attrs); } pub fn walk_local<'a, V: Visitor<'a>>(visitor: &mut V, local: &'a Local) { for attr in local.attrs.iter() { visitor.visit_attribute(attr); } visitor.visit_pat(&local.pat); walk_list!(visitor, visit_ty, &local.ty); if let Some((init, els)) = local.kind.init_else_opt() { visitor.visit_expr(init); walk_list!(visitor, visit_block, els); } } pub fn walk_label<'a, V: Visitor<'a>>(visitor: &mut V, label: &'a Label) { visitor.visit_ident(label.ident); } pub fn walk_lifetime<'a, V: Visitor<'a>>(visitor: &mut V, lifetime: &'a Lifetime) { visitor.visit_ident(lifetime.ident); } pub fn walk_poly_trait_ref<'a, V>( visitor: &mut V, trait_ref: &'a PolyTraitRef, _: &TraitBoundModifier, ) where V: Visitor<'a>, { walk_list!(visitor, visit_generic_param, &trait_ref.bound_generic_params); visitor.visit_trait_ref(&trait_ref.trait_ref); } pub fn walk_trait_ref<'a, V: Visitor<'a>>(visitor: &mut V, trait_ref: &'a TraitRef) { visitor.visit_path(&trait_ref.path, trait_ref.ref_id) } pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) { visitor.visit_vis(&item.vis); 
visitor.visit_ident(item.ident); match item.kind { ItemKind::ExternCrate(orig_name) => { if let Some(orig_name) = orig_name { visitor.visit_name(item.span, orig_name); } } ItemKind::Use(ref use_tree) => visitor.visit_use_tree(use_tree, item.id, false), ItemKind::Static(ref typ, _, ref expr) | ItemKind::Const(_, ref typ, ref expr) => { visitor.visit_ty(typ); walk_list!(visitor, visit_expr, expr); } ItemKind::Fn(box Fn { defaultness: _, ref generics, ref sig, ref body }) => { let kind = FnKind::Fn(FnCtxt::Free, item.ident, sig, &item.vis, generics, body.as_deref()); visitor.visit_fn(kind, item.span, item.id) } ItemKind::Mod(_unsafety, ref mod_kind) => match mod_kind { ModKind::Loaded(items, _inline, _inner_span) => { walk_list!(visitor, visit_item, items) } ModKind::Unloaded => {} }, ItemKind::ForeignMod(ref foreign_module) => { walk_list!(visitor, visit_foreign_item, &foreign_module.items); } ItemKind::GlobalAsm(ref asm) => walk_inline_asm(visitor, asm), ItemKind::TyAlias(box TyAlias { ref generics, ref bounds, ref ty, .. 
}) => { visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_ty, ty); } ItemKind::Enum(ref enum_definition, ref generics) => { visitor.visit_generics(generics); visitor.visit_enum_def(enum_definition, generics, item.id, item.span) } ItemKind::Impl(box Impl { defaultness: _, unsafety: _, ref generics, constness: _, polarity: _, ref of_trait, ref self_ty, ref items, }) => { visitor.visit_generics(generics); walk_list!(visitor, visit_trait_ref, of_trait); visitor.visit_ty(self_ty); walk_list!(visitor, visit_assoc_item, items, AssocCtxt::Impl); } ItemKind::Struct(ref struct_definition, ref generics) | ItemKind::Union(ref struct_definition, ref generics) => { visitor.visit_generics(generics); visitor.visit_variant_data(struct_definition); } ItemKind::Trait(box Trait { unsafety: _, is_auto: _, ref generics, ref bounds, ref items, }) => { visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_assoc_item, items, AssocCtxt::Trait); } ItemKind::TraitAlias(ref generics, ref bounds) => { visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); } ItemKind::MacCall(ref mac) => visitor.visit_mac_call(mac), ItemKind::MacroDef(ref ts) => visitor.visit_mac_def(ts, item.id), } walk_list!(visitor, visit_attribute, &item.attrs); } pub fn walk_enum_def<'a, V: Visitor<'a>>( visitor: &mut V, enum_definition: &'a EnumDef, _: &'a Generics, _: NodeId, ) { walk_list!(visitor, visit_variant, &enum_definition.variants); } pub fn walk_variant<'a, V: Visitor<'a>>(visitor: &mut V, variant: &'a Variant) where V: Visitor<'a>, { visitor.visit_ident(variant.ident); visitor.visit_vis(&variant.vis); visitor.visit_variant_data(&variant.data); walk_list!(visitor, visit_anon_const, &variant.disr_expr); walk_list!(visitor, visit_attribute, &variant.attrs); } pub fn walk_expr_field<'a, V: Visitor<'a>>(visitor: &mut V, f: &'a ExprField) { visitor.visit_expr(&f.expr); 
visitor.visit_ident(f.ident); walk_list!(visitor, visit_attribute, f.attrs.iter()); } pub fn walk_pat_field<'a, V: Visitor<'a>>(visitor: &mut V, fp: &'a PatField) { visitor.visit_ident(fp.ident); visitor.visit_pat(&fp.pat); walk_list!(visitor, visit_attribute, fp.attrs.iter()); } pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) { match typ.kind { TyKind::Slice(ref ty) | TyKind::Paren(ref ty) => visitor.visit_ty(ty), TyKind::Ptr(ref mutable_type) => visitor.visit_ty(&mutable_type.ty), TyKind::Rptr(ref opt_lifetime, ref mutable_type) => { walk_list!(visitor, visit_lifetime, opt_lifetime); visitor.visit_ty(&mutable_type.ty) } TyKind::Tup(ref tuple_element_types) => { walk_list!(visitor, visit_ty, tuple_element_types); } TyKind::BareFn(ref function_declaration) => { walk_list!(visitor, visit_generic_param, &function_declaration.generic_params); walk_fn_decl(visitor, &function_declaration.decl); } TyKind::Path(ref maybe_qself, ref path) => { if let Some(ref qself) = *maybe_qself { visitor.visit_ty(&qself.ty); } visitor.visit_path(path, typ.id); } TyKind::Array(ref ty, ref length) => { visitor.visit_ty(ty); visitor.visit_anon_const(length) } TyKind::TraitObject(ref bounds, ..) | TyKind::ImplTrait(_, ref bounds) => { walk_list!(visitor, visit_param_bound, bounds); } TyKind::Typeof(ref expression) => visitor.visit_anon_const(expression), TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {} TyKind::MacCall(ref mac) => visitor.visit_mac_call(mac), TyKind::Never | TyKind::CVarArgs => {} } } pub fn walk_path<'a, V: Visitor<'a>>(visitor: &mut V, path: &'a Path) { for segment in &path.segments { visitor.visit_path_segment(path.span, segment); } } pub fn walk_use_tree<'a, V: Visitor<'a>>(visitor: &mut V, use_tree: &'a UseTree, id: NodeId) { visitor.visit_path(&use_tree.prefix, id); match use_tree.kind { UseTreeKind::Simple(rename, ..) => { // The extra IDs are handled during HIR lowering. 
if let Some(rename) = rename { visitor.visit_ident(rename); } } UseTreeKind::Glob => {} UseTreeKind::Nested(ref use_trees) => { for &(ref nested_tree, nested_id) in use_trees { visitor.visit_use_tree(nested_tree, nested_id, true); } } } } pub fn walk_path_segment<'a, V: Visitor<'a>>( visitor: &mut V, path_span: Span, segment: &'a PathSegment, ) { visitor.visit_ident(segment.ident); if let Some(ref args) = segment.args { visitor.visit_generic_args(path_span, args); } } pub fn walk_generic_args<'a, V>(visitor: &mut V, _path_span: Span, generic_args: &'a GenericArgs) where V: Visitor<'a>, { match *generic_args { GenericArgs::AngleBracketed(ref data) => { for arg in &data.args { match arg { AngleBracketedArg::Arg(a) => visitor.visit_generic_arg(a), AngleBracketedArg::Constraint(c) => visitor.visit_assoc_constraint(c), } } } GenericArgs::Parenthesized(ref data) => { walk_list!(visitor, visit_ty, &data.inputs); walk_fn_ret_ty(visitor, &data.output); } } } pub fn walk_generic_arg<'a, V>(visitor: &mut V, generic_arg: &'a GenericArg) where V: Visitor<'a>, { match generic_arg { GenericArg::Lifetime(lt) => visitor.visit_lifetime(lt), GenericArg::Type(ty) => visitor.visit_ty(ty), GenericArg::Const(ct) => visitor.visit_anon_const(ct), } } pub fn walk_assoc_constraint<'a, V: Visitor<'a>>(visitor: &mut V, constraint: &'a AssocConstraint) { visitor.visit_ident(constraint.ident); if let Some(ref gen_args) = constraint.gen_args { visitor.visit_generic_args(gen_args.span(), gen_args); } match constraint.kind { AssocConstraintKind::Equality { ref term } => match term { Term::Ty(ty) => visitor.visit_ty(ty), Term::Const(c) => visitor.visit_anon_const(c), }, AssocConstraintKind::Bound { ref bounds } => { walk_list!(visitor, visit_param_bound, bounds); } } } pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) { match pattern.kind { PatKind::TupleStruct(ref opt_qself, ref path, ref elems) => { if let Some(ref qself) = *opt_qself { visitor.visit_ty(&qself.ty); } 
visitor.visit_path(path, pattern.id); walk_list!(visitor, visit_pat, elems); } PatKind::Path(ref opt_qself, ref path) => { if let Some(ref qself) = *opt_qself { visitor.visit_ty(&qself.ty); } visitor.visit_path(path, pattern.id) } PatKind::Struct(ref opt_qself, ref path, ref fields, _) => { if let Some(ref qself) = *opt_qself { visitor.visit_ty(&qself.ty); } visitor.visit_path(path, pattern.id); walk_list!(visitor, visit_pat_field, fields); } PatKind::Box(ref subpattern) | PatKind::Ref(ref subpattern, _) | PatKind::Paren(ref subpattern) => visitor.visit_pat(subpattern), PatKind::Ident(_, ident, ref optional_subpattern) => { visitor.visit_ident(ident); walk_list!(visitor, visit_pat, optional_subpattern); } PatKind::Lit(ref expression) => visitor.visit_expr(expression), PatKind::Range(ref lower_bound, ref upper_bound, _) => { walk_list!(visitor, visit_expr, lower_bound); walk_list!(visitor, visit_expr, upper_bound); } PatKind::Wild | PatKind::Rest => {} PatKind::Tuple(ref elems) | PatKind::Slice(ref elems) | PatKind::Or(ref elems) => { walk_list!(visitor, visit_pat, elems); } PatKind::MacCall(ref mac) => visitor.visit_mac_call(mac), } } pub fn walk_foreign_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a ForeignItem) { let Item { id, span, ident, ref vis, ref attrs, ref kind, tokens: _ } = *item; visitor.visit_vis(vis); visitor.visit_ident(ident); walk_list!(visitor, visit_attribute, attrs); match kind { ForeignItemKind::Static(ty, _, expr) => { visitor.visit_ty(ty); walk_list!(visitor, visit_expr, expr); } ForeignItemKind::Fn(box Fn { defaultness: _, ref generics, ref sig, ref body }) => { let kind = FnKind::Fn(FnCtxt::Foreign, ident, sig, vis, generics, body.as_deref()); visitor.visit_fn(kind, span, id); } ForeignItemKind::TyAlias(box TyAlias { generics, bounds, ty, .. 
}) => { visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_ty, ty); } ForeignItemKind::MacCall(mac) => { visitor.visit_mac_call(mac); } } } pub fn walk_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a GenericBound) { match *bound { GenericBound::Trait(ref typ, ref modifier) => visitor.visit_poly_trait_ref(typ, modifier), GenericBound::Outlives(ref lifetime) => visitor.visit_lifetime(lifetime), } } pub fn walk_generic_param<'a, V: Visitor<'a>>(visitor: &mut V, param: &'a GenericParam) { visitor.visit_ident(param.ident); walk_list!(visitor, visit_attribute, param.attrs.iter()); walk_list!(visitor, visit_param_bound, &param.bounds); match param.kind { GenericParamKind::Lifetime => (), GenericParamKind::Type { ref default } => walk_list!(visitor, visit_ty, default), GenericParamKind::Const { ref ty, ref default, .. } => { visitor.visit_ty(ty); if let Some(default) = default { visitor.visit_anon_const(default); } } } } pub fn walk_generics<'a, V: Visitor<'a>>(visitor: &mut V, generics: &'a Generics) { walk_list!(visitor, visit_generic_param, &generics.params); walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates); } pub fn walk_where_predicate<'a, V: Visitor<'a>>(visitor: &mut V, predicate: &'a WherePredicate) { match *predicate { WherePredicate::BoundPredicate(WhereBoundPredicate { ref bounded_ty, ref bounds, ref bound_generic_params, .. }) => { visitor.visit_ty(bounded_ty); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_generic_param, bound_generic_params); } WherePredicate::RegionPredicate(WhereRegionPredicate { ref lifetime, ref bounds, .. }) => { visitor.visit_lifetime(lifetime); walk_list!(visitor, visit_param_bound, bounds); } WherePredicate::EqPredicate(WhereEqPredicate { ref lhs_ty, ref rhs_ty, .. 
}) => { visitor.visit_ty(lhs_ty); visitor.visit_ty(rhs_ty); } } } pub fn walk_fn_ret_ty<'a, V: Visitor<'a>>(visitor: &mut V, ret_ty: &'a FnRetTy) { if let FnRetTy::Ty(ref output_ty) = *ret_ty { visitor.visit_ty(output_ty) } } pub fn walk_fn_decl<'a, V: Visitor<'a>>(visitor: &mut V, function_declaration: &'a FnDecl) { for param in &function_declaration.inputs { visitor.visit_param(param); } visitor.visit_fn_ret_ty(&function_declaration.output); } pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>, _span: Span) { match kind { FnKind::Fn(_, _, sig, _, generics, body) => { visitor.visit_generics(generics); visitor.visit_fn_header(&sig.header); walk_fn_decl(visitor, &sig.decl); walk_list!(visitor, visit_block, body); } FnKind::Closure(decl, body) => { walk_fn_decl(visitor, decl); visitor.visit_expr(body); } } } pub fn walk_assoc_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a AssocItem, ctxt: AssocCtxt) { let Item { id, span, ident, ref vis, ref attrs, ref kind, tokens: _ } = *item; visitor.visit_vis(vis); visitor.visit_ident(ident); walk_list!(visitor, visit_attribute, attrs); match kind { AssocItemKind::Const(_, ty, expr) => { visitor.visit_ty(ty); walk_list!(visitor, visit_expr, expr); } AssocItemKind::Fn(box Fn { defaultness: _, ref generics, ref sig, ref body }) => { let kind = FnKind::Fn(FnCtxt::Assoc(ctxt), ident, sig, vis, generics, body.as_deref()); visitor.visit_fn(kind, span, id); } AssocItemKind::TyAlias(box TyAlias { generics, bounds, ty, .. 
}) => { visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_ty, ty); } AssocItemKind::MacCall(mac) => { visitor.visit_mac_call(mac); } } } pub fn walk_struct_def<'a, V: Visitor<'a>>(visitor: &mut V, struct_definition: &'a VariantData) { walk_list!(visitor, visit_field_def, struct_definition.fields()); } pub fn walk_field_def<'a, V: Visitor<'a>>(visitor: &mut V, field: &'a FieldDef) { visitor.visit_vis(&field.vis); if let Some(ident) = field.ident { visitor.visit_ident(ident); } visitor.visit_ty(&field.ty); walk_list!(visitor, visit_attribute, &field.attrs); } pub fn walk_block<'a, V: Visitor<'a>>(visitor: &mut V, block: &'a Block) { walk_list!(visitor, visit_stmt, &block.stmts); } pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) { match statement.kind { StmtKind::Local(ref local) => visitor.visit_local(local), StmtKind::Item(ref item) => visitor.visit_item(item), StmtKind::Expr(ref expr) | StmtKind::Semi(ref expr) => visitor.visit_expr(expr), StmtKind::Empty => {} StmtKind::MacCall(ref mac) => { let MacCallStmt { ref mac, style: _, ref attrs, tokens: _ } = **mac; visitor.visit_mac_call(mac); for attr in attrs.iter() { visitor.visit_attribute(attr); } } } } pub fn walk_mac<'a, V: Visitor<'a>>(visitor: &mut V, mac: &'a MacCall) { visitor.visit_path(&mac.path, DUMMY_NODE_ID); } pub fn walk_anon_const<'a, V: Visitor<'a>>(visitor: &mut V, constant: &'a AnonConst) { visitor.visit_expr(&constant.value); } pub fn walk_inline_asm<'a, V: Visitor<'a>>(visitor: &mut V, asm: &'a InlineAsm) { for (op, _) in &asm.operands { match op { InlineAsmOperand::In { expr, .. } | InlineAsmOperand::Out { expr: Some(expr), .. } | InlineAsmOperand::InOut { expr, .. } => visitor.visit_expr(expr), InlineAsmOperand::Out { expr: None, .. } => {} InlineAsmOperand::SplitInOut { in_expr, out_expr, .. 
} => { visitor.visit_expr(in_expr); if let Some(out_expr) = out_expr { visitor.visit_expr(out_expr); } } InlineAsmOperand::Const { anon_const, .. } => visitor.visit_anon_const(anon_const), InlineAsmOperand::Sym { sym } => visitor.visit_inline_asm_sym(sym), } } } pub fn walk_inline_asm_sym<'a, V: Visitor<'a>>(visitor: &mut V, sym: &'a InlineAsmSym) { if let Some(ref qself) = sym.qself { visitor.visit_ty(&qself.ty); } visitor.visit_path(&sym.path, sym.id); } pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) { walk_list!(visitor, visit_attribute, expression.attrs.iter()); match expression.kind { ExprKind::Box(ref subexpression) => visitor.visit_expr(subexpression), ExprKind::Array(ref subexpressions) => { walk_list!(visitor, visit_expr, subexpressions); } ExprKind::ConstBlock(ref anon_const) => visitor.visit_anon_const(anon_const), ExprKind::Repeat(ref element, ref count) => { visitor.visit_expr(element); visitor.visit_anon_const(count) } ExprKind::Struct(ref se) => { if let Some(ref qself) = se.qself { visitor.visit_ty(&qself.ty); } visitor.visit_path(&se.path, expression.id); walk_list!(visitor, visit_expr_field, &se.fields); match &se.rest { StructRest::Base(expr) => visitor.visit_expr(expr), StructRest::Rest(_span) => {} StructRest::None => {} } } ExprKind::Tup(ref subexpressions) => { walk_list!(visitor, visit_expr, subexpressions); } ExprKind::Call(ref callee_expression, ref arguments) => { visitor.visit_expr(callee_expression); walk_list!(visitor, visit_expr, arguments); } ExprKind::MethodCall(ref segment, ref arguments, _span) => { visitor.visit_path_segment(expression.span, segment); walk_list!(visitor, visit_expr, arguments); } ExprKind::Binary(_, ref left_expression, ref right_expression) => { visitor.visit_expr(left_expression); visitor.visit_expr(right_expression) } ExprKind::AddrOf(_, _, ref subexpression) | ExprKind::Unary(_, ref subexpression) => { visitor.visit_expr(subexpression) } ExprKind::Cast(ref subexpression, ref typ) 
| ExprKind::Type(ref subexpression, ref typ) => { visitor.visit_expr(subexpression); visitor.visit_ty(typ) } ExprKind::Let(ref pat, ref expr, _) => { visitor.visit_pat(pat); visitor.visit_expr(expr); } ExprKind::If(ref head_expression, ref if_block, ref optional_else) => { visitor.visit_expr(head_expression); visitor.visit_block(if_block); walk_list!(visitor, visit_expr, optional_else); } ExprKind::While(ref subexpression, ref block, ref opt_label) => { walk_list!(visitor, visit_label, opt_label); visitor.visit_expr(subexpression); visitor.visit_block(block); } ExprKind::ForLoop(ref pattern, ref subexpression, ref block, ref opt_label) => { walk_list!(visitor, visit_label, opt_label); visitor.visit_pat(pattern); visitor.visit_expr(subexpression); visitor.visit_block(block); } ExprKind::Loop(ref block, ref opt_label) => { walk_list!(visitor, visit_label, opt_label); visitor.visit_block(block); } ExprKind::Match(ref subexpression, ref arms) => { visitor.visit_expr(subexpression); walk_list!(visitor, visit_arm, arms); } ExprKind::Closure(_, _, _, ref decl, ref body, _decl_span) => { visitor.visit_fn(FnKind::Closure(decl, body), expression.span, expression.id) } ExprKind::Block(ref block, ref opt_label) => { walk_list!(visitor, visit_label, opt_label); visitor.visit_block(block); } ExprKind::Async(_, _, ref body) => { visitor.visit_block(body); } ExprKind::Await(ref expr) => visitor.visit_expr(expr), ExprKind::Assign(ref lhs, ref rhs, _) => { visitor.visit_expr(lhs); visitor.visit_expr(rhs); } ExprKind::AssignOp(_, ref left_expression, ref right_expression) => { visitor.visit_expr(left_expression); visitor.visit_expr(right_expression); } ExprKind::Field(ref subexpression, ident) => { visitor.visit_expr(subexpression); visitor.visit_ident(ident); } ExprKind::Index(ref main_expression, ref index_expression) => { visitor.visit_expr(main_expression); visitor.visit_expr(index_expression) } ExprKind::Range(ref start, ref end, _) => { walk_list!(visitor, visit_expr, start); 
walk_list!(visitor, visit_expr, end); } ExprKind::Underscore => {} ExprKind::Path(ref maybe_qself, ref path) => { if let Some(ref qself) = *maybe_qself { visitor.visit_ty(&qself.ty); } visitor.visit_path(path, expression.id) } ExprKind::Break(ref opt_label, ref opt_expr) => { walk_list!(visitor, visit_label, opt_label); walk_list!(visitor, visit_expr, opt_expr); } ExprKind::Continue(ref opt_label) => { walk_list!(visitor, visit_label, opt_label); } ExprKind::Ret(ref optional_expression) => { walk_list!(visitor, visit_expr, optional_expression); } ExprKind::MacCall(ref mac) => visitor.visit_mac_call(mac), ExprKind::Paren(ref subexpression) => visitor.visit_expr(subexpression), ExprKind::InlineAsm(ref asm) => walk_inline_asm(visitor, asm), ExprKind::Yield(ref optional_expression) => { walk_list!(visitor, visit_expr, optional_expression); } ExprKind::Try(ref subexpression) => visitor.visit_expr(subexpression), ExprKind::TryBlock(ref body) => visitor.visit_block(body), ExprKind::Lit(_) | ExprKind::Err => {} } visitor.visit_expr_post(expression) } pub fn walk_param<'a, V: Visitor<'a>>(visitor: &mut V, param: &'a Param) { walk_list!(visitor, visit_attribute, param.attrs.iter()); visitor.visit_pat(&param.pat); visitor.visit_ty(&param.ty); } pub fn walk_arm<'a, V: Visitor<'a>>(visitor: &mut V, arm: &'a Arm) { visitor.visit_pat(&arm.pat); walk_list!(visitor, visit_expr, &arm.guard); visitor.visit_expr(&arm.body); walk_list!(visitor, visit_attribute, &arm.attrs); } pub fn walk_vis<'a, V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) { if let VisibilityKind::Restricted { ref path, id } = vis.kind { visitor.visit_path(path, id); } } pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute) { match attr.kind { AttrKind::Normal(ref item, ref _tokens) => walk_mac_args(visitor, &item.args), AttrKind::DocComment(..) 
=> {} } } pub fn walk_mac_args<'a, V: Visitor<'a>>(visitor: &mut V, args: &'a MacArgs) { match args { MacArgs::Empty => {} MacArgs::Delimited(_dspan, _delim, _tokens) => {} // The value in `#[key = VALUE]` must be visited as an expression for backward // compatibility, so that macros can be expanded in that position. MacArgs::Eq(_eq_span, token) => match &token.kind { token::Interpolated(nt) => match &**nt { token::NtExpr(expr) => visitor.visit_expr(expr), t => panic!("unexpected token in key-value attribute: {:?}", t), }, t => panic!("unexpected token in key-value attribute: {:?}", t), }, } }
36.963402
100
0.59827
508724d9f475873cb0a383afa6c2a19a43634163
4,963
use git_commitgraph::{graph::Position as GraphPosition, Graph}; use git_object::{borrowed, owned}; use std::{ collections::{HashMap, HashSet}, convert::{TryFrom, TryInto}, hash::BuildHasher, io::{BufRead, Cursor}, path::{Path, PathBuf}, process::Command, }; type Result = std::result::Result<(), Box<dyn std::error::Error>>; mod access; pub fn check_common(cg: &Graph, expected: &HashMap<String, RefInfo, impl BuildHasher>) { assert_eq!( usize::try_from(cg.num_commits()).expect("an architecture able to hold 32 bits of integer"), expected.len() ); for ref_info in expected.values() { assert_eq!(cg.id_at(ref_info.pos()), ref_info.id(), "id_at({})", ref_info.pos()); assert_eq!( cg.lookup(ref_info.id()), Some(ref_info.pos()), "lookup({})", ref_info.id() ); let expected_parents: Vec<_> = ref_info .parent_ids() .into_iter() .map(|id| { expected .values() .find(|item| item.id() == id) .expect("find RefInfo by id") }) .collect(); let commit = cg.commit_at(ref_info.pos()); assert_eq!(commit.id(), ref_info.id()); assert_eq!(commit.root_tree_id(), ref_info.root_tree_id()); assert_eq!( commit.parent1().expect("failed to access commit's parent1"), expected_parents.iter().map(|x| x.pos()).next() ); assert_eq!( commit .iter_parents() .collect::<std::result::Result<Vec<_>, _>>() .expect("failed to access commit's parents"), expected_parents.iter().map(|x| x.pos()).collect::<Vec<_>>() ); } assert_eq!( cg.iter_ids().collect::<HashSet<_>>(), expected.values().map(|x| x.id()).collect::<HashSet<_>>() ); } pub fn create_repo(script_path: &str) -> tempfile::TempDir { let dir = tempfile::tempdir().expect("failed to create temp dir"); let status = Command::new("bash") .arg(fixture_path(script_path)) .arg(dir.path()) .env_remove("GIT_DIR") .status() .expect("failed to run repo script"); assert!(status.success(), "repo script failed"); dir } pub fn fixture_path(path: &str) -> PathBuf { PathBuf::from("tests").join("fixtures").join(path) } pub fn hex_to_id(hex: &[u8]) -> owned::Id { 
owned::Id::from_40_bytes_in_hex(hex).expect("40 bytes hex") } pub struct RefInfo { id: owned::Id, parent_ids: Vec<owned::Id>, pos: GraphPosition, root_tree_id: owned::Id, } impl RefInfo { pub fn id(&self) -> borrowed::Id { self.id.to_borrowed() } pub fn pos(&self) -> GraphPosition { self.pos } pub fn parent_ids(&self) -> impl IntoIterator<Item = borrowed::Id> { self.parent_ids.iter().map(|x| x.to_borrowed()) } pub fn root_tree_id(&self) -> borrowed::Id { self.root_tree_id.to_borrowed() } } pub fn inspect_refs(repo_dir: impl AsRef<Path>, refs: &[&'static str]) -> HashMap<String, RefInfo> { let output = Command::new("git") .arg("-C") .arg(repo_dir.as_ref()) .arg("show") .arg("--no-patch") .arg("--pretty=format:%S %H %T %P") .args(refs) .arg("--") .env_remove("GIT_DIR") .output() .expect("failed to execute `git show`"); // Output format: <refname> <id> <tree_id> <parent_ids> let mut infos: Vec<_> = Cursor::new(output.stdout) .lines() .map(|x| x.expect("failed to read `git show` output")) .map(|x| { let parts = x.trim_end().split(' ').collect::<Vec<_>>(); ( parts[0].to_string(), owned::Id::from_40_bytes_in_hex(parts[1].as_bytes()).expect("40 bytes hex"), owned::Id::from_40_bytes_in_hex(parts[2].as_bytes()).expect("40 bytes hex"), parts[3..] .iter() .map(|x| owned::Id::from_40_bytes_in_hex(x.as_bytes()).expect("40 bytes hex")) .collect(), ) }) .collect(); infos.sort_by_key(|x| x.1); let get_pos = |id: borrowed::Id| -> GraphPosition { let pos: u32 = infos .binary_search_by_key(&id, |x| x.1.to_borrowed()) .expect("sorted_ids to contain id") .try_into() .expect("graph position to fit in u32"); GraphPosition(pos) }; infos .iter() .cloned() .map(|(name, id, root_tree_id, parent_ids)| { ( name, RefInfo { id, parent_ids, root_tree_id, pos: get_pos(id.to_borrowed()), }, ) }) .collect() }
30.262195
100
0.52166
deadf15a87fc671af2765369fc24c8525ca0bb89
4,881
use anyhow::Context; use super::*; use crate::ok_or_shutdown; use crate::state_helper::{pause_on_failure, save_state}; impl TaskHandler { /// Check whether there are any finished processes /// In case there are, handle them and update the shared state pub fn handle_finished_tasks(&mut self) { let finished = self.get_finished(); // Nothing to do. Early return if finished.is_empty() { return; } // Clone the state ref, so we don't have two mutable borrows later on. let state_ref = self.state.clone(); let mut state = state_ref.lock().unwrap(); for ((task_id, group, worker_id), error) in finished.iter() { // Handle std::io errors on child processes. // I have never seen something like this, but it might happen. if let Some(error) = error { let (_taks_id, _child) = self .children .0 .get_mut(group) .expect("Worker group must exist when handling finished tasks.") .remove(worker_id) .expect("Errored child went missing while handling finished task."); let group = { let mut task = state.tasks.get_mut(task_id).unwrap(); task.status = TaskStatus::Done(TaskResult::Errored); task.end = Some(Local::now()); self.spawn_callback(task); task.group.clone() }; error!("Child {} failed with io::Error: {:?}", task_id, error); pause_on_failure(&mut state, group); continue; } // Handle any tasks that exited with some kind of exit code let (_task_id, mut child) = self .children .0 .get_mut(group) .expect("Worker group must exist when handling finished tasks.") .remove(worker_id) .expect("Child of task {} went away while handling finished task."); // Get the exit code of the child. // Errors really shouldn't happen in here, since we already checked if it's finished // with try_wait() before. 
let exit_code_result = child.wait(); let exit_code = exit_code_result .context(format!( "Failed on wait() for finished task {} with error: {:?}", task_id, error )) .unwrap() .code(); // Processes with exit code 0 exited successfully // Processes with `None` have been killed by a Signal let result = match exit_code { Some(0) => TaskResult::Success, Some(exit_code) => TaskResult::Failed(exit_code), None => TaskResult::Killed, }; // Update all properties on the task and get the group for later let group = { let mut task = state .tasks .get_mut(task_id) .expect("Task was removed before child process has finished!"); task.status = TaskStatus::Done(result.clone()); task.end = Some(Local::now()); self.spawn_callback(task); task.group.clone() }; if let TaskResult::Failed(_) = result { pause_on_failure(&mut state, group); } // Already remove the output files, if the daemon is being reset anyway if self.full_reset { clean_log_handles(*task_id, &self.pueue_directory); } } ok_or_shutdown!(self, save_state(&state)); } /// Gather all finished tasks and sort them by finished and errored. /// Returns a list of finished task ids and whether they errored or not. fn get_finished(&mut self) -> Vec<((usize, String, usize), Option<std::io::Error>)> { let mut finished = Vec::new(); for (group, children) in self.children.0.iter_mut() { for (worker_id, (task_id, child)) in children.iter_mut() { match child.try_wait() { // Handle a child error. Err(error) => { finished.push(((*task_id, group.clone(), *worker_id), Some(error))); } // Child process did not exit yet Ok(None) => continue, Ok(_exit_status) => { info!("Task {} just finished", task_id); finished.push(((*task_id, group.clone(), *worker_id), None)); } } } } finished } }
37.837209
96
0.507478
26fc520fd38c9bcd6ddedd9caf0c4a2a732efa9b
3,378
use anyhow::Result; use ckb_types::{ bytes::Bytes, prelude::{Builder, Entity}, }; use gw_config::BlockProducerConfig; use gw_rpc_client::rpc_client::RPCClient; use gw_types::{ core::{DepType, ScriptHashType}, offchain::{CellInfo, InputCellInfo, RollupContext}, packed::{CellDep, CellInput, CellOutput, L2Block, Script, StakeLockArgs}, prelude::{Pack, Unpack}, }; pub struct GeneratedStake { pub deps: Vec<CellDep>, pub inputs: Vec<InputCellInfo>, pub output: CellOutput, pub output_data: Bytes, } pub async fn generate( rollup_cell: &CellInfo, rollup_context: &RollupContext, block: &L2Block, block_producer_config: &BlockProducerConfig, rpc_client: &RPCClient, lock_script: Script, ) -> Result<GeneratedStake> { let owner_lock_hash = lock_script.hash(); let lock_args: Bytes = { let stake_lock_args = StakeLockArgs::new_builder() .owner_lock_hash(owner_lock_hash.pack()) .stake_block_number(block.raw().number()) .build(); let rollup_type_hash = rollup_context.rollup_script_hash.as_slice().iter(); rollup_type_hash .chain(stake_lock_args.as_slice().iter()) .cloned() .collect() }; let lock = Script::new_builder() .code_hash(rollup_context.rollup_config.stake_script_type_hash()) .hash_type(ScriptHashType::Type.into()) .args(lock_args.pack()) .build(); if let Some(unlocked_stake) = rpc_client .query_stake(rollup_context, owner_lock_hash, None) .await? 
{ let stake_lock_dep = block_producer_config.stake_cell_lock_dep.clone(); let rollup_cell_dep = CellDep::new_builder() .out_point(rollup_cell.out_point.to_owned()) .dep_type(DepType::Code.into()) .build(); let stake_cell = CellOutput::new_builder() .capacity(unlocked_stake.output.capacity()) .lock(lock) .build(); let input_unlocked_stake = InputCellInfo { input: CellInput::new_builder() .previous_output(unlocked_stake.out_point.clone()) .build(), cell: unlocked_stake, }; let generated_stake = GeneratedStake { deps: vec![stake_lock_dep.into(), rollup_cell_dep], inputs: vec![input_unlocked_stake], output: stake_cell, output_data: Bytes::new(), }; return Ok(generated_stake); } // No unlocked stake, collect free ckb cells to generate one let stake_capacity = { let required_staking_capacity = rollup_context .rollup_config .required_staking_capacity() .unpack(); assert!(lock.as_slice().len() < u64::max_value() as usize); let min_capacity = (8u64 + lock.as_slice().len() as u64) * 100000000; if required_staking_capacity <= min_capacity { min_capacity } else { required_staking_capacity } }; let stake_cell = CellOutput::new_builder() .capacity(stake_capacity.pack()) .lock(lock) .build(); let generated_stake = GeneratedStake { deps: vec![], inputs: vec![], output: stake_cell, output_data: Bytes::new(), }; Ok(generated_stake) }
29.893805
83
0.615157
2f8ae61f885bc009a0b4d9582a2c62313ee2aad7
10,983
use super::highlighters::container_code; use yew::prelude::*; use yew_prism::Prism; use yew_styles::layouts::{ container::{AlignContent, AlignItems, Container, Direction, JustifyContent, Mode, Wrap}, item::{AlignSelf, Item, ItemLayout}, }; pub struct LayoutsPage; pub struct LayoutsPageModel; impl Component for LayoutsPage { type Message = (); type Properties = (); fn create(_props: Self::Properties, _link: ComponentLink<Self>) -> Self { LayoutsPage {} } fn update(&mut self, _: Self::Message) -> ShouldRender { false } fn change(&mut self, _props: Self::Properties) -> ShouldRender { false } fn view(&self) -> Html { html! { <> <h1>{"Layouts Components"}</h1> <h2>{"Features required"}</h2> <span><code>{"layouts"}</code></span> <h2>{"Code example"}</h2> <Prism code=container_code() language="rust" /> <h2>{"Container properties"}</h2> <ul> <li><b>{"direction: "}</b>{"which direction are placing the items. Options included in "}<code>{"Direction"}</code>{". Required."}</li> <li><b>{"wrap: "}</b>{"set a wrap for the items. Options included in "}<code>{"Wrap"}</code>{". Default "}<code>{"Wrap"}</code>{"."}</li> <li><b>{"justify_content: "}</b>{"set how will be justified the content. Options included in "}<code>{"JustifyContent"}</code>{". Default "}<code>{"FlexStart(No Mode)"}</code>{"."}</li> <li><b>{"align_content: "}</b>{"set how will be aligned the content. Options included in "}<code>{"AlignContent"}</code>{". Default "}<code>{"Stretch(NoMode)"}</code>{"."}</li> <li><b>{"align_items: "}</b>{"set how will be aligned the items. Options included in "}<code>{"AlignItems"}</code>{". Default "}<code>{"Stretch(NoMode)"}</code>{"."}</li> <li><b>{"mode: "}</b>{"safe postion handler which is additional option for justify_content, align_content and align_items. Options included in "}<code>{"Mode"}</code>{". 
Default "}<code>{"NoMode"}</code>{"."}</li> <li><b>{"key: "}</b>{"general property to add keys."}</li> <li><b>{"code_ref: "}</b>{"general property to get the ref of the component."}</li> <li><b>{"id: "}</b>{"general property to add custom id"}</li> <li><b>{"class_name: "}</b>{"general property to add custom class styles"}</li> </ul> <h2>{"Item properties"}</h2> <ul> <li><b>{"layouts: "}</b>{"percent of the layout that will take the item. The value is a vector "}<code>{"Vec<ItemLayout>"}</code>{". Required"}</li> <li><b>{"align_self: "}</b>{"align the item itself. Options include in "}<code>{"AlignSelf"}</code>{". Default "}<code>{"Auto"}</code></li> <li><b>{"onclick_signal: "}</b>{"click event for the item. Default "}<code>{"noop()"}</code></li> <li><b>{"key: "}</b>{"general property to add keys."}</li> <li><b>{"code_ref: "}</b>{"general property to get the ref of the component."}</li> <li><b>{"id: "}</b>{"general property to add custom id"}</li> <li><b>{"class_name: "}</b>{"general property to add custom class styles"}</li> </ul> <h2>{"Visual examples"}</h2> <h3>{"Wrap"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap> {(1..13).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"No wrap"}</h3> <Container direction=Direction::Row wrap=Wrap::Nowrap> {(1..13).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"Wrap reverse"}</h3> <Container direction=Direction::Row wrap=Wrap::WrapReverse> {(1..13).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"Row direction:"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"Row reverse direction:"}</h3> <Container direction=Direction::RowReverse wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"Column direction:"}</h3> <Container direction=Direction::Column wrap=Wrap::Wrap> {(1..5).map(|x| 
LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"Column reverse direction:"}</h3> <Container direction=Direction::ColumnReverse wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> <h3>{"Combination of column and row direction"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap> <Item layouts=vec!(ItemLayout::ItXs(6)) > <Container direction=Direction::Column wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> <Item layouts=vec!(ItemLayout::ItXs(6)) > <Container direction=Direction::Row wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> </Container> <h3>{"Justify Content"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap justify_content=JustifyContent::Rigth(Mode::NoMode) > <Item layouts=vec!(ItemLayout::ItXs(4)) > <Container direction=Direction::Column wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> <Item layouts=vec!(ItemLayout::ItXs(4)) > <Container direction=Direction::Column wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> </Container> <p>{"To know about more options please visit "} <a href="https://developer.mozilla.org/en-US/docs/Web/CSS/justify-content" target="_bank">{"Justify Content"}</a> </p> <h3>{"Align Content"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap align_content=AlignContent::FlexEnd(Mode::NoMode) class_name="align" > <Item layouts=vec!(ItemLayout::ItXs(6)) > <Container direction=Direction::Column wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> <Item layouts=vec!(ItemLayout::ItXs(6)) > <Container direction=Direction::Column wrap=Wrap::Wrap> {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> </Container> <p>{"To know about more options please visit "} <a 
href="https://developer.mozilla.org/en-US/docs/Web/CSS/align-content" target="_bank">{"Align Content"}</a> </p> <h3>{"Align Items"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap> <Item layouts=vec!(ItemLayout::ItXs(6)) > <Container direction=Direction::Column wrap=Wrap::Wrap align_items=AlignItems::Center(Mode::NoMode) > {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> <Item layouts=vec!(ItemLayout::ItXs(6)) > <Container direction=Direction::Column wrap=Wrap::Wrap align_items=AlignItems::FlexEnd(Mode::NoMode) > {(1..5).map(|x| LayoutsPageModel.get_items(x)).collect::<Html>()} </Container> </Item> </Container> <p>{"To know about more options please visit "} <a href="https://developer.mozilla.org/en-US/docs/Web/CSS/align-items" target="_bank">{"Align Items"}</a> </p> <h3>{"Align self"}</h3> <Container direction=Direction::Row wrap=Wrap::Wrap class_name="align-item"> <Item layouts=vec!(ItemLayout::ItXs(4)) align_self=AlignSelf::FlexStart> <h3>{"start"}</h3> </Item> <Item layouts=vec!(ItemLayout::ItXs(4)) align_self=AlignSelf::Center> <h3>{"center"}</h3> </Item> <Item layouts=vec!(ItemLayout::ItXs(4)) align_self=AlignSelf::FlexEnd> <h3>{"end"}</h3> </Item> </Container> <p>{"To know about more options please visit "} <a href="https://developer.mozilla.org/en-US/docs/Web/CSS/align-self" target="_bank">{"Align Self"}</a> </p> </> } } } impl LayoutsPageModel { fn get_items(self, number: i8) -> Html { html! { <Item layouts=vec!(ItemLayout::ItXl(3), ItemLayout::ItL(3), ItemLayout::ItM(6), ItemLayout::ItXs(12)) > <h3>{number}</h3> </Item> } } }
49.472973
233
0.461623
3a304c629293062dfbb5d8eb313c12889c6ff72b
58,122
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A UTF-8 encoded, growable string. //! //! This module contains the [`String`] type, a trait for converting //! [`ToString`]s, and several error types that may result from working with //! [`String`]s. //! //! [`String`]: struct.String.html //! [`ToString`]: trait.ToString.html //! //! # Examples //! //! There are multiple ways to create a new `String` from a string literal: //! //! ```rust //! let s = "Hello".to_string(); //! //! let s = String::from("world"); //! let s: String = "also this".into(); //! ``` //! //! You can create a new `String` from an existing one by concatenating with //! `+`: //! //! ```rust //! let s = "Hello".to_string(); //! //! let message = s + " world!"; //! ``` //! //! If you have a vector of valid UTF-8 bytes, you can make a `String` out of //! it. You can do the reverse too. //! //! ```rust //! let sparkle_heart = vec![240, 159, 146, 150]; //! //! // We know these bytes are valid, so we'll use `unwrap()`. //! let sparkle_heart = String::from_utf8(sparkle_heart).unwrap(); //! //! assert_eq!("💖", sparkle_heart); //! //! let bytes = sparkle_heart.into_bytes(); //! //! assert_eq!(bytes, [240, 159, 146, 150]); //! 
``` #![stable(feature = "rust1", since = "1.0.0")] use core::fmt; use core::hash; use core::iter::{FromIterator, FusedIterator}; use core::mem; use core::ops::{self, Add, AddAssign, Index, IndexMut}; use core::ptr; use core::str::pattern::Pattern; use rustc_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER}; use rustc_unicode::str as unicode_str; use borrow::{Cow, ToOwned}; use range::RangeArgument; use str::{self, FromStr, Utf8Error, Chars}; use vec::Vec; use boxed::Box; /// A UTF-8 encoded, growable string. /// /// The `String` type is the most common string type that has ownership over the /// contents of the string. It has a close relationship with its borrowed /// counterpart, the primitive [`str`]. /// /// [`str`]: ../../std/primitive.str.html /// /// # Examples /// /// You can create a `String` from a literal string with `String::from`: /// /// ``` /// let hello = String::from("Hello, world!"); /// ``` /// /// You can append a [`char`] to a `String` with the [`push()`] method, and /// append a [`&str`] with the [`push_str()`] method: /// /// ``` /// let mut hello = String::from("Hello, "); /// /// hello.push('w'); /// hello.push_str("orld!"); /// ``` /// /// [`char`]: ../../std/primitive.char.html /// [`push()`]: #method.push /// [`push_str()`]: #method.push_str /// /// If you have a vector of UTF-8 bytes, you can create a `String` from it with /// the [`from_utf8()`] method: /// /// ``` /// // some bytes, in a vector /// let sparkle_heart = vec![240, 159, 146, 150]; /// /// // We know these bytes are valid, so we'll use `unwrap()`. /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap(); /// /// assert_eq!("💖", sparkle_heart); /// ``` /// /// [`from_utf8()`]: #method.from_utf8 /// /// # UTF-8 /// /// `String`s are always valid UTF-8. This has a few implications, the first of /// which is that if you need a non-UTF-8 string, consider [`OsString`]. It is /// similar, but without the UTF-8 constraint. 
The second implication is that /// you cannot index into a `String`: /// /// ```ignore /// let s = "hello"; /// /// println!("The first letter of s is {}", s[0]); // ERROR!!! /// ``` /// /// [`OsString`]: ../../std/ffi/struct.OsString.html /// /// Indexing is intended to be a constant-time operation, but UTF-8 encoding /// does not allow us to do this. Furthermore, it's not clear what sort of /// thing the index should return: a byte, a codepoint, or a grapheme cluster. /// The [`as_bytes()`] and [`chars()`] methods return iterators over the first /// two, respectively. /// /// [`as_bytes()`]: #method.as_bytes /// [`chars()`]: #method.chars /// /// # Deref /// /// `String`s implement [`Deref`]`<Target=str>`, and so inherit all of [`str`]'s /// methods. In addition, this means that you can pass a `String` to any /// function which takes a [`&str`] by using an ampersand (`&`): /// /// ``` /// fn takes_str(s: &str) { } /// /// let s = String::from("Hello"); /// /// takes_str(&s); /// ``` /// /// [`&str`]: ../../std/primitive.str.html /// [`Deref`]: ../../std/ops/trait.Deref.html /// /// This will create a [`&str`] from the `String` and pass it in. This /// conversion is very inexpensive, and so generally, functions will accept /// [`&str`]s as arguments unless they need a `String` for some specific reason. /// /// /// # Representation /// /// A `String` is made up of three components: a pointer to some bytes, a /// length, and a capacity. The pointer points to an internal buffer `String` /// uses to store its data. The length is the number of bytes currently stored /// in the buffer, and the capacity is the size of the buffer in bytes. As such, /// the length will always be less than or equal to the capacity. /// /// This buffer is always stored on the heap. 
/// /// You can look at these with the [`as_ptr()`], [`len()`], and [`capacity()`] /// methods: /// /// ``` /// use std::mem; /// /// let story = String::from("Once upon a time..."); /// /// let ptr = story.as_ptr(); /// let len = story.len(); /// let capacity = story.capacity(); /// /// // story has nineteen bytes /// assert_eq!(19, len); /// /// // Now that we have our parts, we throw the story away. /// mem::forget(story); /// /// // We can re-build a String out of ptr, len, and capacity. This is all /// // unsafe because we are responsible for making sure the components are /// // valid: /// let s = unsafe { String::from_raw_parts(ptr as *mut _, len, capacity) } ; /// /// assert_eq!(String::from("Once upon a time..."), s); /// ``` /// /// [`as_ptr()`]: #method.as_ptr /// [`len()`]: #method.len /// [`capacity()`]: #method.capacity /// /// If a `String` has enough capacity, adding elements to it will not /// re-allocate. For example, consider this program: /// /// ``` /// let mut s = String::new(); /// /// println!("{}", s.capacity()); /// /// for _ in 0..5 { /// s.push_str("hello"); /// println!("{}", s.capacity()); /// } /// ``` /// /// This will output the following: /// /// ```text /// 0 /// 5 /// 10 /// 20 /// 20 /// 40 /// ``` /// /// At first, we have no memory allocated at all, but as we append to the /// string, it increases its capacity appropriately. If we instead use the /// [`with_capacity()`] method to allocate the correct capacity initially: /// /// ``` /// let mut s = String::with_capacity(25); /// /// println!("{}", s.capacity()); /// /// for _ in 0..5 { /// s.push_str("hello"); /// println!("{}", s.capacity()); /// } /// ``` /// /// [`with_capacity()`]: #method.with_capacity /// /// We end up with a different output: /// /// ```text /// 25 /// 25 /// 25 /// 25 /// 25 /// 25 /// ``` /// /// Here, there's no need to allocate more memory inside the loop. 
#[derive(PartialOrd, Eq, Ord)] #[stable(feature = "rust1", since = "1.0.0")] pub struct String { vec: Vec<u8>, } /// A possible error value when converting a `String` from a UTF-8 byte vector. /// /// This type is the error type for the [`from_utf8()`] method on [`String`]. It /// is designed in such a way to carefully avoid reallocations: the /// [`into_bytes()`] method will give back the byte vector that was used in the /// conversion attempt. /// /// [`from_utf8()`]: struct.String.html#method.from_utf8 /// [`String`]: struct.String.html /// [`into_bytes()`]: struct.FromUtf8Error.html#method.into_bytes /// /// The [`Utf8Error`] type provided by [`std::str`] represents an error that may /// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's /// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error` /// through the [`utf8_error()`] method. /// /// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html /// [`std::str`]: ../../std/str/index.html /// [`u8`]: ../../std/primitive.u8.html /// [`&str`]: ../../std/primitive.str.html /// [`utf8_error()`]: #method.utf8_error /// /// # Examples /// /// Basic usage: /// /// ``` /// // some invalid bytes, in a vector /// let bytes = vec![0, 159]; /// /// let value = String::from_utf8(bytes); /// /// assert!(value.is_err()); /// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct FromUtf8Error { bytes: Vec<u8>, error: Utf8Error, } /// A possible error value when converting a `String` from a UTF-16 byte slice. /// /// This type is the error type for the [`from_utf16()`] method on [`String`]. 
///
/// [`from_utf16()`]: struct.String.html#method.from_utf16
/// [`String`]: struct.String.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞mu<invalid>ic
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
///           0xD800, 0x0069, 0x0063];
///
/// assert!(String::from_utf16(v).is_err());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
// Zero-sized: UTF-16 decoding carries no recoverable detail (unlike
// `FromUtf8Error`, which keeps the input bytes).
pub struct FromUtf16Error(());

impl String {
    /// Creates a new empty `String`.
    ///
    /// Given that the `String` is empty, this will not allocate any initial
    /// buffer. While that means that this initial operation is very
    /// inexpensive, but may cause excessive allocation later, when you add
    /// data. If you have an idea of how much data the `String` will hold,
    /// consider the [`with_capacity()`] method to prevent excessive
    /// re-allocation.
    ///
    /// [`with_capacity()`]: #method.with_capacity
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = String::new();
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new() -> String {
        String { vec: Vec::new() }
    }

    /// Creates a new empty `String` with a particular capacity.
    ///
    /// `String`s have an internal buffer to hold their data. The capacity is
    /// the length of that buffer, and can be queried with the [`capacity()`]
    /// method. This method creates an empty `String`, but one with an initial
    /// buffer that can hold `capacity` bytes. This is useful when you may be
    /// appending a bunch of data to the `String`, reducing the number of
    /// reallocations it needs to do.
    ///
    /// [`capacity()`]: #method.capacity
    ///
    /// If the given capacity is `0`, no allocation will occur, and this method
    /// is identical to the [`new()`] method.
    ///
    /// [`new()`]: #method.new
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::with_capacity(10);
    ///
    /// // The String contains no chars, even though it has capacity for more
    /// assert_eq!(s.len(), 0);
    ///
    /// // These are all done without reallocating...
    /// let cap = s.capacity();
    /// for i in 0..10 {
    ///     s.push('a');
    /// }
    ///
    /// assert_eq!(s.capacity(), cap);
    ///
    /// // ...but this may make the vector reallocate
    /// s.push('a');
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize) -> String {
        String { vec: Vec::with_capacity(capacity) }
    }

    // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
    // required for this method definition, is not available. Since we don't
    // require this method for testing purposes, I'll just stub it
    // NB see the slice::hack module in slice.rs for more information
    #[inline]
    #[cfg(test)]
    pub fn from_str(_: &str) -> String {
        panic!("not available with cfg(test)");
    }

    /// Converts a vector of bytes to a `String`.
    ///
    /// A string slice ([`&str`]) is made of bytes ([`u8`]), and a vector of bytes
    /// ([`Vec<u8>`]) is made of bytes, so this function converts between the
    /// two. Not all byte slices are valid `String`s, however: `String`
    /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
    /// the bytes are valid UTF-8, and then does the conversion.
    ///
    /// [`&str`]: ../../std/primitive.str.html
    /// [`u8`]: ../../std/primitive.u8.html
    /// [`Vec<u8>`]: ../../std/vec/struct.Vec.html
    ///
    /// If you are sure that the byte slice is valid UTF-8, and you don't want
    /// to incur the overhead of the validity check, there is an unsafe version
    /// of this function, [`from_utf8_unchecked()`], which has the same behavior
    /// but skips the check.
    ///
    /// [`from_utf8_unchecked()`]: struct.String.html#method.from_utf8_unchecked
    ///
    /// This method will take care to not copy the vector, for efficiency's
    /// sake.
    ///
    /// If you need a `&str` instead of a `String`, consider
    /// [`str::from_utf8()`].
    ///
    /// [`str::from_utf8()`]: ../../std/str/fn.from_utf8.html
    ///
    /// # Errors
    ///
    /// Returns `Err` if the slice is not UTF-8 with a description as to why the
    /// provided bytes are not UTF-8. The vector you moved in is also included.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some bytes, in a vector
    /// let sparkle_heart = vec![240, 159, 146, 150];
    ///
    /// // We know these bytes are valid, so we'll use `unwrap()`.
    /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
    ///
    /// assert_eq!("💖", sparkle_heart);
    /// ```
    ///
    /// Incorrect bytes:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let sparkle_heart = vec![0, 159, 146, 150];
    ///
    /// assert!(String::from_utf8(sparkle_heart).is_err());
    /// ```
    ///
    /// See the docs for [`FromUtf8Error`] for more details on what you can do
    /// with this error.
    ///
    /// [`FromUtf8Error`]: struct.FromUtf8Error.html
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
        // Validate against a borrow so `vec` can be moved (not copied) into
        // either the success or the error value.
        match str::from_utf8(&vec) {
            Ok(..) => Ok(String { vec: vec }),
            Err(e) => {
                Err(FromUtf8Error {
                    bytes: vec,
                    error: e,
                })
            }
        }
    }

    /// Converts a slice of bytes to a string, including invalid characters.
    ///
    /// Strings are made of bytes ([`u8`]), and a slice of bytes
    /// ([`&[u8]`][byteslice]) is made of bytes, so this function converts
    /// between the two. Not all byte slices are valid strings, however: strings
    /// are required to be valid UTF-8. During this conversion,
    /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
    /// `U+FFFD REPLACEMENT CHARACTER`, which looks like this: �
    ///
    /// [`u8`]: ../../std/primitive.u8.html
    /// [byteslice]: ../../std/primitive.slice.html
    ///
    /// If you are sure that the byte slice is valid UTF-8, and you don't want
    /// to incur the overhead of the conversion, there is an unsafe version
    /// of this function, [`from_utf8_unchecked()`], which has the same behavior
    /// but skips the checks.
    ///
    /// [`from_utf8_unchecked()`]: struct.String.html#method.from_utf8_unchecked
    ///
    /// This function returns a [`Cow<'a, str>`].
    /// If our byte slice is invalid
    /// UTF-8, then we need to insert the replacement characters, which will
    /// change the size of the string, and hence, require a `String`. But if
    /// it's already valid UTF-8, we don't need a new allocation. This return
    /// type allows us to handle both cases.
    ///
    /// [`Cow<'a, str>`]: ../../std/borrow/enum.Cow.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some bytes, in a vector
    /// let sparkle_heart = vec![240, 159, 146, 150];
    ///
    /// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart);
    ///
    /// assert_eq!("💖", sparkle_heart);
    /// ```
    ///
    /// Incorrect bytes:
    ///
    /// ```
    /// // some invalid bytes
    /// let input = b"Hello \xF0\x90\x80World";
    /// let output = String::from_utf8_lossy(input);
    ///
    /// assert_eq!("Hello �World", output);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn from_utf8_lossy<'a>(v: &'a [u8]) -> Cow<'a, str> {
        let mut i;
        match str::from_utf8(v) {
            // Fast path: the entire slice is valid UTF-8, so we can borrow it
            // without allocating.
            Ok(s) => return Cow::Borrowed(s),
            // Otherwise `i` is the length of the longest valid prefix; the
            // re-validation loop below resumes from there.
            Err(e) => i = e.valid_up_to(),
        }

        // 0b10xx_xxxx — the tag bits marking a UTF-8 continuation byte.
        const TAG_CONT_U8: u8 = 128;
        const REPLACEMENT: &'static [u8] = b"\xEF\xBF\xBD"; // U+FFFD in UTF-8

        let total = v.len();
        fn unsafe_get(xs: &[u8], i: usize) -> u8 {
            // SAFETY: all callers pass `i < xs.len()` (guarded either by the
            // `while i < total` loop condition or by `safe_get`).
            unsafe { *xs.get_unchecked(i) }
        }
        // Out-of-bounds reads yield 0, which can never look like a valid
        // continuation byte, so truncated sequences fall into the error path.
        fn safe_get(xs: &[u8], i: usize, total: usize) -> u8 {
            if i >= total {
                0
            } else {
                unsafe_get(xs, i)
            }
        }

        let mut res = String::with_capacity(total);
        if i > 0 {
            // SAFETY: `v[..i]` was verified valid UTF-8 by `from_utf8` above.
            unsafe { res.as_mut_vec().extend_from_slice(&v[..i]) };
        }

        // subseqidx is the index of the first byte of the subsequence we're
        // looking at. It's used to copy a bunch of contiguous good codepoints
        // at once instead of copying them one by one.
        let mut subseqidx = i;

        while i < total {
            let i_ = i;
            let byte = unsafe_get(v, i);
            i += 1;

            // Flushes the pending run of good bytes, then appends U+FFFD and
            // restarts the run after the bad byte(s).
            macro_rules! error { () => ({
                // SAFETY: everything appended here is valid UTF-8 — a verified
                // good run plus the REPLACEMENT encoding of U+FFFD.
                unsafe {
                    if subseqidx != i_ {
                        res.as_mut_vec().extend_from_slice(&v[subseqidx..i_]);
                    }
                    subseqidx = i;
                    res.as_mut_vec().extend_from_slice(REPLACEMENT);
                }
            })}

            if byte < 128 {
                // ASCII is always valid; subseqidx handles this
            } else {
                let w = unicode_str::utf8_char_width(byte);

                match w {
                    2 => {
                        if safe_get(v, i, total) & 192 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                    }
                    3 => {
                        // Second-byte ranges restrict overlong encodings and
                        // surrogates (0xED 0xA0..0xBF would be D800..DFFF).
                        match (byte, safe_get(v, i, total)) {
                            (0xE0, 0xA0...0xBF) => (),
                            (0xE1...0xEC, 0x80...0xBF) => (),
                            (0xED, 0x80...0x9F) => (),
                            (0xEE...0xEF, 0x80...0xBF) => (),
                            _ => {
                                error!();
                                continue;
                            }
                        }
                        i += 1;
                        if safe_get(v, i, total) & 192 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                    }
                    4 => {
                        // Second-byte ranges reject overlong forms (0xF0) and
                        // values beyond U+10FFFF (0xF4 0x90..).
                        match (byte, safe_get(v, i, total)) {
                            (0xF0, 0x90...0xBF) => (),
                            (0xF1...0xF3, 0x80...0xBF) => (),
                            (0xF4, 0x80...0x8F) => (),
                            _ => {
                                error!();
                                continue;
                            }
                        }
                        i += 1;
                        if safe_get(v, i, total) & 192 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                        if safe_get(v, i, total) & 192 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                    }
                    _ => {
                        // Invalid lead byte (continuation byte or 0xF5..0xFF).
                        error!();
                        continue;
                    }
                }
            }
        }
        if subseqidx < total {
            // SAFETY: the final run `v[subseqidx..total]` passed validation in
            // the loop above.
            unsafe { res.as_mut_vec().extend_from_slice(&v[subseqidx..total]) };
        }
        Cow::Owned(res)
    }

    /// Decode a UTF-16 encoded vector `v` into a `String`, returning `Err`
    /// if `v` contains any invalid data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // 𝄞music
    /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
    ///           0x0073, 0x0069, 0x0063];
    /// assert_eq!(String::from("𝄞music"),
    ///            String::from_utf16(v).unwrap());
    ///
    /// // 𝄞mu<invalid>ic
    /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
    ///           0xD800, 0x0069, 0x0063];
    /// assert!(String::from_utf16(v).is_err());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> {
        // Collecting into Result short-circuits on the first invalid unit; the
        // decoder's error detail is discarded since FromUtf16Error carries none.
        decode_utf16(v.iter().cloned()).collect::<Result<_, _>>().map_err(|_| FromUtf16Error(()))
    }

    /// Decode a UTF-16 encoded vector `v` into a string, replacing
    /// invalid data with the replacement character (U+FFFD).
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // 𝄞mus<invalid>ic<invalid>
    /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
    ///           0x0073, 0xDD1E, 0x0069, 0x0063,
    ///           0xD834];
    ///
    /// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
    ///            String::from_utf16_lossy(v));
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn from_utf16_lossy(v: &[u16]) -> String {
        // Each decode error (lone surrogate) becomes one U+FFFD.
        decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
    }

    /// Creates a new `String` from a length, capacity, and pointer.
    ///
    /// # Safety
    ///
    /// This is highly unsafe, due to the number of invariants that aren't
    /// checked:
    ///
    /// * The memory at `ptr` needs to have been previously allocated by the
    ///   same allocator the standard library uses.
    /// * `length` needs to be less than or equal to `capacity`.
    /// * `capacity` needs to be the correct value.
    ///
    /// Violating these may cause problems like corrupting the allocator's
    /// internal datastructures.
    ///
    /// The ownership of `ptr` is effectively transferred to the
    /// `String` which may then deallocate, reallocate or change the
    /// contents of memory pointed to by the pointer at will. Ensure
    /// that nothing else uses the pointer after calling this
    /// function.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::mem;
    ///
    /// unsafe {
    ///     let s = String::from("hello");
    ///     let ptr = s.as_ptr();
    ///     let len = s.len();
    ///     let capacity = s.capacity();
    ///
    ///     mem::forget(s);
    ///
    ///     let s = String::from_raw_parts(ptr as *mut _, len, capacity);
    ///
    ///     assert_eq!(String::from("hello"), s);
    /// }
    /// ```
    // NOTE(review): the caller must also guarantee the first `length` bytes are
    // valid UTF-8; this delegates straight to `Vec::from_raw_parts` with no
    // check of the String invariant.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
        String { vec: Vec::from_raw_parts(buf, length, capacity) }
    }

    /// Converts a vector of bytes to a `String` without checking that the
    /// string contains valid UTF-8.
    ///
    /// See the safe version, [`from_utf8()`], for more details.
    ///
    /// [`from_utf8()`]: struct.String.html#method.from_utf8
    ///
    /// # Safety
    ///
    /// This function is unsafe because it does not check that the bytes passed
    /// to it are valid UTF-8. If this constraint is violated, it may cause
    /// memory unsafety issues with future users of the `String`, as the rest of
    /// the standard library assumes that `String`s are valid UTF-8.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some bytes, in a vector
    /// let sparkle_heart = vec![240, 159, 146, 150];
    ///
    /// let sparkle_heart = unsafe {
    ///     String::from_utf8_unchecked(sparkle_heart)
    /// };
    ///
    /// assert_eq!("💖", sparkle_heart);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String {
        String { vec: bytes }
    }

    /// Converts a `String` into a byte vector.
    ///
    /// This consumes the `String`, so we do not need to copy its contents.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = String::from("hello");
    /// let bytes = s.into_bytes();
    ///
    /// assert_eq!(&[104, 101, 108, 108, 111][..], &bytes[..]);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_bytes(self) -> Vec<u8> {
        self.vec
    }

    /// Extracts a string slice containing the entire string.
    #[inline]
    #[stable(feature = "string_as_str", since = "1.7.0")]
    pub fn as_str(&self) -> &str {
        // Goes through the Deref impl.
        self
    }

    /// Extracts a string slice containing the entire string.
    #[inline]
    #[stable(feature = "string_as_str", since = "1.7.0")]
    pub fn as_mut_str(&mut self) -> &mut str {
        // Goes through the DerefMut impl.
        self
    }

    /// Appends a given string slice onto the end of this `String`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("foo");
    ///
    /// s.push_str("bar");
    ///
    /// assert_eq!("foobar", s);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push_str(&mut self, string: &str) {
        // Appending whole valid-UTF-8 slices preserves the String invariant.
        self.vec.extend_from_slice(string.as_bytes())
    }

    /// Returns this `String`'s capacity, in bytes.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = String::with_capacity(10);
    ///
    /// assert!(s.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        self.vec.capacity()
    }

    /// Ensures that this `String`'s capacity is at least `additional` bytes
    /// larger than its length.
    ///
    /// The capacity may be increased by more than `additional` bytes if it
    /// chooses, to prevent frequent reallocations.
    ///
    /// If you do not want this "at least" behavior, see the [`reserve_exact()`]
    /// method.
    ///
    /// [`reserve_exact()`]: #method.reserve_exact
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::new();
    ///
    /// s.reserve(10);
    ///
    /// assert!(s.capacity() >= 10);
    /// ```
    ///
    /// This may not actually increase the capacity:
    ///
    /// ```
    /// let mut s = String::with_capacity(10);
    /// s.push('a');
    /// s.push('b');
    ///
    /// // s now has a length of 2 and a capacity of 10
    /// assert_eq!(2, s.len());
    /// assert_eq!(10, s.capacity());
    ///
    /// // Since we already have an extra 8 capacity, calling this...
    /// s.reserve(8);
    ///
    /// // ... doesn't actually increase.
    /// assert_eq!(10, s.capacity());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        self.vec.reserve(additional)
    }

    /// Ensures that this `String`'s capacity is `additional` bytes
    /// larger than its length.
    ///
    /// Consider using the [`reserve()`] method unless you absolutely know
    /// better than the allocator.
    ///
    /// [`reserve()`]: #method.reserve
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::new();
    ///
    /// s.reserve_exact(10);
    ///
    /// assert!(s.capacity() >= 10);
    /// ```
    ///
    /// This may not actually increase the capacity:
    ///
    /// ```
    /// let mut s = String::with_capacity(10);
    /// s.push('a');
    /// s.push('b');
    ///
    /// // s now has a length of 2 and a capacity of 10
    /// assert_eq!(2, s.len());
    /// assert_eq!(10, s.capacity());
    ///
    /// // Since we already have an extra 8 capacity, calling this...
    /// s.reserve_exact(8);
    ///
    /// // ... doesn't actually increase.
    /// assert_eq!(10, s.capacity());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        self.vec.reserve_exact(additional)
    }

    /// Shrinks the capacity of this `String` to match its length.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("foo");
    ///
    /// s.reserve(100);
    /// assert!(s.capacity() >= 100);
    ///
    /// s.shrink_to_fit();
    /// assert_eq!(3, s.capacity());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        self.vec.shrink_to_fit()
    }

    /// Appends the given `char` to the end of this `String`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("abc");
    ///
    /// s.push('1');
    /// s.push('2');
    /// s.push('3');
    ///
    /// assert_eq!("abc123", s);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push(&mut self, ch: char) {
        match ch.len_utf8() {
            // Fast path: ASCII is a single byte, so push it directly.
            1 => self.vec.push(ch as u8),
            _ => self.vec.extend_from_slice(ch.encode_utf8().as_slice()),
        }
    }

    /// Returns a byte slice of this `String`'s contents.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = String::from("hello");
    ///
    /// assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn as_bytes(&self) -> &[u8] {
        &self.vec
    }

    /// Shortens this `String` to the specified length.
    ///
    /// If `new_len` is greater than the string's current length, this has no
    /// effect.
    ///
    /// # Panics
    ///
    /// Panics if `new_len` does not lie on a [`char`] boundary.
    ///
    /// [`char`]: ../../std/primitive.char.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("hello");
    ///
    /// s.truncate(2);
    ///
    /// assert_eq!("he", s);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn truncate(&mut self, new_len: usize) {
        if new_len <= self.len() {
            // The boundary check keeps the UTF-8 invariant: we never cut a
            // multi-byte character in half.
            assert!(self.is_char_boundary(new_len));
            self.vec.truncate(new_len)
        }
    }

    /// Removes the last character from the string buffer and returns it.
    ///
    /// Returns `None` if this `String` is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("foo");
    ///
    /// assert_eq!(s.pop(), Some('o'));
    /// assert_eq!(s.pop(), Some('o'));
    /// assert_eq!(s.pop(), Some('f'));
    ///
    /// assert_eq!(s.pop(), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop(&mut self) -> Option<char> {
        let ch = match self.chars().rev().next() {
            Some(ch) => ch,
            None => return None,
        };
        let newlen = self.len() - ch.len_utf8();
        // SAFETY: `newlen` is the start of the final char, so it is a char
        // boundary and the prefix remains valid UTF-8.
        unsafe {
            self.vec.set_len(newlen);
        }
        Some(ch)
    }

    /// Removes a `char` from this `String` at a byte position and returns it.
    ///
    /// This is an `O(n)` operation, as it requires copying every element in the
    /// buffer.
    ///
    /// # Panics
    ///
    /// Panics if `idx` is larger than or equal to the `String`'s length,
    /// or if it does not lie on a [`char`] boundary.
    ///
    /// [`char`]: ../../std/primitive.char.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("foo");
    ///
    /// assert_eq!(s.remove(0), 'f');
    /// assert_eq!(s.remove(1), 'o');
    /// assert_eq!(s.remove(0), 'o');
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove(&mut self, idx: usize) -> char {
        // The slice operation also panics if `idx` is not a char boundary or
        // is out of bounds, giving the documented panic behavior.
        let ch = match self[idx..].chars().next() {
            Some(ch) => ch,
            None => panic!("cannot remove a char from the end of a string"),
        };

        let next = idx + ch.len_utf8();
        let len = self.len();
        // SAFETY: shifts the valid UTF-8 tail left over the removed char
        // (regions may overlap, hence `ptr::copy`), then shrinks the length by
        // exactly the removed char's width.
        unsafe {
            ptr::copy(self.vec.as_ptr().offset(next as isize),
                      self.vec.as_mut_ptr().offset(idx as isize),
                      len - next);
            self.vec.set_len(len - (next - idx));
        }
        ch
    }

    /// Inserts a character into this `String` at a byte position.
    ///
    /// This is an `O(n)` operation as it requires copying every element in the
    /// buffer.
    ///
    /// # Panics
    ///
    /// Panics if `idx` is larger than the `String`'s length, or if it does not
    /// lie on a [`char`] boundary.
    ///
    /// [`char`]: ../../std/primitive.char.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::with_capacity(3);
    ///
    /// s.insert(0, 'f');
    /// s.insert(1, 'o');
    /// s.insert(2, 'o');
    ///
    /// assert_eq!("foo", s);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(&mut self, idx: usize, ch: char) {
        let len = self.len();
        assert!(idx <= len);
        assert!(self.is_char_boundary(idx));
        let bits = ch.encode_utf8();

        // SAFETY: `idx` was asserted in-bounds and on a char boundary, and
        // `bits` is the valid UTF-8 encoding of `ch`.
        unsafe {
            self.insert_bytes(idx, bits.as_slice());
        }
    }

    // Shared splice helper for `insert` and `insert_str`.
    // SAFETY contract: caller guarantees `idx <= len`, `idx` is a char
    // boundary, and `bytes` is valid UTF-8.
    unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) {
        let len = self.len();
        let amt = bytes.len();
        self.vec.reserve(amt);

        // Shift the tail right by `amt` (overlapping copy), write the new
        // bytes into the gap, then publish the new length.
        ptr::copy(self.vec.as_ptr().offset(idx as isize),
                  self.vec.as_mut_ptr().offset((idx + amt) as isize),
                  len - idx);
        ptr::copy(bytes.as_ptr(),
                  self.vec.as_mut_ptr().offset(idx as isize),
                  amt);
        self.vec.set_len(len + amt);
    }

    /// Inserts a string slice into this `String` at a byte position.
    ///
    /// This is an `O(n)` operation as it requires copying every element in the
    /// buffer.
    ///
    /// # Panics
    ///
    /// Panics if `idx` is larger than the `String`'s length, or if it does not
    /// lie on a [`char`] boundary.
    ///
    /// [`char`]: ../../std/primitive.char.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(insert_str)]
    ///
    /// let mut s = String::from("bar");
    ///
    /// s.insert_str(0, "foo");
    ///
    /// assert_eq!("foobar", s);
    /// ```
    #[inline]
    #[unstable(feature = "insert_str",
               reason = "recent addition",
               issue = "35553")]
    pub fn insert_str(&mut self, idx: usize, string: &str) {
        assert!(idx <= self.len());
        assert!(self.is_char_boundary(idx));

        // SAFETY: preconditions asserted above; `string` is valid UTF-8 by
        // construction.
        unsafe {
            self.insert_bytes(idx, string.as_bytes());
        }
    }

    /// Returns a mutable reference to the contents of this `String`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it does not check that the bytes passed
    /// to it are valid UTF-8. If this constraint is violated, it may cause
    /// memory unsafety issues with future users of the `String`, as the rest of
    /// the standard library assumes that `String`s are valid UTF-8.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("hello");
    ///
    /// unsafe {
    ///     let vec = s.as_mut_vec();
    ///     assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
    ///
    ///     vec.reverse();
    /// }
    /// assert_eq!(s, "olleh");
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
        &mut self.vec
    }

    /// Returns the length of this `String`, in bytes.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = String::from("foo");
    ///
    /// assert_eq!(a.len(), 3);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        self.vec.len()
    }

    /// Returns `true` if this `String` has a length of zero.
    ///
    /// Returns `false` otherwise.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut v = String::new();
    /// assert!(v.is_empty());
    ///
    /// v.push('a');
    /// assert!(!v.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Truncates this `String`, removing all contents.
    ///
    /// While this means the `String` will have a length of zero, it does not
    /// touch its capacity.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("foo");
    ///
    /// s.clear();
    ///
    /// assert!(s.is_empty());
    /// assert_eq!(0, s.len());
    /// assert_eq!(3, s.capacity());
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        self.vec.clear()
    }

    /// Create a draining iterator that removes the specified range in the string
    /// and yields the removed chars.
    ///
    /// Note: The element range is removed even if the iterator is not
    /// consumed until the end.
    ///
    /// # Panics
    ///
    /// Panics if the starting point or end point do not lie on a [`char`]
    /// boundary, or if they're out of bounds.
    ///
    /// [`char`]: ../../std/primitive.char.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = String::from("α is alpha, β is beta");
    /// let beta_offset = s.find('β').unwrap_or(s.len());
    ///
    /// // Remove the range up until the β from the string
    /// let t: String = s.drain(..beta_offset).collect();
    /// assert_eq!(t, "α is alpha, ");
    /// assert_eq!(s, "β is beta");
    ///
    /// // A full range clears the string
    /// s.drain(..);
    /// assert_eq!(s, "");
    /// ```
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain<R>(&mut self, range: R) -> Drain
        where R: RangeArgument<usize>
    {
        // Memory safety
        //
        // The String version of Drain does not have the memory safety issues
        // of the vector version. The data is just plain bytes.
        // Because the range removal happens in Drop, if the Drain iterator is leaked,
        // the removal will not happen.
        let len = self.len();
        // Unbounded ends default to the full extent of the string.
        let start = *range.start().unwrap_or(&0);
        let end = *range.end().unwrap_or(&len);

        // Take out two simultaneous borrows. The &mut String won't be accessed
        // until iteration is over, in Drop.
        let self_ptr = self as *mut _;
        // slicing does the appropriate bounds checks
        let chars_iter = self[start..end].chars();

        Drain {
            start: start,
            end: end,
            iter: chars_iter,
            string: self_ptr,
        }
    }

    /// Converts this `String` into a `Box<str>`.
    ///
    /// This will drop any excess capacity.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = String::from("hello");
    ///
    /// let b = s.into_boxed_str();
    /// ```
    #[stable(feature = "box_str", since = "1.4.0")]
    pub fn into_boxed_str(self) -> Box<str> {
        let slice = self.vec.into_boxed_slice();
        // SAFETY: the bytes are valid UTF-8 (String invariant), and `str` has
        // the same layout as `[u8]`.
        unsafe { mem::transmute::<Box<[u8]>, Box<str>>(slice) }
    }
}

impl FromUtf8Error {
    /// Returns the bytes that were attempted to convert to a `String`.
    ///
    /// This method is carefully constructed to avoid allocation. It will
    /// consume the error, moving out the bytes, so that a copy of the bytes
    /// does not need to be made.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let bytes = vec![0, 159];
    ///
    /// let value = String::from_utf8(bytes);
    ///
    /// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }

    /// Fetch a `Utf8Error` to get more details about the conversion failure.
    ///
    /// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
    /// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
    /// an analogue to `FromUtf8Error`. See its documentation for more details
    /// on using it.
    ///
    /// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
    /// [`std::str`]: ../../std/str/index.html
    /// [`u8`]: ../../std/primitive.u8.html
    /// [`&str`]: ../../std/primitive.str.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let bytes = vec![0, 159];
    ///
    /// let error = String::from_utf8(bytes).unwrap_err().utf8_error();
    ///
    /// // the first byte is invalid here
    /// assert_eq!(1, error.valid_up_to());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn utf8_error(&self) -> Utf8Error {
        self.error
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for FromUtf8Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegates to the inner Utf8Error's message.
        fmt::Display::fmt(&self.error, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for FromUtf16Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt("invalid utf-16: lone surrogate found", f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
    fn clone(&self) -> Self {
        String { vec: self.vec.clone() }
    }

    // Reuses the destination's allocation when possible.
    fn clone_from(&mut self, source: &Self) {
        self.vec.clone_from(&source.vec);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl FromIterator<char> for String {
    fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
        let mut buf = String::new();
        buf.extend(iter);
        buf
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> FromIterator<&'a str> for String {
    fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
        let mut buf = String::new();
        buf.extend(iter);
        buf
    }
}

#[stable(feature = "extend_string", since = "1.4.0")]
impl FromIterator<String> for String {
    fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
        let mut buf = String::new();
        buf.extend(iter);
        buf
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Extend<char> for String {
    fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
        let iterator = iter.into_iter();
        // Reserve at least one byte per expected char up front; multi-byte
        // chars may still trigger further growth.
        let (lower_bound, _) = iterator.size_hint();
        self.reserve(lower_bound);
        for ch in iterator {
            self.push(ch)
        }
    }
}

#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a> Extend<&'a char> for String {
    fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Extend<&'a str> for String {
    fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
        for s in iter {
            self.push_str(s)
        }
    }
}

#[stable(feature = "extend_string", since = "1.4.0")]
impl Extend<String> for String {
    fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
        for s in iter {
            self.push_str(&s)
        }
    }
}

/// A convenience impl that delegates to the impl for `&str`
#[unstable(feature = "pattern",
           reason = "API not fully fleshed out and ready to be stabilized",
           issue = "27721")]
impl<'a, 'b> Pattern<'a> for &'b String {
    type Searcher = <&'b str as Pattern<'a>>::Searcher;

    fn into_searcher(self, haystack: &'a str) -> <&'b str as Pattern<'a>>::Searcher {
        self[..].into_searcher(haystack)
    }

    #[inline]
    fn is_contained_in(self, haystack: &'a str) -> bool {
        self[..].is_contained_in(haystack)
    }

    #[inline]
    fn is_prefix_of(self, haystack: &'a str) -> bool {
        self[..].is_prefix_of(haystack)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for String {
    #[inline]
    fn eq(&self, other: &String) -> bool {
        PartialEq::eq(&self[..], &other[..])
    }
    #[inline]
    fn ne(&self, other: &String) -> bool {
        PartialEq::ne(&self[..], &other[..])
    }
}

// Generates symmetric PartialEq impls between String-like types; the macro
// body (and its invocations) continue below.
macro_rules!
impl_eq { ($lhs:ty, $rhs: ty) => {
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<'a, 'b> PartialEq<$rhs> for $lhs {
        #[inline]
        fn eq(&self, other: &$rhs) -> bool { PartialEq::eq(&self[..], &other[..]) }
        #[inline]
        fn ne(&self, other: &$rhs) -> bool { PartialEq::ne(&self[..], &other[..]) }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<'a, 'b> PartialEq<$lhs> for $rhs {
        #[inline]
        fn eq(&self, other: &$lhs) -> bool { PartialEq::eq(&self[..], &other[..]) }
        #[inline]
        fn ne(&self, other: &$lhs) -> bool { PartialEq::ne(&self[..], &other[..]) }
    }

} }

impl_eq! { String, str }
impl_eq! { String, &'a str }
impl_eq! { Cow<'a, str>, str }
impl_eq! { Cow<'a, str>, &'b str }
impl_eq! { Cow<'a, str>, String }

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for String {
    #[inline]
    fn default() -> String {
        String::new()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for String {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `&**self` derefs to &str so String formats exactly like str.
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for String {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for String {
    #[inline]
    fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
        // Hashes as &str so `String` and `str` hash identically.
        (**self).hash(hasher)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Add<&'a str> for String {
    type Output = String;

    #[inline]
    fn add(mut self, other: &str) -> String {
        // Consumes self and appends in place; no extra allocation beyond growth.
        self.push_str(other);
        self
    }
}

#[stable(feature = "stringaddassign", since = "1.12.0")]
impl<'a> AddAssign<&'a str> for String {
    #[inline]
    fn add_assign(&mut self, other: &str) {
        self.push_str(other);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::Range<usize>> for String {
    type Output = str;

    #[inline]
    fn index(&self, index: ops::Range<usize>) -> &str {
        // `&self[..]` derefs to &str; str's Index does the boundary checks.
        &self[..][index]
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeTo<usize>> for String {
    type Output = str;

    #[inline]
    fn index(&self, index: ops::RangeTo<usize>) -> &str {
        &self[..][index]
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFrom<usize>> for String {
    type Output = str;

    #[inline]
    fn index(&self, index: ops::RangeFrom<usize>) -> &str {
        &self[..][index]
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFull> for String {
    type Output = str;

    #[inline]
    fn index(&self, _index: ops::RangeFull) -> &str {
        // SAFETY: the String invariant guarantees `vec` is valid UTF-8.
        unsafe { str::from_utf8_unchecked(&self.vec) }
    }
}
#[unstable(feature = "inclusive_range",
           reason = "recently added, follows RFC",
           issue = "28237")]
impl ops::Index<ops::RangeInclusive<usize>> for String {
    type Output = str;

    #[inline]
    fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
        Index::index(&**self, index)
    }
}
#[unstable(feature = "inclusive_range",
           reason = "recently added, follows RFC",
           issue = "28237")]
impl ops::Index<ops::RangeToInclusive<usize>> for String {
    type Output = str;

    #[inline]
    fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
        Index::index(&**self, index)
    }
}

#[stable(feature = "derefmut_for_string", since = "1.2.0")]
impl ops::IndexMut<ops::Range<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
        &mut self[..][index]
    }
}
#[stable(feature = "derefmut_for_string", since = "1.2.0")]
impl ops::IndexMut<ops::RangeTo<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
        &mut self[..][index]
    }
}
#[stable(feature = "derefmut_for_string", since = "1.2.0")]
impl ops::IndexMut<ops::RangeFrom<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
        &mut self[..][index]
    }
}
#[stable(feature = "derefmut_for_string", since = "1.2.0")]
impl ops::IndexMut<ops::RangeFull> for String {
    #[inline]
    fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
        // SAFETY: &mut [u8] -> &mut str is sound because the bytes are valid
        // UTF-8 and the layouts match.
        unsafe { mem::transmute(&mut *self.vec) }
    }
}
#[unstable(feature = "inclusive_range",
           reason = "recently added, follows RFC",
           issue = "28237")]
impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
        IndexMut::index_mut(&mut **self, index)
    }
}
#[unstable(feature = "inclusive_range",
           reason = "recently added, follows RFC",
           issue = "28237")]
impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
        IndexMut::index_mut(&mut **self, index)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Deref for String {
    type Target = str;

    #[inline]
    fn deref(&self) -> &str {
        // SAFETY: the String invariant guarantees `vec` is valid UTF-8.
        unsafe { str::from_utf8_unchecked(&self.vec) }
    }
}

#[stable(feature = "derefmut_for_string", since = "1.2.0")]
impl ops::DerefMut for String {
    #[inline]
    fn deref_mut(&mut self) -> &mut str {
        // SAFETY: same justification as IndexMut<RangeFull> above.
        unsafe { mem::transmute(&mut *self.vec) }
    }
}

/// An error when parsing a `String`.
///
/// This `enum` is slightly awkward: it will never actually exist. This error is
/// part of the type signature of the implementation of [`FromStr`] on
/// [`String`]. The return type of [`from_str()`], requires that an error be
/// defined, but, given that a [`String`] can always be made into a new
/// [`String`] without error, this type will never actually be returned. As
/// such, it is only here to satisfy said signature, and is useless otherwise.
/// /// [`FromStr`]: ../../std/str/trait.FromStr.html /// [`String`]: struct.String.html /// [`from_str()`]: ../../std/str/trait.FromStr.html#tymethod.from_str #[stable(feature = "str_parse_error", since = "1.5.0")] #[derive(Copy)] pub enum ParseError {} #[stable(feature = "rust1", since = "1.0.0")] impl FromStr for String { type Err = ParseError; #[inline] fn from_str(s: &str) -> Result<String, ParseError> { Ok(String::from(s)) } } #[stable(feature = "str_parse_error", since = "1.5.0")] impl Clone for ParseError { fn clone(&self) -> ParseError { match *self {} } } #[stable(feature = "str_parse_error", since = "1.5.0")] impl fmt::Debug for ParseError { fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { match *self {} } } #[stable(feature = "str_parse_error2", since = "1.8.0")] impl fmt::Display for ParseError { fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { match *self {} } } #[stable(feature = "str_parse_error", since = "1.5.0")] impl PartialEq for ParseError { fn eq(&self, _: &ParseError) -> bool { match *self {} } } #[stable(feature = "str_parse_error", since = "1.5.0")] impl Eq for ParseError {} /// A trait for converting a value to a `String`. /// /// This trait is automatically implemented for any type which implements the /// [`Display`] trait. As such, `ToString` shouldn't be implemented directly: /// [`Display`] should be implemented instead, and you get the `ToString` /// implementation for free. /// /// [`Display`]: ../../std/fmt/trait.Display.html #[stable(feature = "rust1", since = "1.0.0")] pub trait ToString { /// Converts the given value to a `String`. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let i = 5; /// let five = String::from("5"); /// /// assert_eq!(five, i.to_string()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn to_string(&self) -> String; } #[stable(feature = "rust1", since = "1.0.0")] impl<T: fmt::Display + ?Sized> ToString for T { #[inline] default fn to_string(&self) -> String { use core::fmt::Write; let mut buf = String::new(); let _ = buf.write_fmt(format_args!("{}", self)); buf.shrink_to_fit(); buf } } #[stable(feature = "str_to_string_specialization", since = "1.9.0")] impl ToString for str { #[inline] fn to_string(&self) -> String { String::from(self) } } #[stable(feature = "rust1", since = "1.0.0")] impl AsRef<str> for String { #[inline] fn as_ref(&self) -> &str { self } } #[stable(feature = "rust1", since = "1.0.0")] impl AsRef<[u8]> for String { #[inline] fn as_ref(&self) -> &[u8] { self.as_bytes() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> From<&'a str> for String { fn from(s: &'a str) -> String { s.to_owned() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> From<&'a str> for Cow<'a, str> { #[inline] fn from(s: &'a str) -> Cow<'a, str> { Cow::Borrowed(s) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> From<String> for Cow<'a, str> { #[inline] fn from(s: String) -> Cow<'a, str> { Cow::Owned(s) } } #[stable(feature = "cow_str_from_iter", since = "1.12.0")] impl<'a> FromIterator<char> for Cow<'a, str> { fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> { Cow::Owned(FromIterator::from_iter(it)) } } #[stable(feature = "cow_str_from_iter", since = "1.12.0")] impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> { fn from_iter<I: IntoIterator<Item = &'b str>>(it: I) -> Cow<'a, str> { Cow::Owned(FromIterator::from_iter(it)) } } #[stable(feature = "cow_str_from_iter", since = "1.12.0")] impl<'a> FromIterator<String> for Cow<'a, str> { fn from_iter<I: IntoIterator<Item = String>>(it: I) -> Cow<'a, str> { 
Cow::Owned(FromIterator::from_iter(it)) } } #[stable(feature = "rust1", since = "1.0.0")] impl Into<Vec<u8>> for String { fn into(self) -> Vec<u8> { self.into_bytes() } } #[stable(feature = "stringfromchars", since = "1.12.0")] impl<'a> From<&'a [char]> for String { #[inline] fn from(v: &'a [char]) -> String { let mut s = String::with_capacity(v.len()); for c in v { s.push(*c); } s } } #[stable(feature = "stringfromchars", since = "1.12.0")] impl From<Vec<char>> for String { #[inline] fn from(v: Vec<char>) -> String { String::from(v.as_slice()) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Write for String { #[inline] fn write_str(&mut self, s: &str) -> fmt::Result { self.push_str(s); Ok(()) } #[inline] fn write_char(&mut self, c: char) -> fmt::Result { self.push(c); Ok(()) } } /// A draining iterator for `String`. /// /// This struct is created by the [`drain()`] method on [`String`]. See its /// documentation for more. /// /// [`drain()`]: struct.String.html#method.drain /// [`String`]: struct.String.html #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a> { /// Will be used as &'a mut String in the destructor string: *mut String, /// Start of part to remove start: usize, /// End of part to remove end: usize, /// Current remaining range to remove iter: Chars<'a>, } #[stable(feature = "drain", since = "1.6.0")] unsafe impl<'a> Sync for Drain<'a> {} #[stable(feature = "drain", since = "1.6.0")] unsafe impl<'a> Send for Drain<'a> {} #[stable(feature = "drain", since = "1.6.0")] impl<'a> Drop for Drain<'a> { fn drop(&mut self) { unsafe { // Use Vec::drain. "Reaffirm" the bounds checks to avoid // panic code being inserted again. 
let self_vec = (*self.string).as_mut_vec(); if self.start <= self.end && self.end <= self_vec.len() { self_vec.drain(self.start..self.end); } } } } #[stable(feature = "drain", since = "1.6.0")] impl<'a> Iterator for Drain<'a> { type Item = char; #[inline] fn next(&mut self) -> Option<char> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } #[stable(feature = "drain", since = "1.6.0")] impl<'a> DoubleEndedIterator for Drain<'a> { #[inline] fn next_back(&mut self) -> Option<char> { self.iter.next_back() } } #[unstable(feature = "fused", issue = "35602")] impl<'a> FusedIterator for Drain<'a> {}
29.046477
97
0.53109
ab1db7d00daaf17116661718830fa57ea7a6140f
12,681
// PASS    | finalize_alloc_locations
// ---------------------------------------------------------------------------
// USAGE   | finalize_alloc_locations : alloc_lang::Program ->
//         |                            alloc_lang::Program
// ---------------------------------------------------------------------------
// RETURNS | The expression, after substituting variables for their allocatrd
//         | locations
// ---------------------------------------------------------------------------
// DESCRIPTION
// ---------------------------------------------------------------------------
// This pass walks through expression using the allocation map, performing the
// appropriate replacements.
//// ---------------------------------------------------------------------------

use util::Binop;
use util::Relop;
// use util::Label;
use util::Ident;
use util::Location;
use util::mk_uvar;

use alloc_lang::Program;
use alloc_lang::LetrecEntry;
use alloc_lang::RegAllocForm;
use alloc_lang::RegAllocInfo;
use alloc_lang::Body;
use alloc_lang::Exp;
use alloc_lang::Pred;
use alloc_lang::Effect;
use alloc_lang::Variable;
use alloc_lang::Triv;
use alloc_lang::RegConflict;
use alloc_lang::loc_is_reg;
use alloc_lang::reg_to_conflict;
use alloc_lang::var_to_reg_conflict;

use petgraph::graph::Graph;
use petgraph::graph::NodeIndex;
use petgraph::Undirected;

use std::collections::HashMap;
use std::collections::HashSet;

// ---------------------------------------------------------------------------
// INPUT / OUTPUT LANGUAGE
// ---------------------------------------------------------------------------
// NOTE(review): this sketch of the alloc_lang types disagrees with the code
// below in places (e.g. it names the Body expression field `exp`, while the
// code uses `input.expression`, and `frame_conflicts` is built as a petgraph
// Graph in the tests, not the Vec shown here). The code compiles against the
// real `alloc_lang` definitions; treat this comment block as approximate.
//
// #[derive(Debug)]
// pub enum Program { Letrec(Vec<LetrecEntry>, Body) }
//                        // ^ Stores allocation info for the body
//
// #[derive(Debug)]
// pub struct LetrecEntry
// { label : Label
// , rhs   : Body
// }
//
// #[derive(Debug)]
// pub struct Body
// { alloc : RegAllocForm
// , exp   : Exp
// }
//
// pub enum RegAllocForm
// { Allocated(HashMap<Ident, Location>)
// , Unallocated(mut RegAllocInfo, mut HashMap<Ident, Location>)
// }
//
// pub struct RegAllocInfo
// { pub locals             : Vec<Ident>
// , pub unspillables       : Vec<Ident>
// , pub spills             : Vec<Ident>
// , pub call_lives         : Vec<Variable>
// , pub frame_conflicts    : Vec<(Ident, Vec<FrameConflict>)>
// , pub register_conflicts : Vec<(Ident, Vec<RegConflict>)>
// , pub new_frames         : Vec<Vec<Ident>>
// }
//
// pub enum Exp
// { Call(Triv, Vec<Location>)
// , If(Pred,Box<Exp>,Box<Exp>)
// , Begin(Vec<Effect>,Box<Exp>)
// }
//
// pub enum Pred
// { True
// , False
// , Op(Relop,Triv,Triv)
// , If(Box<Pred>,Box<Pred>,Box<Pred>)
// , Begin(Vec<Effect>, Box<Pred>)
// }
//
// pub enum Effect
// { SetOp(Triv, (Binop, Triv, Triv))
// , Set(Triv, Triv)
// , Nop
// , MSet(Triv, Triv, Triv) // dest, offset, src
// , ReturnPoint(Label, Exp, i64)
// , If(Pred, Box<Effect>, Box<Effect>)
// , Begin(Box<Vec<Effect>>)
// }
//
// pub enum Variable
// { Loc(Location)
// , UVar(Ident)
// }
//
// pub enum Triv
// { Var(Variable)
// , Num(i64)
// , Label(Label)
// , MRef(Triv, Triv) // src, offset
// }
//
//pub enum FrameConflict
// { Var(Ident)
// , FrameVar(i64)
// }
//
// pub enum RegConflict
// { Var(Ident)
// , Reg(Ident)
// }

// Shorthand for heap-boxing a sub-expression when rebuilding the tree.
macro_rules! mk_box {
  ($e:expr) => [Box::new($e)]
}

// ---------------------------------------------------------------------------
// IMPLEMENTATION
// ---------------------------------------------------------------------------

// Entry point: rewrites every letrec binding and the top-level body,
// substituting each allocated unique variable for its assigned location.
pub fn finalize_alloc_locations(input : Program) -> Program {
  return match input
  { Program::Letrec(letrecs, body_exp) =>
      Program::Letrec( letrecs.into_iter().map(|x| letrec_entry(x)).collect()
                     , body(body_exp))
  }
}

// Rewrites the right-hand side of one letrec binding; the label is kept.
fn letrec_entry(input : LetrecEntry) -> LetrecEntry {
  LetrecEntry
  { label : input.label
  , rhs   : body(input.rhs)
  }
}

// Rewrites a body's expression using whichever variable->location map its
// allocation form carries; works for both allocated and unallocated forms
// (an unallocated form still has a partial map, which is used as-is).
fn body(input: Body) -> Body {
  let new_exp =
    { match &input.alloc
      { RegAllocForm::Allocated(var_map)      => exp(input.expression, var_map)
      , RegAllocForm::Unallocated(_, var_map) => exp(input.expression, var_map)
      }
    };
  Body
  { alloc      : input.alloc
  , expression : new_exp
  }
}

// Substitutes locations through a tail expression.
// Note: the live-location list of a Call is left untouched.
fn exp(input : Exp, var_map : &HashMap<Ident, Location>) -> Exp {
  match input
  { Exp::Call(target, lives) => Exp::Call(triv(target, var_map), lives)
  , Exp::If(test, con, alt)  => Exp::If( pred(test, var_map)
                                       , mk_box!(exp(*con, var_map))
                                       , mk_box!(exp(*alt, var_map)))
  , Exp::Begin(effs, tail)   => Exp::Begin( effs.into_iter().map(|e| effect(e, var_map)).collect()
                                          , mk_box!(exp(*tail, var_map)))
  }
}

// Substitutes locations through a predicate (test position).
fn pred(input : Pred, var_map : &HashMap<Ident, Location>) -> Pred {
  match input
  { Pred::True                  => Pred::True
  , Pred::False                 => Pred::False
  , Pred::Op(op, triv1, triv2)  => Pred::Op(op, triv(triv1, var_map), triv(triv2, var_map))
  , Pred::If(test, conseq, alt) => Pred::If( mk_box!(pred(*test, var_map))
                                           , mk_box!(pred(*conseq, var_map))
                                           , mk_box!(pred(*alt, var_map)))
  , Pred::Begin(effs, test)     => Pred::Begin( effs.into_iter().map(|e| effect(e, var_map)).collect()
                                              , mk_box!(pred(*test, var_map)))
  }
}

// Substitutes locations through an effect; every Triv position is rewritten,
// and nested expressions (ReturnPoint bodies, If/Begin arms) recurse.
fn effect(input : Effect, var_map : &HashMap<Ident, Location>) -> Effect {
  match input
  { Effect::SetOp(dest, (op, arg1, arg2)) => Effect::SetOp(triv(dest, var_map), (op, triv(arg1, var_map), triv(arg2, var_map)))
  , Effect::Set(dest, src)                => Effect::Set(triv(dest, var_map), triv(src, var_map))
  , Effect::Nop                           => Effect::Nop
  , Effect::MSet(dest, offset, src)       => Effect::MSet(triv(dest, var_map), triv(offset, var_map), triv(src, var_map))
  , Effect::ReturnPoint(lbl, body, size)  => Effect::ReturnPoint(lbl, exp(body, var_map), size)
  , Effect::If(test, conseq, alt)         => Effect::If( pred(test, var_map)
                                                       , mk_box!(effect(*conseq, var_map))
                                                       , mk_box!(effect(*alt, var_map)))
  , Effect::Begin(effs)                   => Effect::Begin(mk_box!((*effs).into_iter().map(|e| effect(e, var_map)).collect()))
  }
}

// Core substitution: a unique variable found in the map becomes its assigned
// location; locations pass through unchanged, and unmapped unique variables
// are left as-is (they may be resolved by a later allocation round).
fn try_var_lookup(input : Variable, var_map : &HashMap<Ident, Location>) -> Triv {
  match input
  { Variable::Loc(_)  => Triv::Var(input)
  , Variable::UVar(v) =>
      if let Some(location) = var_map.get(&v) {
        Triv::Var(Variable::Loc(location.clone()))
      } else {
        Triv::Var(Variable::UVar(v))
      }
  }
}

// Substitutes locations through a trivial operand; MRef recurses into both
// the base and the offset.
fn triv(input : Triv, var_map : &HashMap<Ident, Location>) -> Triv {
  match input
  { Triv::Var(v)       => try_var_lookup(v, var_map)
  , Triv::Num(_)       => input
  , Triv::Label(_)     => input
  , Triv::MRef(t1, t2) => Triv::MRef(mk_box!(triv(*t1, var_map)), mk_box!(triv(*t2, var_map)))
  }
}

// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// TESTING
// ---------------------------------------------------------------------------

pub mod test {

  // Local copy of the boxing shorthand (macro_rules! items are not exported
  // across the module boundary without #[macro_use]).
  macro_rules! mk_box {
    ($e:expr) => [Box::new($e)]
  }

  use util::index_fvar;
  use util::mk_uvar;
  use util::Binop;
  use util::Relop;
  use util::Label;
  use util::Ident;
  use util::Location;

  use alloc_lang::Program;
  use alloc_lang::LetrecEntry;
  use alloc_lang::RegAllocForm;
  use alloc_lang::RegAllocInfo;
  use alloc_lang::Exp;
  use alloc_lang::Pred;
  use alloc_lang::Body;
  use alloc_lang::Effect;
  use alloc_lang::Variable;
  use alloc_lang::Triv;

  use petgraph::graph::Graph;

  use std::collections::HashMap;

  // --- Constructor helpers: thin wrappers so test programs read like the
  // --- grammar. Exp builders:
  #[allow(dead_code)]
  fn calle(call : Triv, args : Vec<Location>) -> Exp { Exp::Call(call, args) }

  #[allow(dead_code)]
  fn ife(test : Pred, conseq : Exp, alt : Exp) -> Exp { Exp::If(test, mk_box!(conseq), mk_box!(alt)) }

  #[allow(dead_code)]
  fn begine(args : Vec<Effect>, base : Exp) -> Exp { Exp::Begin(args, mk_box!(base)) }

  // Pred builders:
  #[allow(dead_code)]
  fn rop(op : Relop, t1 : Triv, t2 : Triv) -> Pred { Pred::Op(op, t1, t2) }

  #[allow(dead_code)]
  fn ifp(test : Pred, conseq : Pred, alt : Pred) -> Pred { Pred::If(mk_box!(test), mk_box!(conseq), mk_box!(alt)) }

  #[allow(dead_code)]
  fn beginp(args : Vec<Effect>, base : Pred) -> Pred { Pred::Begin(args, mk_box!(base)) }

  // Effect builders:
  #[allow(dead_code)]
  fn setopf(t1 : Triv, op : Binop, arg1 : Triv, arg2 : Triv) -> Effect { Effect::SetOp(t1, (op, arg1, arg2)) }

  #[allow(dead_code)]
  fn setf(dest : Triv, src : Triv) -> Effect { Effect::Set(dest, src) }

  #[allow(dead_code)]
  fn nopf() -> Effect { Effect::Nop }

  #[allow(dead_code)]
  fn msetf(dest : Triv, src : Triv, offset : Triv) -> Effect { Effect::MSet(dest, src, offset) }

  #[allow(dead_code)]
  fn retf(lbl : Label, frame_size : i64, body : Exp) -> Effect { Effect::ReturnPoint(lbl, body, frame_size) }

  #[allow(dead_code)]
  fn iff(test : Pred, conseq : Effect, alt : Effect) -> Effect { Effect::If(test, mk_box!(conseq), mk_box!(alt)) }

  #[allow(dead_code)]
  fn beginf(args : Vec<Effect>) -> Effect { Effect::Begin(mk_box!(args)) }

  // Variable / Triv builders:
  #[allow(dead_code)]
  fn uv(name : Ident) -> Variable { Variable::UVar(name) }

  #[allow(dead_code)]
  fn vt(name : Ident) -> Triv { Triv::Var(Variable::UVar(name)) }

  #[allow(dead_code)]
  fn nt(val : i64) -> Triv { Triv::Num(val) }

  #[allow(dead_code)]
  fn lt(lbl : Label) -> Triv { Triv::Label(lbl) }

  #[allow(dead_code)]
  fn mreft(src : Triv, offset : Triv) -> Triv { Triv::MRef(mk_box!(src), mk_box!(offset)) }

  #[allow(dead_code)]
  fn fvar(n: i64) -> Triv { Triv::Var(Variable::Loc(index_fvar(n))) }

  #[allow(dead_code)]
  fn reg(s: &str) -> Triv { Triv::Var(Variable::Loc(Location::Reg(Ident::from_str(s)))) }

  #[allow(dead_code)]
  fn regl(s: &str) -> Location { Location::Reg(Ident::from_str(s)) }

  #[allow(dead_code)]
  fn mk_lbl(s : &str) -> Label { Label { label : Ident::from_str(s) } }

  // An empty allocation-info record (no locals, spills, conflicts or frames).
  fn mk_alloc_form() -> RegAllocInfo
  { RegAllocInfo
    { locals             : Vec::new()
    , unspillables       : Vec::new()
    , spills             : Vec::new()
    , call_lives         : Vec::new()
    , frame_conflicts    : Graph::new_undirected()
    , register_conflicts : Graph::new_undirected()
    , new_frames         : Vec::new()
    }
  }

  // Builds a two-part sample program: one unallocated letrec binding (X1)
  // whose map covers only some of its variables, plus a fully allocated
  // top-level body that calls it.
  pub fn test1() -> Program {

    let x0 = mk_uvar("x");
    let x1 = mk_uvar("x");
    let x2 = mk_uvar("x");
    let x3 = mk_uvar("x");
    let y4 = mk_uvar("y");
    let z5 = mk_uvar("z");
    let z6 = mk_uvar("z");
    let z7 = mk_uvar("z");
    let z8 = mk_uvar("z");
    let z9 = mk_uvar("z");
    let z10 = mk_uvar("z");
    let z11 = mk_uvar("z");
    let z12 = mk_uvar("z");

    // Partial allocation for the X1 binding; x1 is deliberately unmapped.
    let mut map = HashMap::new();
    map.insert(x0, regl("rbx"));
    map.insert(z6, Location::FrameVar(2));
    map.insert(x2, regl("r8"));
    map.insert(x3, regl("r9"));
    map.insert(y4, regl("r15"));

    // NOTE(review): this first body_map is immediately shadowed below and
    // never read; kept to preserve the original test verbatim.
    let mut body_map = HashMap::new();
    body_map.insert(x2, regl("r8"));
    body_map.insert(x3, regl("r9"));

    let binding1_alloc = mk_alloc_form();

    let binding1 =
      LetrecEntry
      { label : mk_lbl("X1")
      , rhs   : Body
                { alloc : RegAllocForm::Unallocated(binding1_alloc, map)
                , expression :
                    ife(rop(Relop::LT, vt(x2), vt(x3))
                       , begine(vec![ setopf(fvar(1), Binop::Plus, fvar(1), fvar(2))
                                    , msetf(vt(x0), nt(1), nt(40))
                                    , msetf(vt(x0), vt(y4), nt(25))
                                    , retf(mk_lbl("foo"), 4,
                                           begine( vec![ setf(reg("rax"), fvar(1)) ]
                                                 , calle(lt(mk_lbl("X3")), vec![])) )
                                    , setf(vt(x0), mreft(reg("rax"), nt(1))) ]
                                , calle(lt(mk_lbl("X2")), vec![]))
                       , begine(vec![ setopf(vt(x1), Binop::Plus, vt(x1), nt(35)) ]
                               , calle(lt(mk_lbl("X2")), vec![])))
                }
      };

    // Fully allocated map for the top-level body.
    let mut body_map = HashMap::new();
    body_map.insert(x2, regl("r8"));
    body_map.insert(x3, regl("r9"));

    let test_body =
      Body
      { alloc : RegAllocForm::Allocated(body_map)
      , expression : begine(vec![ setf(vt(x2), nt(0))
                                , setf(vt(x3), nt(1)) ]
                           , calle(lt(mk_lbl("X1")), vec![regl("rax"), regl("rbp")]))
      };

    Program::Letrec(vec![binding1], test_body)
  }

}
32.432225
143
0.518808
0388b9c99135bd550d7719833c6d1616215023f0
5,787
use std::cmp::Ordering; use std::ops::{Add, Div, Mul, Sub}; use colored::*; use itertools::Itertools; use env::Env; use std::fmt; #[derive(Clone, PartialEq)] pub enum Expr { Nil, Bool(bool), Int(i64), Float(f64), Str(String), Symbol(String), Quote(Box<Expr>), Fun(Function, Arguments), Special(Function, Arguments), List(Vec<Expr>), Error(String), } impl Eq for Expr {} impl PartialOrd for Expr { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { match (self, other) { (Nil, Nil) => Some(Ordering::Equal), (Bool(a), Bool(b)) => PartialOrd::partial_cmp(a, b), (Int(a), Int(b)) => PartialOrd::partial_cmp(a, b), (Int(a), Float(b)) => PartialOrd::partial_cmp(&(*a as f64), b), (Float(a), Float(b)) => PartialOrd::partial_cmp(a, b), (Float(a), Int(b)) => PartialOrd::partial_cmp(a, &(*b as f64)), (Str(a), Str(b)) => PartialOrd::partial_cmp(a, b), (Symbol(a), Symbol(b)) => PartialOrd::partial_cmp(a, b), (Quote(a), Quote(b)) => PartialOrd::partial_cmp(a, b), (Fun(a, _), Fun(b, _)) => PartialOrd::partial_cmp(a, b), (Special(a, _), Special(b, _)) => PartialOrd::partial_cmp(a, b), _ => None, } } } use Expr::*; impl fmt::Debug for Expr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Nil => write!(f, "nil"), Bool(x) => write!(f, "{:?}", x), Int(x) => write!(f, "{:?}", x), Float(x) => write!(f, "{:?}", x), Str(x) => write!(f, "{:?}", x), Symbol(x) => write!(f, "{}", x), Quote(x) => write!(f, "'{:?}", x), Fun(_, args) => write!(f, "<function {:?}>", args), Special(_, _) => write!(f, "<special>"), List(xs) => write!(f, "({})", xs.iter().map(|x| format!("{:?}", x)).join(" ")), Error(err) => write!(f, "ERROR: {}", err), } } } impl fmt::Display for Expr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Nil => write!(f, "{}", format!("{:?}", self).cyan()), Bool(_) => write!(f, "{}", format!("{:?}", self).green()), Int(_) => write!(f, "{}", format!("{:?}", self).blue()), Float(_) => write!(f, "{}", format!("{:?}", self).blue()), Str(_) => 
write!(f, "{}", format!("{:?}", self).yellow()), Symbol(_) => write!(f, "{}", format!("{:?}", self).bright_white()), Quote(x) => write!(f, "'{}", x), Fun(_, _) => write!(f, "{}", format!("{:?}", self).magenta()), Special(_, _) => write!(f, "{}", format!("{:?}", self).magenta()), List(xs) => write!(f, "({})", xs.iter().map(|x| format!("{}", x)).join(" ")), Error(_) => write!(f, "{}", format!("{:?}", self).red()), } } } impl Add for Expr { type Output = Self; fn add(self, other: Self) -> Self { match (self, other) { (Int(a), Int(b)) => Int(a + b), (Int(a), Float(b)) => Float(a as f64 + b), (Float(a), Int(b)) => Float(a + b as f64), (Float(a), Float(b)) => Float(a + b), (Str(a), Str(b)) => Str(format!("{}{}", a, b)), (a, b) => Error(format!("Can't add {:?} and {:?}", a, b)), } } } impl Sub for Expr { type Output = Self; fn sub(self, other: Self) -> Self { match (self, other) { (Int(a), Int(b)) => Int(a - b), (Int(a), Float(b)) => Float(a as f64 - b), (Float(a), Int(b)) => Float(a - b as f64), (Float(a), Float(b)) => Float(a - b), (a, b) => Error(format!("Can't subtract {:?} from {:?}", b, a)), } } } impl Mul for Expr { type Output = Self; fn mul(self, other: Self) -> Self { match (self, other) { (Int(a), Int(b)) => Int(a * b), (Int(a), Float(b)) => Float(a as f64 * b), (Float(a), Int(b)) => Float(a * b as f64), (Float(a), Float(b)) => Float(a * b), (a, b) => Error(format!("Can't multiply {:?} with {:?}", a, b)), } } } impl Div for Expr { type Output = Self; fn div(self, other: Self) -> Self { match (self, other) { (Int(a), Int(b)) => Int(a / b), (Int(a), Float(b)) => Float(a as f64 / b), (Float(a), Int(b)) => Float(a / b as f64), (Float(a), Float(b)) => Float(a / b), (a, b) => Error(format!("Can't divide {:?} by {:?}", a, b)), } } } #[derive(Clone)] pub enum Function { Builtin(fn(&mut Env) -> Expr), Dynamic(Box<Expr>), } use self::Function::*; impl PartialEq for Function { fn eq(&self, other: &Self) -> bool { match (self, other) { (Dynamic(a), Dynamic(b)) => a == b, _ 
=> false, } } } impl Eq for Function {} impl PartialOrd for Function { fn partial_cmp(&self, _other: &Self) -> Option<Ordering> { None } } #[derive(Clone, PartialEq, PartialOrd, Eq)] pub enum Arguments { Variadic, Fixed(Vec<String>), } impl fmt::Debug for Arguments { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Arguments::Variadic => write!(f, "..."), Arguments::Fixed(args) => { let mut result = String::new(); for (i, arg) in args.iter().enumerate() { result.push_str(arg.as_str()); if i + 1 < args.len() { result.push_str(", "); } } write!(f, "({})", result) } } } }
30.619048
91
0.439779
ff04cfe99c0bc10b12bf066247f352640d348343
723
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::ops::Deref; trait Trait {} struct Struct; impl Deref for Struct { type Target = Trait; fn deref(&self) -> &Trait { unimplemented!(); } } //~^^^^ ERROR cannot infer an appropriate lifetime for lifetime parameter fn main() {}
28.92
73
0.704011
ccb81a4db63952f1cd319d0932a054812cc30de5
8,598
use super::utils::{test_get_default_device, test_ops_stream_operation, Scope};
use super::*;
use std::sync::atomic::{AtomicI64, Ordering};

// Integration test: plays a North American dial tone (350 Hz + 440 Hz sine
// mix) on the default output device and drives the stream through a
// start -> pause -> resume -> stop cycle, checking at each phase that the
// reported stream position behaves consistently (monotonic while playing,
// frozen — modulo one callback buffer of client-side interpolation — while
// stopped). Requires real audio hardware; it is a no-op when no output
// device is present.
#[test]
fn test_dial_tone() {
    use std::f32::consts::PI;
    use std::thread;
    use std::time::Duration;

    const SAMPLE_FREQUENCY: u32 = 48_000;

    // Do nothing if there is no available output device.
    if test_get_default_device(Scope::Output).is_none() {
        println!("No output device.");
        return;
    }

    // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
    // (in the comments).
    let mut output_params = ffi::cubeb_stream_params::default();
    output_params.format = ffi::CUBEB_SAMPLE_S16NE;
    output_params.rate = SAMPLE_FREQUENCY;
    output_params.channels = 1;
    output_params.layout = ffi::CUBEB_LAYOUT_MONO;
    output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;

    // Shared state between the audio callback thread and the test thread.
    // `buffer_size` is atomic because the test thread reads it while the
    // callback thread writes it; `phase` is only ever touched by the
    // callback, so a plain i64 is fine.
    struct Closure {
        buffer_size: AtomicI64,
        phase: i64,
    };
    let mut closure = Closure {
        buffer_size: AtomicI64::new(0),
        phase: 0,
    };
    // Raw pointer handed to the C callback API as its user_ptr.
    let closure_ptr = &mut closure as *mut Closure as *mut c_void;

    test_ops_stream_operation(
        "stream: North American dial tone",
        ptr::null_mut(), // Use default input device.
        ptr::null_mut(), // No input parameters.
        ptr::null_mut(), // Use default output device.
        &mut output_params,
        4096, // TODO: Get latency by get_min_latency instead ?
        Some(data_callback),
        Some(state_callback),
        closure_ptr,
        |stream| {
            assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);

            // The phases the position check walks through, in order.
            #[derive(Debug)]
            enum State {
                WaitingForStart,
                PositionIncreasing,
                Paused,
                Resumed,
                End,
            };
            let mut state = State::WaitingForStart;
            let mut position: u64 = 0;
            let mut prev_position: u64 = 0;
            let mut count = 0;
            // Number of 50 ms polls spent in each phase before advancing.
            const CHECK_COUNT: i32 = 10;
            loop {
                thread::sleep(Duration::from_millis(50));
                assert_eq!(
                    unsafe { OPS.stream_get_position.unwrap()(stream, &mut position) },
                    ffi::CUBEB_OK
                );
                println!(
                    "State: {:?}, position: {}, previous position: {}",
                    state, position, prev_position
                );
                match &mut state {
                    State::WaitingForStart => {
                        // It's expected to have 0 for a few iterations here: the stream can take
                        // some time to start.
                        if position != prev_position {
                            assert!(position > prev_position);
                            prev_position = position;
                            state = State::PositionIncreasing;
                        }
                    }
                    State::PositionIncreasing => {
                        // wait a few iterations, check monotony
                        if position != prev_position {
                            assert!(position > prev_position);
                            prev_position = position;
                            count += 1;
                            if count > CHECK_COUNT {
                                state = State::Paused;
                                count = 0;
                                assert_eq!(
                                    unsafe { OPS.stream_stop.unwrap()(stream) },
                                    ffi::CUBEB_OK
                                );
                                // Update the position once paused.
                                assert_eq!(
                                    unsafe {
                                        OPS.stream_get_position.unwrap()(stream, &mut position)
                                    },
                                    ffi::CUBEB_OK
                                );
                                prev_position = position;
                            }
                        }
                    }
                    State::Paused => {
                        // The cubeb_stream_stop call above should synchrously stop the callbacks,
                        // hence the clock, the assert below must always holds, modulo the client
                        // side interpolation.
                        assert!(
                            position == prev_position
                                || position - prev_position
                                    <= closure.buffer_size.load(Ordering::SeqCst) as u64
                        );
                        count += 1;
                        prev_position = position;
                        if count > CHECK_COUNT {
                            state = State::Resumed;
                            count = 0;
                            assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
                        }
                    }
                    State::Resumed => {
                        // wait a few iterations, this can take some time to start
                        if position != prev_position {
                            assert!(position > prev_position);
                            prev_position = position;
                            count += 1;
                            if count > CHECK_COUNT {
                                state = State::End;
                                count = 0;
                                assert_eq!(
                                    unsafe { OPS.stream_stop.unwrap()(stream) },
                                    ffi::CUBEB_OK
                                );
                                assert_eq!(
                                    unsafe {
                                        OPS.stream_get_position.unwrap()(stream, &mut position)
                                    },
                                    ffi::CUBEB_OK
                                );
                                prev_position = position;
                            }
                        }
                    }
                    State::End => {
                        // The cubeb_stream_stop call above should synchrously stop the callbacks,
                        // hence the clock, the assert below must always holds, modulo the client
                        // side interpolation.
                        assert!(
                            position == prev_position
                                || position - prev_position
                                    <= closure.buffer_size.load(Ordering::SeqCst) as u64
                        );
                        if position == prev_position {
                            count += 1;
                            if count > CHECK_COUNT {
                                break;
                            }
                        }
                    }
                }
            }
            assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
        },
    );

    // C ABI state callback: only sanity-checks its arguments and that the
    // stream never reports an error state.
    extern "C" fn state_callback(
        stream: *mut ffi::cubeb_stream,
        user_ptr: *mut c_void,
        state: ffi::cubeb_state,
    ) {
        assert!(!stream.is_null());
        assert!(!user_ptr.is_null());
        assert_ne!(state, ffi::CUBEB_STATE_ERROR);
    }

    // C ABI data callback: synthesizes the dial tone into the output buffer
    // and records the callback buffer size for the pause-phase tolerance
    // check on the test thread.
    extern "C" fn data_callback(
        stream: *mut ffi::cubeb_stream,
        user_ptr: *mut c_void,
        _input_buffer: *const c_void,
        output_buffer: *mut c_void,
        nframes: i64,
    ) -> i64 {
        assert!(!stream.is_null());
        assert!(!user_ptr.is_null());
        assert!(!output_buffer.is_null());

        // SAFETY relies on cubeb handing us a valid S16NE mono buffer of
        // `nframes` samples, as requested by `output_params` above.
        let buffer = unsafe {
            let ptr = output_buffer as *mut i16;
            let len = nframes as usize;
            slice::from_raw_parts_mut(ptr, len)
        };

        let closure = unsafe { &mut *(user_ptr as *mut Closure) };
        closure.buffer_size.store(nframes, Ordering::SeqCst);

        // Generate tone on the fly.
        for data in buffer.iter_mut() {
            let t1 = (2.0 * PI * 350.0 * (closure.phase) as f32 / SAMPLE_FREQUENCY as f32).sin();
            let t2 = (2.0 * PI * 440.0 * (closure.phase) as f32 / SAMPLE_FREQUENCY as f32).sin();
            *data = f32_to_i16_sample(0.5 * (t1 + t2));
            closure.phase += 1;
        }

        nframes
    }

    // Scales a [-1.0, 1.0] float sample to the full i16 range.
    fn f32_to_i16_sample(x: f32) -> i16 {
        (x * f32::from(i16::max_value())) as i16
    }
}
39.805556
100
0.433938
484391eb6b43db616de3fce9cd084035159f87b6
1,310
/*
 *
 *
 * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 *
 * The version of the OpenAPI document: 1.0.0
 *
 * Generated by: https://openapi-generator.tech
 */

/// XMPP chat-domain names reported by the LoL client API; every field is
/// optional and omitted from serialization when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolChatChatDomainConfig {
    #[serde(rename = "ChampSelectDomainName", skip_serializing_if = "Option::is_none")]
    pub champ_select_domain_name: Option<String>,
    #[serde(rename = "ClubDomainName", skip_serializing_if = "Option::is_none")]
    pub club_domain_name: Option<String>,
    #[serde(rename = "CustomGameDomainName", skip_serializing_if = "Option::is_none")]
    pub custom_game_domain_name: Option<String>,
    #[serde(rename = "P2PDomainName", skip_serializing_if = "Option::is_none")]
    pub p2_p_domain_name: Option<String>,
    #[serde(rename = "PostGameDomainName", skip_serializing_if = "Option::is_none")]
    pub post_game_domain_name: Option<String>,
}

impl LolChatChatDomainConfig {
    /// Creates a config with every domain name unset.
    pub fn new() -> LolChatChatDomainConfig {
        LolChatChatDomainConfig {
            champ_select_domain_name: None,
            club_domain_name: None,
            custom_game_domain_name: None,
            p2_p_domain_name: None,
            post_game_domain_name: None,
        }
    }
}

// Added: `Default` delegating to `new()` (clippy::new_without_default); all
// fields are `None`, so the two constructors are equivalent. Backward
// compatible — no existing API changes.
impl Default for LolChatChatDomainConfig {
    fn default() -> Self {
        Self::new()
    }
}
31.95122
109
0.696947
e99c648afbe13800db0e6308d0e1f99371132fe6
774
//! Crate root: a Cosmos-SDK-style chain toolkit exposing address, coin,
//! key, message, and signature types plus a node client (`Contact`).
//! Unsafe code is forbidden crate-wide.
#![warn(clippy::all)]
#![allow(clippy::pedantic)]
#![forbid(unsafe_code)]

extern crate base64;
extern crate bech32;
extern crate num256;
extern crate num_bigint;
extern crate num_traits;
extern crate ripemd160;
extern crate serde;
extern crate sha2;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;

// Public modules.
pub mod address;
pub mod client;
pub mod coin;
pub mod decimal;
pub mod error;
pub mod mnemonic;
pub mod msg;
pub mod private_key;
pub mod public_key;
pub mod signature;
pub mod utils;

// Convenience re-exports so consumers can `use <crate>::Address` etc.
// without naming the submodule.
pub use address::Address;
pub use client::Contact;
pub use coin::Coin;
pub use coin::Fee;
pub use mnemonic::Mnemonic;
pub use msg::Msg;
pub use private_key::MessageArgs;
pub use private_key::PrivateKey;
pub use public_key::PublicKey;
pub use signature::Signature;
18.878049
33
0.76615
23b4755c1bebf650cd73ae8e17b2ab8d44df9158
17,834
extern crate impl2001_rs; use anyhow::{anyhow, Result}; use chrono::prelude::Utc; use diesel::pg::PgConnection; use std::fs; use std::io::{sink, Write}; use std::path::Path; use crate::crypto_util; use crate::impl2001_rs::pip::pip2001::Pip2001; use crate::impl2001_rs::pip::pip2001::Pip2001MessageType; use crate::impl2001_rs::pip::InputObject; use crate::prs_utility_rust::utility; use atom_syndication::{Feed, Generator, Person}; use super::SETTINGS; use crate::db; use crate::db::models::{Post, PostPartial}; use crate::frontmatter; use crate::prs; pub fn process_pip2001_message<'a>( conn: &PgConnection, pipobject: &Pip2001, tx_id: &'a str, user_pubaddr: &'a str, trx_table_num: i64, encryption: &str, ) -> bool { match pipobject.msg_type { Pip2001MessageType::PUBLISH_MANAGEMENT => { let mut users_action = ""; let mut users_list = ""; let mut topic = ""; if pipobject.data.contains_key("allow") { users_action = "allow"; users_list = &pipobject.data["allow"]; } else if pipobject.data.contains_key("deny") { users_action = "deny"; users_list = &pipobject.data["deny"]; } if pipobject.data.contains_key("topic") { topic = &pipobject.data["topic"]; } else { error!( "can not find topic key from pipobject.data = {:?}", &pipobject.data ); } let now = Utc::now().naive_utc(); for user_pubaddr in users_list.split(',') { debug!( "tx_id = {} user = {} user_action = {:?}", tx_id, user_pubaddr, users_action ); db::save_user(&conn, &user_pubaddr, &users_action, &tx_id, &topic, now) .expect("save user failed"); db::update_last_status(&conn, "tx_num", trx_table_num) .expect("update last_tx_num failed"); } } Pip2001MessageType::PUBLISH => { let file_hash = &pipobject.data["file_hash"]; let hash_alg = &pipobject.data["hash_alg"]; let topic = &pipobject.data["topic"]; let url: &str; let uris = &pipobject.meta["uris"]; match uris { InputObject::String(_s) => { error!( "uris should be a url list, tx_id = {}\npipobject = {:?}", tx_id, pipobject ); return false; } InputObject::VecOfString(v) => { 
url = &v[0]; } } let now = Utc::now().naive_utc(); // save updated_tx_id let mut updated_tx_id = ""; if let Some(v) = pipobject.data.get("updated_tx_id") { updated_tx_id = v; } let _post = db::save_post( &conn, &tx_id, &user_pubaddr, &updated_tx_id, &file_hash, &hash_alg, &topic, &url, encryption, now, ) .expect("save post failed"); debug!( "post saved, file_hash = {} encryption = {}", _post.file_hash, _post.encryption ); db::update_last_status(&conn, "tx_num", trx_table_num).expect(&format!( "update last_tx_num failed, tx_num = {}", trx_table_num )); } Pip2001MessageType::NA => warn!("Pip2001MessageType is NA"), } true } pub fn process_post_updated(connection: &PgConnection, post: &Post) -> bool { // 被更新的 publish_tx_id let updated_publish_tx_id = post.updated_tx_id.trim(); if updated_publish_tx_id.len() == 0 { return true; } let updated_post_res = db::get_post_by_publish_tx_id(connection, &updated_publish_tx_id); match updated_post_res { Ok(updated_post) => { debug!( "process post updated, updated_publish_tx_id = {}", updated_publish_tx_id ); // check user_address of updated post if updated_post.user_address != post.user_address { error!( "update post failed, publish_tx_id: {}, updated user_address {} != post.user_address {}", &post.publish_tx_id, updated_post.user_address, post.user_address ); return false; } else { // delete old content debug!("delete content, file_hash = {}", updated_post.file_hash); if let Err(e) = db::delete_content(connection, &updated_post.file_hash) { error!( "delete content failed, file_hash = {}, error = {}", updated_post.file_hash, e ); return false; } // delete old post debug!("delete post, file_hash = {}", updated_post.file_hash); if let Err(e) = db::delete_post(connection, &updated_post.file_hash) { error!( "delete old content failed, file_hash = {}, error = {}", updated_post.file_hash, e ); return false; } } } Err(e) => { error!( "get_post_by_publish_tx_id failed, updated_publish_tx_id = {}, error: {}", updated_publish_tx_id, e ); 
return false; } } true } pub fn fetchcontent(connection: &PgConnection) { let result_posts = db::get_posts(connection, false, 1000); match result_posts { Ok(posts) => { for post in posts { debug!("fetch file_hash = {} url = {}", post.file_hash, post.url); let response = fetch_markdown(post.url.clone()); match response { Ok(data) => { let html; if !post.encryption.is_empty() { let enc_post: prs::EncPost = match serde_json::from_slice(&data.as_bytes()) { Ok(v) => v, Err(e) => { error!( "parse encryption post failed, post.url = {}, error = {}", post.url, e ); continue; } }; if let Some(topic_conf) = SETTINGS.get_topic(&post.topic) { html = match decrypt_aes_256_cbc( &topic_conf.encryption_key, &topic_conf.iv_prefix, &enc_post.session, &enc_post.content, ) { Ok(v) => v, Err(e) => { error!( "decrypt enc post file_hash = {} failed: {:?}", post.file_hash, e ); continue; } } } else { error!("can not find topic = {} from toml config", &post.topic); continue; } } else { html = data; } let hex = utility::hash_text(&html, &post.hash_alg) .ok() .expect(&format!( "utility::hash_text failed, hash_alg = {} html = {}", &post.hash_alg, &html )); // just check and output error message if hex != post.file_hash { error!( "hex != file_hash, hash_alg = {} hex = {} file_hash = {} url = {}", &post.hash_alg, hex, post.file_hash, post.url ); } let content = db::get_content(connection, &post.file_hash); match content { Ok(_) => { debug!("content already exists, file_hash = {}", &post.file_hash); } Err(e) => { if e == diesel::NotFound { if let Err(e) = db::save_content( connection, &post.file_hash, &post.url, &html, ) { error!( "save_content file_hash = {} url = {} failed: {:?}", &post.file_hash, &post.url, e ); continue; } } else { error!("get_content failed: {}", e); } } } db::update_post_status(connection, &post.file_hash, true, true) .expect("update_post_status failed"); if !process_post_updated(connection, &post) { error!( "post/content update failed, post.file_hash = {}, skip", 
post.file_hash ); continue; } } Err(e) => { if format!("{}", e).contains("status code: 404") { // delete posts debug!("post.file_hash = {} fetch 404, delete it", &post.file_hash); db::delete_post(connection, &post.file_hash) .expect("update post.deleted failed"); db::update_notify_status(connection, &post.publish_tx_id, true) .expect("update deleted post notify status failed"); } else { error!("fetch_markdown {} failed: {:?}", &post.url, e); } continue; } } } } Err(e) => error!("get posts failed: {:?}", e), } } pub fn fetch_markdown(url: String) -> Result<String> { let mut easy = prs::get_curl_easy()?; easy.url(&url)?; let _redirect = easy.follow_location(true); let mut data = Vec::new(); { let mut transfer = easy.transfer(); transfer.write_function(|new_data| { data.extend_from_slice(new_data); Ok(new_data.len()) })?; transfer.perform()?; }; let html = String::from_utf8(data).expect("body is not valid UTF8!"); let result = easy.response_code(); match result { Ok(respcode) => { if respcode == 200 { Ok(html) } else { Err(anyhow!("url = {} error status code: {:?}", url, respcode,)) } } Err(e) => Err(anyhow!("url = {} error = {}", url, e)), } } fn decrypt_aes_256_cbc( encryption_key: &str, iv_prefix: &str, session: &str, content: &str, ) -> Result<String, String> { let hashiv = crypto_util::get_iv(&iv_prefix, session); let key = hex::decode(&encryption_key).expect(&format!( "hex::decode failed, encryption_key = {}", encryption_key )); crypto_util::decrypt_aes_256_cbc(String::from(content), &key, hashiv) } pub fn generate_atom_xml(connection: &PgConnection) -> Result<()> { let xml_output_dir = &SETTINGS.atom.xml_output_dir; fs::create_dir_all(&xml_output_dir).expect("create xml_output_dir failed"); for item in &SETTINGS.topics { let topic = &item.topic; debug!("generate atom for topic = {}", topic); let posts_result = db::get_allow_posts(&connection, topic); match posts_result { Ok(posts) => { let atomstring = atom(&connection, posts); let fpath = 
Path::new(&xml_output_dir).join(topic); let mut file = match fs::File::create(&fpath) { Ok(file) => file, Err(e) => { return Err(anyhow!( "create file failed: {}, fpath = {}", fpath.as_os_str().to_string_lossy(), e )) } }; file.write_all(atomstring.as_bytes())?; } Err(e) => error!("get_allow_posts failed: {}", e), } } Ok(()) } pub fn atom(connection: &PgConnection, posts: Vec<PostPartial>) -> String { use atom_syndication::Content; use atom_syndication::Entry; let mut generator = Generator::default(); generator.set_value("PRESSone Atom Generator"); let mut feed = Feed::default(); feed.set_generator(generator); let mut entries = Vec::new(); for post in posts { debug!("generate atom for post file_hash = {} ", post.file_hash); let result_content = db::get_content(connection, &post.file_hash); match result_content { Ok(post_content) => { let markdown_attrs = frontmatter::parse(&post_content.content); debug!( "post content title = {} author = {} published = {}", markdown_attrs.title, markdown_attrs.author, markdown_attrs.published ); let mut feed_content = Content::default(); feed_content.set_content_type("text/markdown".to_string()); feed_content.set_value(format!("<![CDATA[{}]]>", post_content.content)); let mut person = Person::default(); person.set_name(&markdown_attrs.author); let mut entry = Entry::default(); entry.set_id(&post.publish_tx_id); entry.set_title(&markdown_attrs.title); entry.set_published(markdown_attrs.published); entry.set_authors(vec![person]); entry.set_content(feed_content); entries.push(entry); // check and send webhook notify if let Err(e) = check_and_send_webhook(connection, &post.publish_tx_id) { error!("check_and_send_webhook failed: {}", e); } } Err(e) => error!("get content failed: {:?}", e), } } let mut feed = Feed::default(); feed.set_entries(entries); feed.write_to(sink()).expect("feed.write_to failed"); feed.to_string() } pub fn check_and_send_webhook(conn: &PgConnection, data_id: &str) -> Result<()> { let notify_result = 
db::get_notify_by_data_id(conn, data_id); match notify_result { Ok(notify) => { if notify.success || notify.retries >= 3 { debug!( "block_num = {} trx_id = {} notify webhook success or retries >= 3, skip ...", notify.block_num, notify.trx_id ); return Ok(()); } let payload = prs::NotifyPayload { block: prs::NotifyBlock { data_id: notify.data_id.clone(), block_num: notify.block_num, trx_id: notify.trx_id, }, }; if let Some(notify_url) = SETTINGS.get_webhook_by_topic(&notify.topic) { debug!( "notify data_id = {} topic = {} success = {}", notify.data_id, notify.topic, notify.success ); debug!("send notify payload to {}", notify_url); match prs::notify_webhook(&payload, &notify_url) { Ok(status_code) => { let success = status_code == 200; db::update_notify_status(conn, &notify.data_id, success)?; } Err(e) => error!( "block_num = {}, url = {}, notify_webhook failed: {}", notify.block_num, notify_url, e ), } } else { warn!( "can not find webhook url for topic = {}, skip ...", notify.topic ); } } Err(e) => error!("get notify by data_id = {} failed: {:?}", data_id, e), } Ok(()) }
38.854031
109
0.434227
29909f2408ac5a6f3ea1eff4af90577fb62a6302
2,828
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use ffi; use glib::StaticType; use glib::Type; use glib::translate::*; use glib::value::FromValue; use glib::value::FromValueOptional; use glib::value::SetValue; use glib::value::Value; use gobject_ffi; bitflags! { pub struct PipelineFlags: u32 { const AUDIO_PREVIEW = 1; const VIDEO_PREVIEW = 2; const FULL_PREVIEW = 3; const RENDER = 4; const SMART_RENDER = 8; } } #[doc(hidden)] impl ToGlib for PipelineFlags { type GlibType = ffi::GESPipelineFlags; fn to_glib(&self) -> ffi::GESPipelineFlags { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GESPipelineFlags> for PipelineFlags { fn from_glib(value: ffi::GESPipelineFlags) -> PipelineFlags { skip_assert_initialized!(); PipelineFlags::from_bits_truncate(value) } } impl StaticType for PipelineFlags { fn static_type() -> Type { unsafe { from_glib(ffi::ges_pipeline_flags_get_type()) } } } impl<'a> FromValueOptional<'a> for PipelineFlags { unsafe fn from_value_optional(value: &Value) -> Option<Self> { Some(FromValue::from_value(value)) } } impl<'a> FromValue<'a> for PipelineFlags { unsafe fn from_value(value: &Value) -> Self { from_glib(gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl SetValue for PipelineFlags { unsafe fn set_value(value: &mut Value, this: &Self) { gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, this.to_glib()) } } bitflags! 
{ pub struct TrackType: u32 { const UNKNOWN = 1; const AUDIO = 2; const VIDEO = 4; const TEXT = 8; const CUSTOM = 16; } } #[doc(hidden)] impl ToGlib for TrackType { type GlibType = ffi::GESTrackType; fn to_glib(&self) -> ffi::GESTrackType { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GESTrackType> for TrackType { fn from_glib(value: ffi::GESTrackType) -> TrackType { skip_assert_initialized!(); TrackType::from_bits_truncate(value) } } impl StaticType for TrackType { fn static_type() -> Type { unsafe { from_glib(ffi::ges_track_type_get_type()) } } } impl<'a> FromValueOptional<'a> for TrackType { unsafe fn from_value_optional(value: &Value) -> Option<Self> { Some(FromValue::from_value(value)) } } impl<'a> FromValue<'a> for TrackType { unsafe fn from_value(value: &Value) -> Self { from_glib(gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl SetValue for TrackType { unsafe fn set_value(value: &mut Value, this: &Self) { gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, this.to_glib()) } }
24.17094
82
0.650636
d621ca1a3a50594aa52460eec6d01b2d6b72c1d4
10,392
use crate::contract::{execute, instantiate, query_config}; use crate::swap::MoneyMarketCw20HookMsg; use crate::testing::mock_querier::mock_dependencies; use cosmwasm_std::testing::{mock_env, mock_info, MOCK_CONTRACT_ADDR}; use cosmwasm_std::{to_binary, Coin, CosmosMsg, Decimal, SubMsg, Uint128, WasmMsg}; use cw20::Cw20ExecuteMsg; use mirror_protocol::collector::{ConfigResponse, ExecuteMsg, InstantiateMsg}; use mirror_protocol::gov::Cw20HookMsg::DepositReward; use terra_cosmwasm::{TerraMsg, TerraMsgWrapper, TerraRoute}; use terraswap::asset::{Asset, AssetInfo}; use terraswap::pair::{Cw20HookMsg as TerraswapCw20HookMsg, ExecuteMsg as TerraswapExecuteMsg}; #[test] fn proper_initialization() { let mut deps = mock_dependencies(&[]); let msg = InstantiateMsg { owner: "owner0000".to_string(), terraswap_factory: "terraswapfactory".to_string(), distribution_contract: "gov0000".to_string(), mirror_token: "mirror0000".to_string(), base_denom: "uusd".to_string(), aust_token: "aust0000".to_string(), anchor_market: "anchormarket0000".to_string(), bluna_token: "bluna0000".to_string(), bluna_swap_denom: "uluna".to_string(), }; let info = mock_info("addr0000", &[]); // we can just call .unwrap() to assert this was a success let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap(); // it worked, let's query the state let config: ConfigResponse = query_config(deps.as_ref()).unwrap(); assert_eq!("terraswapfactory", config.terraswap_factory.as_str()); assert_eq!("uusd", config.base_denom.as_str()); } #[test] fn test_convert() { let mut deps = mock_dependencies(&[Coin { denom: "uusd".to_string(), amount: Uint128::from(100u128), }]); deps.querier.with_token_balances(&[( &"tokenAPPL".to_string(), &[(&MOCK_CONTRACT_ADDR.to_string(), &Uint128::from(100u128))], )]); deps.querier.with_tax( Decimal::percent(1), &[(&"uusd".to_string(), &Uint128::from(1000000u128))], ); deps.querier.with_terraswap_pairs(&[ (&"uusdtokenAPPL".to_string(), &"pairAPPL".to_string()), 
(&"uusdtokenMIRROR".to_string(), &"pairMIRROR".to_string()), ]); let msg = InstantiateMsg { owner: "owner0000".to_string(), terraswap_factory: "terraswapfactory".to_string(), distribution_contract: "gov0000".to_string(), mirror_token: "tokenMIRROR".to_string(), base_denom: "uusd".to_string(), aust_token: "aust0000".to_string(), anchor_market: "anchormarket0000".to_string(), bluna_token: "bluna0000".to_string(), bluna_swap_denom: "uluna".to_string(), }; let info = mock_info("addr0000", &[]); let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap(); let msg = ExecuteMsg::Convert { asset_token: "tokenAPPL".to_string(), }; let info = mock_info("addr0000", &[]); let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap(); assert_eq!( res.messages, vec![SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: "tokenAPPL".to_string(), msg: to_binary(&Cw20ExecuteMsg::Send { contract: "pairAPPL".to_string(), amount: Uint128::from(100u128), msg: to_binary(&TerraswapCw20HookMsg::Swap { max_spread: None, belief_price: None, to: None, }) .unwrap(), }) .unwrap(), funds: vec![], }))] ); let msg = ExecuteMsg::Convert { asset_token: "tokenMIRROR".to_string(), }; let info = mock_info("addr0000", &[]); let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap(); // tax deduct 100 => 99 assert_eq!( res.messages, vec![SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: "pairMIRROR".to_string(), msg: to_binary(&TerraswapExecuteMsg::Swap { offer_asset: Asset { info: AssetInfo::NativeToken { denom: "uusd".to_string() }, amount: Uint128::from(99u128), }, max_spread: None, belief_price: None, to: None, }) .unwrap(), funds: vec![Coin { amount: Uint128::from(99u128), denom: "uusd".to_string(), }], }))] ); } #[test] fn test_convert_aust() { let mut deps = mock_dependencies(&[Coin { denom: "uusd".to_string(), amount: Uint128::from(100u128), }]); deps.querier.with_token_balances(&[( &"aust0000".to_string(), &[(&MOCK_CONTRACT_ADDR.to_string(), 
&Uint128::from(100u128))], )]); let msg = InstantiateMsg { owner: "owner0000".to_string(), terraswap_factory: "terraswapfactory".to_string(), distribution_contract: "gov0000".to_string(), mirror_token: "mirror0000".to_string(), base_denom: "uusd".to_string(), aust_token: "aust0000".to_string(), anchor_market: "anchormarket0000".to_string(), bluna_token: "bluna0000".to_string(), bluna_swap_denom: "uluna".to_string(), }; let info = mock_info("addr0000", &[]); let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap(); let msg = ExecuteMsg::Convert { asset_token: "aust0000".to_string(), }; let info = mock_info("addr0000", &[]); let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap(); assert_eq!( res.messages, vec![SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: "aust0000".to_string(), msg: to_binary(&Cw20ExecuteMsg::Send { contract: "anchormarket0000".to_string(), amount: Uint128::from(100u128), msg: to_binary(&MoneyMarketCw20HookMsg::RedeemStable {}).unwrap(), }) .unwrap(), funds: vec![], }))] ); } #[test] fn test_convert_bluna() { let mut deps = mock_dependencies(&[Coin { denom: "uluna".to_string(), amount: Uint128::from(100u128), }]); deps.querier.with_token_balances(&[( &"bluna0000".to_string(), &[(&MOCK_CONTRACT_ADDR.to_string(), &Uint128::from(100u128))], )]); deps.querier .with_terraswap_pairs(&[(&"ulunabluna0000".to_string(), &"pairbLuna".to_string())]); let msg = InstantiateMsg { owner: "owner0000".to_string(), terraswap_factory: "terraswapfactory".to_string(), distribution_contract: "gov0000".to_string(), mirror_token: "mirror0000".to_string(), base_denom: "uusd".to_string(), aust_token: "aust0000".to_string(), anchor_market: "anchormarket0000".to_string(), bluna_token: "bluna0000".to_string(), bluna_swap_denom: "uluna".to_string(), }; let info = mock_info("addr0000", &[]); let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap(); let msg = ExecuteMsg::Convert { asset_token: "bluna0000".to_string(), }; let info 
= mock_info("addr0000", &[]); let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap(); assert_eq!( res.messages, vec![ SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: "bluna0000".to_string(), msg: to_binary(&Cw20ExecuteMsg::Send { contract: "pairbLuna".to_string(), amount: Uint128::from(100u128), msg: to_binary(&TerraswapCw20HookMsg::Swap { max_spread: None, belief_price: None, to: None, }) .unwrap(), }) .unwrap(), funds: vec![], })), SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: MOCK_CONTRACT_ADDR.to_string(), msg: to_binary(&ExecuteMsg::LunaSwapHook {}).unwrap(), funds: vec![], })), ] ); // suppose we sell the bluna for 100uluna let msg = ExecuteMsg::LunaSwapHook {}; let info = mock_info("owner0000", &[]); let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap(); assert_eq!( res.messages, vec![SubMsg::new(CosmosMsg::Custom(TerraMsgWrapper { route: TerraRoute::Market, msg_data: TerraMsg::Swap { offer_coin: Coin { amount: Uint128::from(100u128), denom: "uluna".to_string() }, ask_denom: "uusd".to_string(), }, }))], ) } #[test] fn test_send() { let mut deps = mock_dependencies(&[]); deps.querier.with_token_balances(&[( &"mirror0000".to_string(), &[(&MOCK_CONTRACT_ADDR.to_string(), &Uint128::from(100u128))], )]); let msg = InstantiateMsg { owner: "owner0000".to_string(), terraswap_factory: "terraswapfactory".to_string(), distribution_contract: "gov0000".to_string(), mirror_token: "mirror0000".to_string(), base_denom: "uusd".to_string(), aust_token: "aust0000".to_string(), anchor_market: "anchormarket0000".to_string(), bluna_token: "bluna0000".to_string(), bluna_swap_denom: "uluna".to_string(), }; let info = mock_info("addr0000", &[]); let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap(); let msg = ExecuteMsg::Distribute {}; let info = mock_info("addr0000", &[]); let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap(); assert_eq!( res.messages, vec![SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute { 
contract_addr: "mirror0000".to_string(), msg: to_binary(&Cw20ExecuteMsg::Send { contract: "gov0000".to_string(), amount: Uint128::from(100u128), msg: to_binary(&DepositReward {}).unwrap(), }) .unwrap(), funds: vec![], }))] ) }
34.410596
94
0.569861
e9cb4a8a175c16aa66e11e1f1064921d67ea7717
15,999
/* * Licensed to Elasticsearch B.V. under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch B.V. licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ //! HTTP response components use crate::{ error::Error as ClientError, http::{headers::HeaderMap, Method, StatusCode, Url}, }; use serde::{ de, de::{DeserializeOwned, MapAccess, Visitor}, Deserialize, Deserializer, Serialize, }; use serde_json::Value; use std::{collections::BTreeMap, fmt, str::FromStr}; use void::Void; /// A response from Elasticsearch pub struct Response(reqwest::Response, Method); impl Response { /// Creates a new instance of an Elasticsearch response pub fn new(response: reqwest::Response, method: Method) -> Self { Self(response, method) } /// Get the response content-length, if known. /// /// Reasons it may not be known: /// /// - The server didn't send a `content-length` header. /// - The response is compressed and automatically decoded (thus changing /// the actual decoded length). pub fn content_length(&self) -> Option<u64> { self.0.content_length() } /// Gets the response content-type. pub fn content_type(&self) -> &str { self.0 .headers() .get(crate::http::headers::CONTENT_TYPE) .and_then(|value| value.to_str().ok()) .unwrap() } /// Turn the response into an [Error] if Elasticsearch returned an error. 
pub fn error_for_status_code(self) -> Result<Self, ClientError> { match self.0.error_for_status_ref() { Ok(_) => Ok(self), Err(err) => Err(err.into()), } } /// Turn the response into an [Error] if Elasticsearch returned an error. pub fn error_for_status_code_ref(&self) -> Result<&Self, ClientError> { match self.0.error_for_status_ref() { Ok(_) => Ok(self), Err(err) => Err(err.into()), } } /// Asynchronously reads the response body into an [Exception] if /// Elasticsearch returned a HTTP status code in the 400-599 range. /// /// Reading the response body consumes `self` pub async fn exception(self) -> Result<Option<Exception>, ClientError> { if self.status_code().is_client_error() || self.status_code().is_server_error() { let ex = self.json().await?; Ok(Some(ex)) } else { Ok(None) } } /// Asynchronously reads the response body as JSON /// /// Reading the response body consumes `self` pub async fn json<B>(self) -> Result<B, ClientError> where B: DeserializeOwned, { let body = self.0.json::<B>().await?; Ok(body) } /// Gets the response headers. pub fn headers(&self) -> &HeaderMap { self.0.headers() } /// Gets the request method. pub fn method(&self) -> Method { self.1 } /// Get the HTTP status code of the response pub fn status_code(&self) -> StatusCode { self.0.status() } /// Asynchronously reads the response body as plain text /// /// Reading the response body consumes `self` pub async fn text(self) -> Result<String, ClientError> { let body = self.0.text().await?; Ok(body) } /// Gets the request URL pub fn url(&self) -> &Url { self.0.url() } /// Gets the Deprecation warning response headers /// /// Deprecation headers signal the use of Elasticsearch functionality /// or features that are deprecated and will be removed in a future release. 
pub fn warning_headers(&self) -> impl Iterator<Item = &str> { self.0.headers().get_all("Warning").iter().map(|w| { let s = w.to_str().unwrap(); let first_quote = s.find('"').unwrap(); let last_quote = s.len() - 1; &s[first_quote + 1..last_quote] }) } } impl fmt::Debug for Response { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Response") .field("method", &self.method()) .field("url", self.url()) .field("status_code", &self.status_code()) .field("headers", self.headers()) .finish() } } /// An exception raised by Elasticsearch. /// /// Contains details that indicate why the exception was raised which can help to determine /// what subsequent action to take. #[serde_with::skip_serializing_none] #[derive(Debug, PartialEq, Deserialize, Serialize, Clone)] pub struct Exception { status: Option<u16>, #[serde(deserialize_with = "crate::string_or_struct")] error: Error, } impl Exception { /// The status code of the exception, if available. pub fn status(&self) -> Option<u16> { self.status } /// The details for the exception pub fn error(&self) -> &Error { &self.error } } /// Details about the exception raised by Elasticsearch #[serde_with::skip_serializing_none] #[derive(Debug, PartialEq, Deserialize, Serialize, Clone)] pub struct Error { #[serde(deserialize_with = "option_box_cause", default)] caused_by: Option<Box<Cause>>, #[serde(default = "BTreeMap::new", deserialize_with = "header_map")] header: BTreeMap<String, Vec<String>>, #[serde(default = "Vec::new")] root_cause: Vec<Cause>, reason: Option<String>, stack_trace: Option<String>, #[serde(rename = "type")] ty: Option<String>, #[serde(default = "BTreeMap::new", flatten)] additional_details: BTreeMap<String, Value>, } /// Deserializes the headers map where the map values may be a string or a sequence of strings fn header_map<'de, D>(deserializer: D) -> Result<BTreeMap<String, Vec<String>>, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] struct Wrapper(#[serde(deserialize_with = 
"crate::string_or_seq_string")] Vec<String>); let v: BTreeMap<String, Wrapper> = BTreeMap::deserialize(deserializer)?; Ok(v.into_iter().map(|(k, Wrapper(v))| (k, v)).collect()) } impl Error { /// The cause of the exception pub fn caused_by(&self) -> Option<&Cause> { self.caused_by.as_deref() } /// The root causes for the exception pub fn root_cause(&self) -> &Vec<Cause> { &self.root_cause } /// The headers for the exception pub fn header(&self) -> &BTreeMap<String, Vec<String>> { &self.header } /// The reason for the exception, if available. pub fn reason(&self) -> Option<&str> { self.reason.as_deref() } /// The exception stack trace, if available. /// /// Available if `error_trace` is specified on the request pub fn stack_trace(&self) -> Option<&str> { self.stack_trace.as_deref() } /// The type of exception, if available. pub fn ty(&self) -> Option<&str> { self.ty.as_deref() } /// Additional details about the cause. /// /// Elasticsearch can return additional details about an exception, depending /// on context, which do not map to fields on [Error]. These are collected here pub fn additional_details(&self) -> &BTreeMap<String, Value> { &self.additional_details } } // An error in an Elasticsearch exception can be returned as a simple message string only, or // as a JSON object. 
Handle both cases by corralling the simple message into the reason field impl FromStr for Error { type Err = Void; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(Error { caused_by: None, header: Default::default(), root_cause: Vec::new(), reason: Some(s.to_string()), stack_trace: None, ty: None, additional_details: Default::default(), }) } } /// The cause of an exception #[serde_with::skip_serializing_none] #[derive(Debug, PartialEq, Deserialize, Serialize, Clone)] pub struct Cause { #[serde(deserialize_with = "option_box_cause", default)] caused_by: Option<Box<Cause>>, reason: Option<String>, stack_trace: Option<String>, #[serde(rename = "type")] ty: Option<String>, #[serde(default = "BTreeMap::new", flatten)] additional_details: BTreeMap<String, Value>, } /// Deserializes a string or a map into Some boxed [Cause]. A missing field /// for `caused_by` is handled by serde's default attribute on the struct field, /// which will assign None to the field. fn option_box_cause<'de, D>(deserializer: D) -> Result<Option<Box<Cause>>, D::Error> where D: Deserializer<'de>, { struct CauseVisitor; impl<'de> Visitor<'de> for CauseVisitor { type Value = Cause; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("string or map") } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: de::Error, { Ok(Cause { caused_by: None, reason: Some(value.to_string()), stack_trace: None, ty: None, additional_details: Default::default(), }) } fn visit_map<M>(self, map: M) -> Result<Self::Value, M::Error> where M: MapAccess<'de>, { Deserialize::deserialize(de::value::MapAccessDeserializer::new(map)) } } deserializer .deserialize_any(CauseVisitor) .map(|c| Some(Box::new(c))) } impl Cause { /// The cause of the exception pub fn caused_by(&self) -> Option<&Cause> { self.caused_by.as_deref() } /// The reason for the exception pub fn reason(&self) -> Option<&str> { self.reason.as_deref() } /// The exception stack trace, if available. 
/// /// Available if `error_trace` is specified on the request pub fn stack_trace(&self) -> Option<&str> { self.stack_trace.as_deref() } /// The type of exception, if available. pub fn ty(&self) -> Option<&str> { self.ty.as_deref() } /// Additional details about the cause. /// /// Elasticsearch can return additional details about an exception, depending /// on context, which do not map to fields on [Error]. These are collected here pub fn additional_details(&self) -> &BTreeMap<String, Value> { &self.additional_details } } #[cfg(test)] pub mod tests { use crate::http::response::Exception; use serde_json::json; #[test] fn deserialize_error_string() -> Result<(), failure::Error> { let json = r#"{"error":"no handler found for uri [/test_1/test/1/_update?_source=foo%2Cbar] and method [POST]"}"#; let ex: Exception = serde_json::from_str(json)?; assert_eq!(ex.status(), None); assert_eq!(ex.error().reason(), Some("no handler found for uri [/test_1/test/1/_update?_source=foo%2Cbar] and method [POST]")); assert_eq!(ex.error().ty(), None); Ok(()) } #[test] fn deserialize_illegal_argument_exception() -> Result<(), failure::Error> { let json = r#"{ "error": { "root_cause": [{ "type": "illegal_argument_exception", "reason": "Missing mandatory contexts in context query" }], "type": "search_phase_execution_exception", "reason": "all shards failed", "phase": "query", "grouped": true, "header": { "WWW-Authenticate": "Bearer: token", "x": ["y", "z"] }, "failed_shards": [{ "shard": 0, "index": "test", "node": "APOkVK-rQi2Ll6CcAdeR6Q", "reason": { "type": "illegal_argument_exception", "reason": "Missing mandatory contexts in context query" } }], "caused_by": { "type": "illegal_argument_exception", "reason": "Missing mandatory contexts in context query", "caused_by": { "type": "illegal_argument_exception", "reason": "Missing mandatory contexts in context query" } } }, "status": 400 }"#; let ex: Exception = serde_json::from_str(json)?; assert_eq!(ex.status(), Some(400)); let error = 
ex.error(); assert_eq!(error.root_cause().len(), 1); assert_eq!( error.root_cause()[0].ty(), Some("illegal_argument_exception") ); assert_eq!( error.root_cause()[0].reason(), Some("Missing mandatory contexts in context query") ); assert_eq!(error.header().len(), 2); assert_eq!( error.header().get("WWW-Authenticate"), Some(&vec!["Bearer: token".to_string()]) ); assert_eq!( error.header().get("x"), Some(&vec!["y".to_string(), "z".to_string()]) ); assert!(error.caused_by().is_some()); let caused_by = error.caused_by().unwrap(); assert_eq!(caused_by.ty(), Some("illegal_argument_exception")); assert_eq!( caused_by.reason(), Some("Missing mandatory contexts in context query") ); assert!(caused_by.caused_by().is_some()); let caused_by_caused_by = caused_by.caused_by().unwrap(); assert_eq!(caused_by_caused_by.ty(), Some("illegal_argument_exception")); assert_eq!( caused_by_caused_by.reason(), Some("Missing mandatory contexts in context query") ); assert!(error.additional_details().len() > 0); assert_eq!( error.additional_details().get("phase"), Some(&json!("query")) ); assert_eq!( error.additional_details().get("grouped"), Some(&json!(true)) ); Ok(()) } #[test] fn deserialize_index_not_found_exception() -> Result<(), failure::Error> { let json = r#"{ "error": { "root_cause": [{ "type": "index_not_found_exception", "reason": "no such index [test_index]", "resource.type": "index_or_alias", "resource.id": "test_index", "index_uuid": "_na_", "index": "test_index" }], "type": "index_not_found_exception", "reason": "no such index [test_index]", "resource.type": "index_or_alias", "resource.id": "test_index", "index_uuid": "_na_", "index": "test_index" }, "status": 404 }"#; let ex: Exception = serde_json::from_str(json)?; assert_eq!(ex.status(), Some(404)); let error = ex.error(); assert_eq!(error.ty(), Some("index_not_found_exception")); assert_eq!(error.reason(), Some("no such index [test_index]")); assert_eq!( error.additional_details().get("index").unwrap(), 
&json!("test_index") ); assert_eq!(error.root_cause().len(), 1); assert_eq!( error.root_cause()[0].ty(), Some("index_not_found_exception") ); Ok(()) } }
31.494094
135
0.584037
67da5fe3df0ff87aa5e49c59739b291e0e5b913f
1,348
use num::Num;
use std::ops::{Add, Div, Mul, Sub};

/// Arithmetic operations supported by the expression tree.
///
/// NOTE(review): Rust convention is UpperCamelCase variants (`Add`, `Sub`, ...);
/// the SCREAMING_CASE names are kept so existing callers keep compiling.
enum Operation {
    ADD,  // addition
    SUB,  // subtraction
    MULT, // multiplication
    DIV,  // division
    IDIV, // integer (floor) division
    EXP,  // exponentiation
    FACT, // factorial
}

/// Something that can be evaluated to a numeric value by applying an
/// `Operation`. Returns `None` when the operation is not applicable.
trait Evaluable<T: Num> {
    // The parameter is named here: anonymous trait-method parameters
    // (`fn evaluate(&self, Operation)`) were a Rust 2015 quirk and are a
    // hard error from the 2018 edition onward.
    fn evaluate(&self, op: Operation) -> Option<T>;
}

/// Rendering of an expression as text.
trait Printable {
    /// Infix notation, e.g. `1 + 2`.
    fn to_infix_string(&self) -> String;
    /// Default string form — presumably prefix/postfix; TODO confirm once implemented.
    fn to_string(&self) -> String;
}

/// A binary expression node: a left operand and an optional right subtree.
///
/// NOTE(review): `Option<Box<Expr<T>>>` would avoid a heap allocation for the
/// `None` case (niche optimization); the field type is kept as written to
/// preserve the existing layout for any code that constructs it.
struct Expr<T: Num> {
    l: T,
    r: Box<Option<Expr<T>>>,
}

impl<T: Num> Evaluable<T> for Expr<T> {
    fn evaluate(&self, _op: Operation) -> Option<T> {
        // `todo!` is the idiomatic marker for unfinished code (still panics,
        // like the previous `panic!("not implemented yet!")`).
        todo!("Expr::evaluate is not implemented yet")
    }
}
22.847458
53
0.482196
2fba5ef312256b0592ba6e6602109aac12710d5d
2,896
/// org-agenda is a simple command-line application for displaying today's or this week's agenda
extern crate orgmode;
extern crate colored;
extern crate chrono;

use std::env;
use std::path::Path;
use orgmode::{Library, Agenda, AgendaRange, AgendaEntryKind, Timestamp, today, format_duration};
use colored::Colorize;

/// Entry point: each CLI argument is treated as a path to an org file. All
/// files are loaded into one `Library`, then this week's agenda is printed.
fn main() {
    let mut library = Library::new();
    for argument in env::args().skip(1) {
        library.open(Path::new(&argument))
            .expect("Unable to open path");
    }
    let agenda = library.agenda_this_week();
    print_agenda(&agenda);
}

/// Renders the agenda to stdout: a banner, then one dated section per day,
/// with each entry's category, time, kind, keyword, priority, title and
/// time-spent/effort summary on a single line.
fn print_agenda(agenda: &Agenda) {
    let title = match agenda.range {
        AgendaRange::Day => "Daily",
        AgendaRange::Week => "Weekly",
    };
    println!("{}", format!("==================== {} Agenda ====================", title).white().bold());
    let mut first = true;
    let today = today();
    for date in agenda.dates() {
        // Show the week number (W%W) only on the first printed day.
        let date_format = if first {
            first = false;
            "%_d %B %Y W%W"
        } else {
            "%_d %B %Y"
        };
        // Highlight the current day in green; all other days use default color.
        let color = if date == today { "green" } else { "normal" };
        println!("{}", format!("{:11}{}", date.format("%A").to_string(), date.format(date_format)).bold().color(color));
        if date == today {
            // NOTE(review): these two loops are empty placeholders — overdue
            // scheduled/deadline entries are iterated but never printed.
            // Presumably output was intended here; confirm before relying on it.
            for entry in agenda.past_scheduled.iter() {
            }
            for entry in agenda.past_deadline.iter() {
            }
        }
        for entry in agenda.entries(&date) {
            // Category column, left-padded to a fixed width.
            print!(" {:10}", format!("{}:", entry.category));
            print_time(&entry.timestamp);
            match entry.kind {
                AgendaEntryKind::Deadline => print!(" Deadline: "),
                AgendaEntryKind::Scheduled => print!(" Scheduled: "),
                _ => {}
            }
            // TODO keyword (e.g. TODO/DONE) in blue, if present.
            if let Some(ref keyword) = entry.headline.keyword {
                print!(" {}", keyword.blue());
            }
            // Priority cookie like [#A] in red, if present.
            if let Some(ref priority) = entry.headline.priority {
                print!(" {}", format!("[#{}]", priority).red());
            }
            print!(" {}", entry.headline.title);
            // Clocked time and optional effort estimate, rendered as [spent/effort].
            if !entry.time_spent.is_zero() || entry.effort.is_some() {
                print!(" {}", format!("[{}", format_duration(&entry.time_spent)).bold());
                if let Some(ref effort) = entry.effort {
                    print!("{}", format!("/{}", format_duration(effort)).bold());
                }
                print!("{}", "]".bold());
            }
            println!();
        }
    }
}

/// Prints the timestamp's start time as ` H:MM`; appends `-H:MM` when an end
/// time exists, or `......` as a visual filler when it does not. Timestamps
/// with no time component print nothing.
fn print_time(timestamp: &Timestamp) {
    if let Some(start_time) = timestamp.time {
        print!(" {}", start_time.format("%_H:%M"));
        if let Some(end_time) = timestamp.end_time {
            print!("-{}", end_time.format("%_H:%M"));
        } else {
            print!("......");
        }
    }
}
29.55102
105
0.491713
3a910dc0295e270c310f460ea4d99caed5ec79cf
567
use crate::core::H256;
use crate::{packed, prelude::*, vec::Vec};

impl Pack<packed::Byte32> for H256 {
    /// Converts an `H256` into a molecule `Byte32`.
    fn pack(&self) -> packed::Byte32 {
        // An H256 is a fixed 32-byte hash, so building a Byte32 from its
        // slice cannot fail; the expect documents that invariant.
        packed::Byte32::from_slice(self.as_slice()).expect("impossible: fail to pack CKB H256")
    }
}

impl<'r> Unpack<H256> for packed::Byte32Reader<'r> {
    /// Converts a borrowed molecule `Byte32` back into an `H256` by copying
    /// its 32 bytes.
    fn unpack(&self) -> H256 {
        let ptr = self.as_slice().as_ptr() as *const [u8; 32];
        // SAFETY: presumably a Byte32Reader's backing slice is exactly 32
        // bytes (molecule fixed-size Byte32 — confirm in the generated
        // bindings), so the read is in-bounds; `[u8; 32]` has alignment 1,
        // so alignment cannot be violated.
        let r = unsafe { *ptr };
        r.into()
    }
}

// Derive the owned-entity Unpack impl and the Vec<H256> <-> Byte32Vec
// conversions from the reader impl above.
impl_conversion_for_entity_unpack!(H256, Byte32);
impl_conversion_for_vector!(H256, Byte32Vec, Byte32VecReader);
28.35
95
0.638448
4868708b89e6268f237489efcc9f09f4871f8a33
3,100
use crate::facts::*;
use std::collections::HashMap;

/// When we load facts out of the table, they are essentially random
/// strings. We create an intern table to map those to small integers.
// NOTE(review): `crate` visibility is the unstable crate_visibility_modifier
// feature (pre-2018 nightly); on current Rust this would be `pub(crate)`.
crate struct Interner<TargetType: From<usize> + Copy> {
    // Forward map: string -> small integer id.
    strings: HashMap<String, TargetType>,
    // Reverse map: id (used as an index) -> original string.
    rev_strings: Vec<String>,
}

impl<TargetType> Interner<TargetType>
where
    TargetType: From<usize> + Into<usize> + Copy,
{
    fn new() -> Self {
        Self {
            strings: HashMap::new(),
            rev_strings: vec![],
        }
    }

    /// Looks up the original string for an interned id.
    /// Panics if `data` was not produced by this interner (index out of range).
    crate fn untern(&self, data: TargetType) -> &str {
        let data: usize = data.into();
        &self.rev_strings[data]
    }

    /// Interns `data`: returns the existing id if this string was seen
    /// before, otherwise assigns the next sequential id.
    crate fn intern(&mut self, data: &str) -> TargetType {
        if let Some(&interned) = self.strings.get(data) {
            return interned;
        }

        // New entry: its id is the current table size, which also equals the
        // index it will occupy in `rev_strings`.
        let index = TargetType::from(self.strings.len());
        self.rev_strings.push(data.to_string());
        *self.strings.entry(data.to_string()).or_insert(index)
    }
}

/// One interner per atom kind so that ids from different domains
/// (regions, loans, points) cannot be confused with each other.
crate struct InternerTables {
    crate regions: Interner<Region>,
    crate loans: Interner<Loan>,
    crate points: Interner<Point>,
}

impl InternerTables {
    crate fn new() -> Self {
        Self {
            regions: Interner::new(),
            loans: Interner::new(),
            points: Interner::new(),
        }
    }
}

/// Conversion from raw parsed input into interned form, threading the
/// shared `InternerTables` through the conversion.
crate trait InternTo<To> {
    fn intern(tables: &mut InternerTables, input: Self) -> To;
}

// Generates an `InternTo<$t>` impl for `&str` that dispatches to the
// matching interner field on `InternerTables`.
macro_rules! intern_impl {
    ($t:ident, $field:ident) => {
        impl InternTo<$t> for &str {
            fn intern(tables: &mut InternerTables, input: &str) -> $t {
                tables.$field.intern(input)
            }
        }
    };
}

intern_impl!(Region, regions);
intern_impl!(Loan, loans);
intern_impl!(Point, points);

// Tuples intern componentwise: pairs, triples, and quadruples below.
impl<A, FromA, B, FromB> InternTo<(A, B)> for (FromA, FromB)
where
    FromA: InternTo<A>,
    FromB: InternTo<B>,
{
    fn intern(tables: &mut InternerTables, input: (FromA, FromB)) -> (A, B) {
        let (from_a, from_b) = input;
        (FromA::intern(tables, from_a), FromB::intern(tables, from_b))
    }
}

impl<A, FromA, B, FromB, C, FromC> InternTo<(A, B, C)> for (FromA, FromB, FromC)
where
    FromA: InternTo<A>,
    FromB: InternTo<B>,
    FromC: InternTo<C>,
{
    fn intern(tables: &mut InternerTables, input: (FromA, FromB, FromC)) -> (A, B, C) {
        let (from_a, from_b, from_c) = input;
        (
            FromA::intern(tables, from_a),
            FromB::intern(tables, from_b),
            FromC::intern(tables, from_c),
        )
    }
}

impl<A, FromA, B, FromB, C, FromC, D, FromD> InternTo<(A, B, C, D)> for (FromA, FromB, FromC, FromD)
where
    FromA: InternTo<A>,
    FromB: InternTo<B>,
    FromC: InternTo<C>,
    FromD: InternTo<D>,
{
    fn intern(tables: &mut InternerTables, input: (FromA, FromB, FromC, FromD)) -> (A, B, C, D) {
        let (from_a, from_b, from_c, from_d) = input;
        (
            FromA::intern(tables, from_a),
            FromB::intern(tables, from_b),
            FromC::intern(tables, from_c),
            FromD::intern(tables, from_d),
        )
    }
}
26.724138
100
0.577097
627fb7a8fc2c0bb85f58337f5e5a04d508ed6929
1,849
// External imports use shabal::{Digest, Shabal384}; // Internal imports use crate::error::HshResult; use crate::hasher::Hasher; use crate::types::HashOutput; pub struct Shabal384Hasher; impl Hasher for Shabal384Hasher { type HashInput = (); fn hash(&self, _input: (), bytes: &[u8]) -> HshResult<HashOutput> { let mut hasher = Shabal384::new(); hasher.update(bytes); Ok(HashOutput::new(hasher.finalize())) } } #[cfg(test)] mod test { use super::*; use proptest::prelude::*; #[test] fn test_shabal384_hash_password() { let password = "password"; let hash = Shabal384Hasher.hash_str((), password).unwrap(); assert_eq!("673f98958f04371edad63fe095e6903fdf894324b9944f36a6828e2b8b6dd2f4986cd4a61e29bf2866f021bbbaa02e8a", hash.as_hex()); } #[test] fn test_shabal384_hash_bytes() { let bytes = b"password"; let hash = Shabal384Hasher.hash((), bytes).unwrap(); assert_eq!("673f98958f04371edad63fe095e6903fdf894324b9944f36a6828e2b8b6dd2f4986cd4a61e29bf2866f021bbbaa02e8a", hash.as_hex()); } proptest! { #[test] fn fuzz_shabal384_hash_does_not_panic(pass in ".*") { let _ = Shabal384Hasher.hash_str((), &pass); } #[test] fn fuzz_shabal384_hash_bytes_does_not_panic( bytes in proptest::collection::vec(any::<u8>(), 0..1000) ) { let _ = Shabal384Hasher.hash((), &bytes); } #[test] fn fuzz_shabal384_hash_returns_ok(pass in ".*") { Shabal384Hasher.hash_str((), &pass).unwrap(); } #[test] fn fuzz_shabal384_hash_bytes_returns_ok( bytes in proptest::collection::vec(any::<u8>(), 0..1000) ) { Shabal384Hasher.hash((), &bytes).unwrap(); } } }
26.414286
134
0.611141
166347d99dba13228dc454700b6c1e3ce3f091e5
1,651
/// A minimal LIFO stack used by the bracket-matching checker below.
#[derive(Debug)]
struct Stack<T> {
    top: usize,   // number of elements currently stored
    data: Vec<T>, // backing storage; invariant: top == data.len()
}

impl<T> Stack<T> {
    /// Creates an empty stack.
    fn new() -> Self {
        Stack { top: 0, data: Vec::new() }
    }

    /// Pushes a value onto the top of the stack.
    fn push(&mut self, val: T) {
        self.data.push(val);
        self.top += 1;
    }

    /// Pops and returns the top value, or `None` if the stack is empty.
    fn pop(&mut self) -> Option<T> {
        if self.top == 0 {
            return None;
        }
        self.top -= 1;
        self.data.pop()
    }

    /// Returns true when the stack holds no elements.
    fn is_empty(&self) -> bool {
        0 == self.top
    }
}

/// Returns true when `open` and `close` form a matching bracket pair.
/// The check is positional: '(' pairs with ')', '[' with ']', '{' with '}'.
fn par_match(open: char, close: char) -> bool {
    let opens = "([{";
    let closers = ")]}";
    opens.find(open) == closers.find(close)
}

/// Checks that all three kinds of brackets in `par` are balanced and
/// properly nested. Non-bracket characters are ignored.
///
/// Rewritten to iterate `par.chars()` directly with early returns, instead
/// of buffering every character into a `Vec<char>` and driving a manual
/// index/flag loop — same results, less memory, clearer control flow.
fn par_checker3(par: &str) -> bool {
    let mut stack = Stack::new();
    for c in par.chars() {
        match c {
            // Opening bracket: remember it for its future closer.
            '(' | '[' | '{' => stack.push(c),
            // Closing bracket: must match the most recent unmatched opener.
            ')' | ']' | '}' => match stack.pop() {
                Some(top) if par_match(top, c) => {}
                // Mismatched pair, or a closer with no opener left.
                _ => return false,
            },
            // Not a bracket: skip.
            _ => {}
        }
    }
    // Balanced only if every opener found its closer.
    stack.is_empty()
}

fn main() {
    let sa = "(2+3){func}[abc]";
    let sb = "(2+3)*(3-1";
    let res1 = par_checker3(sa);
    let res2 = par_checker3(sb);
    println!("sa balanced: {res1}, sb balanced: {res2}");
}
20.382716
57
0.433071
f813e57fb59328d0a14317316b4fe48bc5e13c47
53,952
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The 32-bit floating point type. //! //! *[See also the `f32` primitive type](../primitive.f32.html).* #![allow(missing_docs)] #[cfg(not(test))] use core::num; #[cfg(not(test))] use std::intrinsics; #[cfg(not(test))] use libc::c_int; #[cfg(not(test))] use std::num::FpCategory; pub use core::f32::{RADIX, MANTISSA_DIGITS, DIGITS, EPSILON}; pub use core::f32::{MIN_EXP, MAX_EXP, MIN_10_EXP}; pub use core::f32::{MAX_10_EXP, NAN, INFINITY, NEG_INFINITY}; pub use core::f32::{MIN, MIN_POSITIVE, MAX}; pub use core::f32::consts; #[allow(dead_code)] mod cmath { use libc::{c_float, c_int}; extern "C" { pub fn cbrtf(n: c_float) -> c_float; pub fn erff(n: c_float) -> c_float; pub fn erfcf(n: c_float) -> c_float; pub fn expm1f(n: c_float) -> c_float; pub fn fdimf(a: c_float, b: c_float) -> c_float; pub fn fmaxf(a: c_float, b: c_float) -> c_float; pub fn fminf(a: c_float, b: c_float) -> c_float; pub fn fmodf(a: c_float, b: c_float) -> c_float; pub fn ilogbf(n: c_float) -> c_int; pub fn logbf(n: c_float) -> c_float; pub fn log1pf(n: c_float) -> c_float; pub fn modff(n: c_float, iptr: &mut c_float) -> c_float; pub fn nextafterf(x: c_float, y: c_float) -> c_float; pub fn tgammaf(n: c_float) -> c_float; #[cfg_attr(all(windows, target_env = "msvc"), link_name = "__lgammaf_r")] pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float; #[cfg_attr(all(windows, target_env = "msvc"), link_name = "_hypotf")] pub fn hypotf(x: c_float, y: c_float) -> c_float; } // See the comments in the `floor` function for why MSVC is special 
// here. #[cfg(not(target_env = "msvc"))] extern "C" { pub fn acosf(n: c_float) -> c_float; pub fn asinf(n: c_float) -> c_float; pub fn atan2f(a: c_float, b: c_float) -> c_float; pub fn atanf(n: c_float) -> c_float; pub fn coshf(n: c_float) -> c_float; pub fn frexpf(n: c_float, value: &mut c_int) -> c_float; pub fn ldexpf(x: c_float, n: c_int) -> c_float; pub fn sinhf(n: c_float) -> c_float; pub fn tanf(n: c_float) -> c_float; pub fn tanhf(n: c_float) -> c_float; } #[cfg(target_env = "msvc")] pub use self::shims::*; #[cfg(target_env = "msvc")] mod shims { use libc::{c_float, c_int}; #[inline] pub unsafe fn acosf(n: c_float) -> c_float { f64::acos(n as f64) as c_float } #[inline] pub unsafe fn asinf(n: c_float) -> c_float { f64::asin(n as f64) as c_float } #[inline] pub unsafe fn atan2f(n: c_float, b: c_float) -> c_float { f64::atan2(n as f64, b as f64) as c_float } #[inline] pub unsafe fn atanf(n: c_float) -> c_float { f64::atan(n as f64) as c_float } #[inline] pub unsafe fn coshf(n: c_float) -> c_float { f64::cosh(n as f64) as c_float } #[inline] #[allow(deprecated)] pub unsafe fn frexpf(x: c_float, value: &mut c_int) -> c_float { let (a, b) = f64::frexp(x as f64); *value = b as c_int; a as c_float } #[inline] #[allow(deprecated)] pub unsafe fn ldexpf(x: c_float, n: c_int) -> c_float { f64::ldexp(x as f64, n as isize) as c_float } #[inline] pub unsafe fn sinhf(n: c_float) -> c_float { f64::sinh(n as f64) as c_float } #[inline] pub unsafe fn tanf(n: c_float) -> c_float { f64::tan(n as f64) as c_float } #[inline] pub unsafe fn tanhf(n: c_float) -> c_float { f64::tanh(n as f64) as c_float } } } #[cfg(not(test))] #[lang = "f32"] impl f32 { /// Returns `true` if this value is `NaN` and false otherwise. 
/// /// ``` /// use std::f32; /// /// let nan = f32::NAN; /// let f = 7.0_f32; /// /// assert!(nan.is_nan()); /// assert!(!f.is_nan()); /// ``` #[inline] pub fn is_nan(self) -> bool { num::Float::is_nan(self) } /// Returns `true` if this value is positive infinity or negative infinity and /// false otherwise. /// /// ``` /// use std::f32; /// /// let f = 7.0f32; /// let inf = f32::INFINITY; /// let neg_inf = f32::NEG_INFINITY; /// let nan = f32::NAN; /// /// assert!(!f.is_infinite()); /// assert!(!nan.is_infinite()); /// /// assert!(inf.is_infinite()); /// assert!(neg_inf.is_infinite()); /// ``` #[inline] pub fn is_infinite(self) -> bool { num::Float::is_infinite(self) } /// Returns `true` if this number is neither infinite nor `NaN`. /// /// ``` /// use std::f32; /// /// let f = 7.0f32; /// let inf = f32::INFINITY; /// let neg_inf = f32::NEG_INFINITY; /// let nan = f32::NAN; /// /// assert!(f.is_finite()); /// /// assert!(!nan.is_finite()); /// assert!(!inf.is_finite()); /// assert!(!neg_inf.is_finite()); /// ``` #[inline] pub fn is_finite(self) -> bool { num::Float::is_finite(self) } /// Returns `true` if the number is neither zero, infinite, /// [subnormal][subnormal], or `NaN`. /// /// ``` /// use std::f32; /// /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32 /// let max = f32::MAX; /// let lower_than_min = 1.0e-40_f32; /// let zero = 0.0_f32; /// /// assert!(min.is_normal()); /// assert!(max.is_normal()); /// /// assert!(!zero.is_normal()); /// assert!(!f32::NAN.is_normal()); /// assert!(!f32::INFINITY.is_normal()); /// // Values between `0` and `min` are Subnormal. /// assert!(!lower_than_min.is_normal()); /// ``` /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number #[inline] pub fn is_normal(self) -> bool { num::Float::is_normal(self) } /// Returns the floating point category of the number. If only one property /// is going to be tested, it is generally faster to use the specific /// predicate instead. 
/// /// ``` /// use std::num::FpCategory; /// use std::f32; /// /// let num = 12.4_f32; /// let inf = f32::INFINITY; /// /// assert_eq!(num.classify(), FpCategory::Normal); /// assert_eq!(inf.classify(), FpCategory::Infinite); /// ``` #[inline] pub fn classify(self) -> FpCategory { num::Float::classify(self) } /// Returns the mantissa, base 2 exponent, and sign as integers, respectively. /// The original number can be recovered by `sign * mantissa * 2 ^ exponent`. /// The floating point encoding is documented in the [Reference][floating-point]. /// /// ``` /// #![feature(float_extras)] /// /// use std::f32; /// /// let num = 2.0f32; /// /// // (8388608, -22, 1) /// let (mantissa, exponent, sign) = num.integer_decode(); /// let sign_f = sign as f32; /// let mantissa_f = mantissa as f32; /// let exponent_f = num.powf(exponent as f32); /// /// // 1 * 8388608 * 2^(-22) == 2 /// let abs_difference = (sign_f * mantissa_f * exponent_f - num).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` /// [floating-point]: ../reference.html#machine-types #[inline] #[allow(deprecated)] pub fn integer_decode(self) -> (u64, i16, i8) { num::Float::integer_decode(self) } /// Returns the largest integer less than or equal to a number. /// /// ``` /// let f = 3.99_f32; /// let g = 3.0_f32; /// /// assert_eq!(f.floor(), 3.0); /// assert_eq!(g.floor(), 3.0); /// ``` #[inline] pub fn floor(self) -> f32 { // On MSVC LLVM will lower many math intrinsics to a call to the // corresponding function. On MSVC, however, many of these functions // aren't actually available as symbols to call, but rather they are all // `static inline` functions in header files. This means that from a C // perspective it's "compatible", but not so much from an ABI // perspective (which we're worried about). // // The inline header functions always just cast to a f64 and do their // operation, so we do that here as well, but only for MSVC targets. 
// // Note that there are many MSVC-specific float operations which // redirect to this comment, so `floorf` is just one case of a missing // function on MSVC, but there are many others elsewhere. #[cfg(target_env = "msvc")] return (self as f64).floor() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::floorf32(self) }; } /// Returns the smallest integer greater than or equal to a number. /// /// ``` /// let f = 3.01_f32; /// let g = 4.0_f32; /// /// assert_eq!(f.ceil(), 4.0); /// assert_eq!(g.ceil(), 4.0); /// ``` #[inline] pub fn ceil(self) -> f32 { // see notes above in `floor` #[cfg(target_env = "msvc")] return (self as f64).ceil() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::ceilf32(self) }; } /// Returns the nearest integer to a number. Round half-way cases away from /// `0.0`. /// /// ``` /// let f = 3.3_f32; /// let g = -3.3_f32; /// /// assert_eq!(f.round(), 3.0); /// assert_eq!(g.round(), -3.0); /// ``` #[inline] pub fn round(self) -> f32 { unsafe { intrinsics::roundf32(self) } } /// Returns the integer part of a number. /// /// ``` /// let f = 3.3_f32; /// let g = -3.7_f32; /// /// assert_eq!(f.trunc(), 3.0); /// assert_eq!(g.trunc(), -3.0); /// ``` #[inline] pub fn trunc(self) -> f32 { unsafe { intrinsics::truncf32(self) } } /// Returns the fractional part of a number. /// /// ``` /// use std::f32; /// /// let x = 3.5_f32; /// let y = -3.5_f32; /// let abs_difference_x = (x.fract() - 0.5).abs(); /// let abs_difference_y = (y.fract() - (-0.5)).abs(); /// /// assert!(abs_difference_x <= f32::EPSILON); /// assert!(abs_difference_y <= f32::EPSILON); /// ``` #[inline] pub fn fract(self) -> f32 { self - self.trunc() } /// Computes the absolute value of `self`. Returns `NAN` if the /// number is `NAN`. 
/// /// ``` /// use std::f32; /// /// let x = 3.5_f32; /// let y = -3.5_f32; /// /// let abs_difference_x = (x.abs() - x).abs(); /// let abs_difference_y = (y.abs() - (-y)).abs(); /// /// assert!(abs_difference_x <= f32::EPSILON); /// assert!(abs_difference_y <= f32::EPSILON); /// /// assert!(f32::NAN.abs().is_nan()); /// ``` #[inline] pub fn abs(self) -> f32 { num::Float::abs(self) } /// Returns a number that represents the sign of `self`. /// /// - `1.0` if the number is positive, `+0.0` or `INFINITY` /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY` /// - `NAN` if the number is `NAN` /// /// ``` /// use std::f32; /// /// let f = 3.5_f32; /// /// assert_eq!(f.signum(), 1.0); /// assert_eq!(f32::NEG_INFINITY.signum(), -1.0); /// /// assert!(f32::NAN.signum().is_nan()); /// ``` #[inline] pub fn signum(self) -> f32 { num::Float::signum(self) } /// Returns `true` if `self`'s sign bit is positive, including /// `+0.0` and `INFINITY`. /// /// ``` /// use std::f32; /// /// let nan = f32::NAN; /// let f = 7.0_f32; /// let g = -7.0_f32; /// /// assert!(f.is_sign_positive()); /// assert!(!g.is_sign_positive()); /// // Requires both tests to determine if is `NaN` /// assert!(!nan.is_sign_positive() && !nan.is_sign_negative()); /// ``` #[inline] pub fn is_sign_positive(self) -> bool { num::Float::is_sign_positive(self) } /// Returns `true` if `self`'s sign is negative, including `-0.0` /// and `NEG_INFINITY`. /// /// ``` /// use std::f32; /// /// let nan = f32::NAN; /// let f = 7.0f32; /// let g = -7.0f32; /// /// assert!(!f.is_sign_negative()); /// assert!(g.is_sign_negative()); /// // Requires both tests to determine if is `NaN`. /// assert!(!nan.is_sign_positive() && !nan.is_sign_negative()); /// ``` #[inline] pub fn is_sign_negative(self) -> bool { num::Float::is_sign_negative(self) } /// Fused multiply-add. Computes `(self * a) + b` with only one rounding /// error. 
This produces a more accurate result with better performance than /// a separate multiplication operation followed by an add. /// /// ``` /// use std::f32; /// /// let m = 10.0_f32; /// let x = 4.0_f32; /// let b = 60.0_f32; /// /// // 100.0 /// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn mul_add(self, a: f32, b: f32) -> f32 { unsafe { intrinsics::fmaf32(self, a, b) } } /// Takes the reciprocal (inverse) of a number, `1/x`. /// /// ``` /// use std::f32; /// /// let x = 2.0_f32; /// let abs_difference = (x.recip() - (1.0/x)).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn recip(self) -> f32 { num::Float::recip(self) } /// Raises a number to an integer power. /// /// Using this function is generally faster than using `powf` /// /// ``` /// use std::f32; /// /// let x = 2.0_f32; /// let abs_difference = (x.powi(2) - x*x).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn powi(self, n: i32) -> f32 { num::Float::powi(self, n) } /// Raises a number to a floating point power. /// /// ``` /// use std::f32; /// /// let x = 2.0_f32; /// let abs_difference = (x.powf(2.0) - x*x).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn powf(self, n: f32) -> f32 { // see notes above in `floor` #[cfg(target_env = "msvc")] return (self as f64).powf(n as f64) as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::powf32(self, n) }; } /// Takes the square root of a number. /// /// Returns NaN if `self` is a negative number. 
/// /// ``` /// use std::f32; /// /// let positive = 4.0_f32; /// let negative = -4.0_f32; /// /// let abs_difference = (positive.sqrt() - 2.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// assert!(negative.sqrt().is_nan()); /// ``` #[inline] pub fn sqrt(self) -> f32 { if self < 0.0 { NAN } else { unsafe { intrinsics::sqrtf32(self) } } } /// Returns `e^(self)`, (the exponential function). /// /// ``` /// use std::f32; /// /// let one = 1.0f32; /// // e^1 /// let e = one.exp(); /// /// // ln(e) - 1 == 0 /// let abs_difference = (e.ln() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn exp(self) -> f32 { // see notes above in `floor` #[cfg(target_env = "msvc")] return (self as f64).exp() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::expf32(self) }; } /// Returns `2^(self)`. /// /// ``` /// use std::f32; /// /// let f = 2.0f32; /// /// // 2^2 - 4 == 0 /// let abs_difference = (f.exp2() - 4.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn exp2(self) -> f32 { unsafe { intrinsics::exp2f32(self) } } /// Returns the natural logarithm of the number. /// /// ``` /// use std::f32; /// /// let one = 1.0f32; /// // e^1 /// let e = one.exp(); /// /// // ln(e) - 1 == 0 /// let abs_difference = (e.ln() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn ln(self) -> f32 { // see notes above in `floor` #[cfg(target_env = "msvc")] return (self as f64).ln() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::logf32(self) }; } /// Returns the logarithm of the number with respect to an arbitrary base. 
/// /// ``` /// use std::f32; /// /// let ten = 10.0f32; /// let two = 2.0f32; /// /// // log10(10) - 1 == 0 /// let abs_difference_10 = (ten.log(10.0) - 1.0).abs(); /// /// // log2(2) - 1 == 0 /// let abs_difference_2 = (two.log(2.0) - 1.0).abs(); /// /// assert!(abs_difference_10 <= f32::EPSILON); /// assert!(abs_difference_2 <= f32::EPSILON); /// ``` #[inline] pub fn log(self, base: f32) -> f32 { self.ln() / base.ln() } /// Returns the base 2 logarithm of the number. /// /// ``` /// use std::f32; /// /// let two = 2.0f32; /// /// // log2(2) - 1 == 0 /// let abs_difference = (two.log2() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn log2(self) -> f32 { #[cfg(target_os = "android")] return ::sys::android::log2f32(self); #[cfg(not(target_os = "android"))] return unsafe { intrinsics::log2f32(self) }; } /// Returns the base 10 logarithm of the number. /// /// ``` /// use std::f32; /// /// let ten = 10.0f32; /// /// // log10(10) - 1 == 0 /// let abs_difference = (ten.log10() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn log10(self) -> f32 { // see notes above in `floor` #[cfg(target_env = "msvc")] return (self as f64).log10() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::log10f32(self) }; } /// Converts radians to degrees. /// /// ``` /// use std::f32::{self, consts}; /// /// let angle = consts::PI; /// /// let abs_difference = (angle.to_degrees() - 180.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn to_degrees(self) -> f32 { num::Float::to_degrees(self) } /// Converts degrees to radians. /// /// ``` /// use std::f32::{self, consts}; /// /// let angle = 180.0f32; /// /// let abs_difference = (angle.to_radians() - consts::PI).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn to_radians(self) -> f32 { num::Float::to_radians(self) } /// Constructs a floating point number of `x*2^exp`. 
/// /// ``` /// #![feature(float_extras)] /// /// use std::f32; /// // 3*2^2 - 12 == 0 /// let abs_difference = (f32::ldexp(3.0, 2) - 12.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn ldexp(x: f32, exp: isize) -> f32 { unsafe { cmath::ldexpf(x, exp as c_int) } } /// Breaks the number into a normalized fraction and a base-2 exponent, /// satisfying: /// /// * `self = x * 2^exp` /// * `0.5 <= abs(x) < 1.0` /// /// ``` /// #![feature(float_extras)] /// /// use std::f32; /// /// let x = 4.0f32; /// /// // (1/2)*2^3 -> 1 * 8/2 -> 4.0 /// let f = x.frexp(); /// let abs_difference_0 = (f.0 - 0.5).abs(); /// let abs_difference_1 = (f.1 as f32 - 3.0).abs(); /// /// assert!(abs_difference_0 <= f32::EPSILON); /// assert!(abs_difference_1 <= f32::EPSILON); /// ``` #[inline] pub fn frexp(self) -> (f32, isize) { unsafe { let mut exp = 0; let x = cmath::frexpf(self, &mut exp); (x, exp as isize) } } /// Returns the next representable floating-point value in the direction of /// `other`. /// /// ``` /// #![feature(float_extras)] /// /// use std::f32; /// /// let x = 1.0f32; /// /// let abs_diff = (x.next_after(2.0) - 1.00000011920928955078125_f32).abs(); /// /// assert!(abs_diff <= f32::EPSILON); /// ``` #[inline] pub fn next_after(self, other: f32) -> f32 { unsafe { cmath::nextafterf(self, other) } } /// Returns the maximum of the two numbers. /// /// ``` /// let x = 1.0f32; /// let y = 2.0f32; /// /// assert_eq!(x.max(y), y); /// ``` /// /// If one of the arguments is NaN, then the other argument is returned. #[inline] pub fn max(self, other: f32) -> f32 { unsafe { cmath::fmaxf(self, other) } } /// Returns the minimum of the two numbers. /// /// ``` /// let x = 1.0f32; /// let y = 2.0f32; /// /// assert_eq!(x.min(y), x); /// ``` /// /// If one of the arguments is NaN, then the other argument is returned. #[inline] pub fn min(self, other: f32) -> f32 { unsafe { cmath::fminf(self, other) } } /// The positive difference of two numbers. 
/// /// * If `self <= other`: `0:0` /// * Else: `self - other` /// /// ``` /// use std::f32; /// /// let x = 3.0f32; /// let y = -3.0f32; /// /// let abs_difference_x = (x.abs_sub(1.0) - 2.0).abs(); /// let abs_difference_y = (y.abs_sub(1.0) - 0.0).abs(); /// /// assert!(abs_difference_x <= f32::EPSILON); /// assert!(abs_difference_y <= f32::EPSILON); /// ``` pub fn abs_sub(self, other: f32) -> f32 { unsafe { cmath::fdimf(self, other) } } /// Takes the cubic root of a number. /// /// ``` /// use std::f32; /// /// let x = 8.0f32; /// /// // x^(1/3) - 2 == 0 /// let abs_difference = (x.cbrt() - 2.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn cbrt(self) -> f32 { unsafe { cmath::cbrtf(self) } } /// Calculates the length of the hypotenuse of a right-angle triangle given /// legs of length `x` and `y`. /// /// ``` /// use std::f32; /// /// let x = 2.0f32; /// let y = 3.0f32; /// /// // sqrt(x^2 + y^2) /// let abs_difference = (x.hypot(y) - (x.powi(2) + y.powi(2)).sqrt()).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn hypot(self, other: f32) -> f32 { unsafe { cmath::hypotf(self, other) } } /// Computes the sine of a number (in radians). /// /// ``` /// use std::f32; /// /// let x = f32::consts::PI/2.0; /// /// let abs_difference = (x.sin() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn sin(self) -> f32 { // see notes in `core::f32::Float::floor` #[cfg(target_env = "msvc")] return (self as f64).sin() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::sinf32(self) }; } /// Computes the cosine of a number (in radians). 
/// /// ``` /// use std::f32; /// /// let x = 2.0*f32::consts::PI; /// /// let abs_difference = (x.cos() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn cos(self) -> f32 { // see notes in `core::f32::Float::floor` #[cfg(target_env = "msvc")] return (self as f64).cos() as f32; #[cfg(not(target_env = "msvc"))] return unsafe { intrinsics::cosf32(self) }; } /// Computes the tangent of a number (in radians). /// /// ``` /// use std::f32; /// /// let x = f32::consts::PI / 4.0; /// let abs_difference = (x.tan() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn tan(self) -> f32 { unsafe { cmath::tanf(self) } } /// Computes the arcsine of a number. Return value is in radians in /// the range [-pi/2, pi/2] or NaN if the number is outside the range /// [-1, 1]. /// /// ``` /// use std::f32; /// /// let f = f32::consts::PI / 2.0; /// /// // asin(sin(pi/2)) /// let abs_difference = (f.sin().asin() - f32::consts::PI / 2.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn asin(self) -> f32 { unsafe { cmath::asinf(self) } } /// Computes the arccosine of a number. Return value is in radians in /// the range [0, pi] or NaN if the number is outside the range /// [-1, 1]. /// /// ``` /// use std::f32; /// /// let f = f32::consts::PI / 4.0; /// /// // acos(cos(pi/4)) /// let abs_difference = (f.cos().acos() - f32::consts::PI / 4.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn acos(self) -> f32 { unsafe { cmath::acosf(self) } } /// Computes the arctangent of a number. 
Return value is in radians in the /// range [-pi/2, pi/2]; /// /// ``` /// use std::f32; /// /// let f = 1.0f32; /// /// // atan(tan(1)) /// let abs_difference = (f.tan().atan() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn atan(self) -> f32 { unsafe { cmath::atanf(self) } } /// Computes the four quadrant arctangent of `self` (`y`) and `other` (`x`). /// /// * `x = 0`, `y = 0`: `0` /// * `x >= 0`: `arctan(y/x)` -> `[-pi/2, pi/2]` /// * `y >= 0`: `arctan(y/x) + pi` -> `(pi/2, pi]` /// * `y < 0`: `arctan(y/x) - pi` -> `(-pi, -pi/2)` /// /// ``` /// use std::f32; /// /// let pi = f32::consts::PI; /// // All angles from horizontal right (+x) /// // 45 deg counter-clockwise /// let x1 = 3.0f32; /// let y1 = -3.0f32; /// /// // 135 deg clockwise /// let x2 = -3.0f32; /// let y2 = 3.0f32; /// /// let abs_difference_1 = (y1.atan2(x1) - (-pi/4.0)).abs(); /// let abs_difference_2 = (y2.atan2(x2) - 3.0*pi/4.0).abs(); /// /// assert!(abs_difference_1 <= f32::EPSILON); /// assert!(abs_difference_2 <= f32::EPSILON); /// ``` #[inline] pub fn atan2(self, other: f32) -> f32 { unsafe { cmath::atan2f(self, other) } } /// Simultaneously computes the sine and cosine of the number, `x`. Returns /// `(sin(x), cos(x))`. /// /// ``` /// use std::f32; /// /// let x = f32::consts::PI/4.0; /// let f = x.sin_cos(); /// /// let abs_difference_0 = (f.0 - x.sin()).abs(); /// let abs_difference_1 = (f.1 - x.cos()).abs(); /// /// assert!(abs_difference_0 <= f32::EPSILON); /// assert!(abs_difference_1 <= f32::EPSILON); /// ``` #[inline] pub fn sin_cos(self) -> (f32, f32) { (self.sin(), self.cos()) } /// Returns `e^(self) - 1` in a way that is accurate even if the /// number is close to zero. 
/// /// ``` /// use std::f32; /// /// let x = 6.0f32; /// /// // e^(ln(6)) - 1 /// let abs_difference = (x.ln().exp_m1() - 5.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn exp_m1(self) -> f32 { unsafe { cmath::expm1f(self) } } /// Returns `ln(1+n)` (natural logarithm) more accurately than if /// the operations were performed separately. /// /// ``` /// use std::f32; /// /// let x = f32::consts::E - 1.0; /// /// // ln(1 + (e - 1)) == ln(e) == 1 /// let abs_difference = (x.ln_1p() - 1.0).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn ln_1p(self) -> f32 { unsafe { cmath::log1pf(self) } } /// Hyperbolic sine function. /// /// ``` /// use std::f32; /// /// let e = f32::consts::E; /// let x = 1.0f32; /// /// let f = x.sinh(); /// // Solving sinh() at 1 gives `(e^2-1)/(2e)` /// let g = (e*e - 1.0)/(2.0*e); /// let abs_difference = (f - g).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn sinh(self) -> f32 { unsafe { cmath::sinhf(self) } } /// Hyperbolic cosine function. /// /// ``` /// use std::f32; /// /// let e = f32::consts::E; /// let x = 1.0f32; /// let f = x.cosh(); /// // Solving cosh() at 1 gives this result /// let g = (e*e + 1.0)/(2.0*e); /// let abs_difference = (f - g).abs(); /// /// // Same result /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn cosh(self) -> f32 { unsafe { cmath::coshf(self) } } /// Hyperbolic tangent function. /// /// ``` /// use std::f32; /// /// let e = f32::consts::E; /// let x = 1.0f32; /// /// let f = x.tanh(); /// // Solving tanh() at 1 gives `(1 - e^(-2))/(1 + e^(-2))` /// let g = (1.0 - e.powi(-2))/(1.0 + e.powi(-2)); /// let abs_difference = (f - g).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn tanh(self) -> f32 { unsafe { cmath::tanhf(self) } } /// Inverse hyperbolic sine function. 
/// /// ``` /// use std::f32; /// /// let x = 1.0f32; /// let f = x.sinh().asinh(); /// /// let abs_difference = (f - x).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn asinh(self) -> f32 { if self == NEG_INFINITY { NEG_INFINITY } else { (self + ((self * self) + 1.0).sqrt()).ln() } } /// Inverse hyperbolic cosine function. /// /// ``` /// use std::f32; /// /// let x = 1.0f32; /// let f = x.cosh().acosh(); /// /// let abs_difference = (f - x).abs(); /// /// assert!(abs_difference <= f32::EPSILON); /// ``` #[inline] pub fn acosh(self) -> f32 { match self { x if x < 1.0 => std::f32::NAN, x => (x + ((x * x) - 1.0).sqrt()).ln(), } } /// Inverse hyperbolic tangent function. /// /// ``` /// use std::f32; /// /// let e = f32::consts::E; /// let f = e.tanh().atanh(); /// /// let abs_difference = (f - e).abs(); /// /// assert!(abs_difference <= 1e-5); /// ``` #[inline] pub fn atanh(self) -> f32 { 0.5 * ((2.0 * self) / (1.0 - self)).ln_1p() } } #[cfg(test)] mod tests { use f32; use f32::*; use num::*; use num::FpCategory as Fp; #[test] fn test_num_f32() { test_num(10f32, 2f32); } #[test] fn test_min_nan() { assert_eq!(NAN.min(2.0), 2.0); assert_eq!(2.0f32.min(NAN), 2.0); } #[test] fn test_max_nan() { assert_eq!(NAN.max(2.0), 2.0); assert_eq!(2.0f32.max(NAN), 2.0); } #[test] fn test_nan() { let nan: f32 = f32::NAN; assert!(nan.is_nan()); assert!(!nan.is_infinite()); assert!(!nan.is_finite()); assert!(!nan.is_normal()); assert!(!nan.is_sign_positive()); assert!(!nan.is_sign_negative()); assert_eq!(Fp::Nan, nan.classify()); } #[test] fn test_infinity() { let inf: f32 = f32::INFINITY; assert!(inf.is_infinite()); assert!(!inf.is_finite()); assert!(inf.is_sign_positive()); assert!(!inf.is_sign_negative()); assert!(!inf.is_nan()); assert!(!inf.is_normal()); assert_eq!(Fp::Infinite, inf.classify()); } #[test] fn test_neg_infinity() { let neg_inf: f32 = f32::NEG_INFINITY; assert!(neg_inf.is_infinite()); assert!(!neg_inf.is_finite()); 
assert!(!neg_inf.is_sign_positive()); assert!(neg_inf.is_sign_negative()); assert!(!neg_inf.is_nan()); assert!(!neg_inf.is_normal()); assert_eq!(Fp::Infinite, neg_inf.classify()); } #[test] fn test_zero() { let zero: f32 = 0.0f32; assert_eq!(0.0, zero); assert!(!zero.is_infinite()); assert!(zero.is_finite()); assert!(zero.is_sign_positive()); assert!(!zero.is_sign_negative()); assert!(!zero.is_nan()); assert!(!zero.is_normal()); assert_eq!(Fp::Zero, zero.classify()); } #[test] fn test_neg_zero() { let neg_zero: f32 = -0.0; assert_eq!(0.0, neg_zero); assert!(!neg_zero.is_infinite()); assert!(neg_zero.is_finite()); assert!(!neg_zero.is_sign_positive()); assert!(neg_zero.is_sign_negative()); assert!(!neg_zero.is_nan()); assert!(!neg_zero.is_normal()); assert_eq!(Fp::Zero, neg_zero.classify()); } #[test] fn test_one() { let one: f32 = 1.0f32; assert_eq!(1.0, one); assert!(!one.is_infinite()); assert!(one.is_finite()); assert!(one.is_sign_positive()); assert!(!one.is_sign_negative()); assert!(!one.is_nan()); assert!(one.is_normal()); assert_eq!(Fp::Normal, one.classify()); } #[test] fn test_is_nan() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert!(nan.is_nan()); assert!(!0.0f32.is_nan()); assert!(!5.3f32.is_nan()); assert!(!(-10.732f32).is_nan()); assert!(!inf.is_nan()); assert!(!neg_inf.is_nan()); } #[test] fn test_is_infinite() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert!(!nan.is_infinite()); assert!(inf.is_infinite()); assert!(neg_inf.is_infinite()); assert!(!0.0f32.is_infinite()); assert!(!42.8f32.is_infinite()); assert!(!(-109.2f32).is_infinite()); } #[test] fn test_is_finite() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert!(!nan.is_finite()); assert!(!inf.is_finite()); assert!(!neg_inf.is_finite()); assert!(0.0f32.is_finite()); assert!(42.8f32.is_finite()); assert!((-109.2f32).is_finite()); } #[test] 
fn test_is_normal() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let zero: f32 = 0.0f32; let neg_zero: f32 = -0.0; assert!(!nan.is_normal()); assert!(!inf.is_normal()); assert!(!neg_inf.is_normal()); assert!(!zero.is_normal()); assert!(!neg_zero.is_normal()); assert!(1f32.is_normal()); assert!(1e-37f32.is_normal()); assert!(!1e-38f32.is_normal()); } #[test] fn test_classify() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let zero: f32 = 0.0f32; let neg_zero: f32 = -0.0; assert_eq!(nan.classify(), Fp::Nan); assert_eq!(inf.classify(), Fp::Infinite); assert_eq!(neg_inf.classify(), Fp::Infinite); assert_eq!(zero.classify(), Fp::Zero); assert_eq!(neg_zero.classify(), Fp::Zero); assert_eq!(1f32.classify(), Fp::Normal); assert_eq!(1e-37f32.classify(), Fp::Normal); assert_eq!(1e-38f32.classify(), Fp::Subnormal); } #[test] #[allow(deprecated)] fn test_integer_decode() { assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1)); assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1)); assert_eq!(2f32.powf(100.0).integer_decode(), (8388608, 77, 1)); assert_eq!(0f32.integer_decode(), (0, -150, 1)); assert_eq!((-0f32).integer_decode(), (0, -150, -1)); assert_eq!(INFINITY.integer_decode(), (8388608, 105, 1)); assert_eq!(NEG_INFINITY.integer_decode(), (8388608, 105, -1)); // Ignore the "sign" (quiet / signalling flag) of NAN. // It can vary between runtime operations and LLVM folding. 
let (nan_m, nan_e, _nan_s) = NAN.integer_decode(); assert_eq!((nan_m, nan_e), (12582912, 105)); } #[test] fn test_floor() { assert_approx_eq!(1.0f32.floor(), 1.0f32); assert_approx_eq!(1.3f32.floor(), 1.0f32); assert_approx_eq!(1.5f32.floor(), 1.0f32); assert_approx_eq!(1.7f32.floor(), 1.0f32); assert_approx_eq!(0.0f32.floor(), 0.0f32); assert_approx_eq!((-0.0f32).floor(), -0.0f32); assert_approx_eq!((-1.0f32).floor(), -1.0f32); assert_approx_eq!((-1.3f32).floor(), -2.0f32); assert_approx_eq!((-1.5f32).floor(), -2.0f32); assert_approx_eq!((-1.7f32).floor(), -2.0f32); } #[test] fn test_ceil() { assert_approx_eq!(1.0f32.ceil(), 1.0f32); assert_approx_eq!(1.3f32.ceil(), 2.0f32); assert_approx_eq!(1.5f32.ceil(), 2.0f32); assert_approx_eq!(1.7f32.ceil(), 2.0f32); assert_approx_eq!(0.0f32.ceil(), 0.0f32); assert_approx_eq!((-0.0f32).ceil(), -0.0f32); assert_approx_eq!((-1.0f32).ceil(), -1.0f32); assert_approx_eq!((-1.3f32).ceil(), -1.0f32); assert_approx_eq!((-1.5f32).ceil(), -1.0f32); assert_approx_eq!((-1.7f32).ceil(), -1.0f32); } #[test] fn test_round() { assert_approx_eq!(1.0f32.round(), 1.0f32); assert_approx_eq!(1.3f32.round(), 1.0f32); assert_approx_eq!(1.5f32.round(), 2.0f32); assert_approx_eq!(1.7f32.round(), 2.0f32); assert_approx_eq!(0.0f32.round(), 0.0f32); assert_approx_eq!((-0.0f32).round(), -0.0f32); assert_approx_eq!((-1.0f32).round(), -1.0f32); assert_approx_eq!((-1.3f32).round(), -1.0f32); assert_approx_eq!((-1.5f32).round(), -2.0f32); assert_approx_eq!((-1.7f32).round(), -2.0f32); } #[test] fn test_trunc() { assert_approx_eq!(1.0f32.trunc(), 1.0f32); assert_approx_eq!(1.3f32.trunc(), 1.0f32); assert_approx_eq!(1.5f32.trunc(), 1.0f32); assert_approx_eq!(1.7f32.trunc(), 1.0f32); assert_approx_eq!(0.0f32.trunc(), 0.0f32); assert_approx_eq!((-0.0f32).trunc(), -0.0f32); assert_approx_eq!((-1.0f32).trunc(), -1.0f32); assert_approx_eq!((-1.3f32).trunc(), -1.0f32); assert_approx_eq!((-1.5f32).trunc(), -1.0f32); assert_approx_eq!((-1.7f32).trunc(), -1.0f32); } 
#[test] fn test_fract() { assert_approx_eq!(1.0f32.fract(), 0.0f32); assert_approx_eq!(1.3f32.fract(), 0.3f32); assert_approx_eq!(1.5f32.fract(), 0.5f32); assert_approx_eq!(1.7f32.fract(), 0.7f32); assert_approx_eq!(0.0f32.fract(), 0.0f32); assert_approx_eq!((-0.0f32).fract(), -0.0f32); assert_approx_eq!((-1.0f32).fract(), -0.0f32); assert_approx_eq!((-1.3f32).fract(), -0.3f32); assert_approx_eq!((-1.5f32).fract(), -0.5f32); assert_approx_eq!((-1.7f32).fract(), -0.7f32); } #[test] fn test_abs() { assert_eq!(INFINITY.abs(), INFINITY); assert_eq!(1f32.abs(), 1f32); assert_eq!(0f32.abs(), 0f32); assert_eq!((-0f32).abs(), 0f32); assert_eq!((-1f32).abs(), 1f32); assert_eq!(NEG_INFINITY.abs(), INFINITY); assert_eq!((1f32 / NEG_INFINITY).abs(), 0f32); assert!(NAN.abs().is_nan()); } #[test] fn test_signum() { assert_eq!(INFINITY.signum(), 1f32); assert_eq!(1f32.signum(), 1f32); assert_eq!(0f32.signum(), 1f32); assert_eq!((-0f32).signum(), -1f32); assert_eq!((-1f32).signum(), -1f32); assert_eq!(NEG_INFINITY.signum(), -1f32); assert_eq!((1f32 / NEG_INFINITY).signum(), -1f32); assert!(NAN.signum().is_nan()); } #[test] fn test_is_sign_positive() { assert!(INFINITY.is_sign_positive()); assert!(1f32.is_sign_positive()); assert!(0f32.is_sign_positive()); assert!(!(-0f32).is_sign_positive()); assert!(!(-1f32).is_sign_positive()); assert!(!NEG_INFINITY.is_sign_positive()); assert!(!(1f32 / NEG_INFINITY).is_sign_positive()); assert!(!NAN.is_sign_positive()); } #[test] fn test_is_sign_negative() { assert!(!INFINITY.is_sign_negative()); assert!(!1f32.is_sign_negative()); assert!(!0f32.is_sign_negative()); assert!((-0f32).is_sign_negative()); assert!((-1f32).is_sign_negative()); assert!(NEG_INFINITY.is_sign_negative()); assert!((1f32 / NEG_INFINITY).is_sign_negative()); assert!(!NAN.is_sign_negative()); } #[test] fn test_mul_add() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_approx_eq!(12.3f32.mul_add(4.5, 6.7), 62.05); 
assert_approx_eq!((-12.3f32).mul_add(-4.5, -6.7), 48.65); assert_approx_eq!(0.0f32.mul_add(8.9, 1.2), 1.2); assert_approx_eq!(3.4f32.mul_add(-0.0, 5.6), 5.6); assert!(nan.mul_add(7.8, 9.0).is_nan()); assert_eq!(inf.mul_add(7.8, 9.0), inf); assert_eq!(neg_inf.mul_add(7.8, 9.0), neg_inf); assert_eq!(8.9f32.mul_add(inf, 3.2), inf); assert_eq!((-3.2f32).mul_add(2.4, neg_inf), neg_inf); } #[test] fn test_recip() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_eq!(1.0f32.recip(), 1.0); assert_eq!(2.0f32.recip(), 0.5); assert_eq!((-0.4f32).recip(), -2.5); assert_eq!(0.0f32.recip(), inf); assert!(nan.recip().is_nan()); assert_eq!(inf.recip(), 0.0); assert_eq!(neg_inf.recip(), 0.0); } #[test] fn test_powi() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_eq!(1.0f32.powi(1), 1.0); assert_approx_eq!((-3.1f32).powi(2), 9.61); assert_approx_eq!(5.9f32.powi(-2), 0.028727); assert_eq!(8.3f32.powi(0), 1.0); assert!(nan.powi(2).is_nan()); assert_eq!(inf.powi(3), inf); assert_eq!(neg_inf.powi(2), inf); } #[test] fn test_powf() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_eq!(1.0f32.powf(1.0), 1.0); assert_approx_eq!(3.4f32.powf(4.5), 246.408218); assert_approx_eq!(2.7f32.powf(-3.2), 0.041652); assert_approx_eq!((-3.1f32).powf(2.0), 9.61); assert_approx_eq!(5.9f32.powf(-2.0), 0.028727); assert_eq!(8.3f32.powf(0.0), 1.0); assert!(nan.powf(2.0).is_nan()); assert_eq!(inf.powf(2.0), inf); assert_eq!(neg_inf.powf(3.0), neg_inf); } #[test] fn test_sqrt_domain() { assert!(NAN.sqrt().is_nan()); assert!(NEG_INFINITY.sqrt().is_nan()); assert!((-1.0f32).sqrt().is_nan()); assert_eq!((-0.0f32).sqrt(), -0.0); assert_eq!(0.0f32.sqrt(), 0.0); assert_eq!(1.0f32.sqrt(), 1.0); assert_eq!(INFINITY.sqrt(), INFINITY); } #[test] fn test_exp() { assert_eq!(1.0, 0.0f32.exp()); assert_approx_eq!(2.718282, 1.0f32.exp()); 
assert_approx_eq!(148.413162, 5.0f32.exp()); let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let nan: f32 = f32::NAN; assert_eq!(inf, inf.exp()); assert_eq!(0.0, neg_inf.exp()); assert!(nan.exp().is_nan()); } #[test] fn test_exp2() { assert_eq!(32.0, 5.0f32.exp2()); assert_eq!(1.0, 0.0f32.exp2()); let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let nan: f32 = f32::NAN; assert_eq!(inf, inf.exp2()); assert_eq!(0.0, neg_inf.exp2()); assert!(nan.exp2().is_nan()); } #[test] fn test_ln() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_approx_eq!(1.0f32.exp().ln(), 1.0); assert!(nan.ln().is_nan()); assert_eq!(inf.ln(), inf); assert!(neg_inf.ln().is_nan()); assert!((-2.3f32).ln().is_nan()); assert_eq!((-0.0f32).ln(), neg_inf); assert_eq!(0.0f32.ln(), neg_inf); assert_approx_eq!(4.0f32.ln(), 1.386294); } #[test] fn test_log() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_eq!(10.0f32.log(10.0), 1.0); assert_approx_eq!(2.3f32.log(3.5), 0.664858); assert_eq!(1.0f32.exp().log(1.0f32.exp()), 1.0); assert!(1.0f32.log(1.0).is_nan()); assert!(1.0f32.log(-13.9).is_nan()); assert!(nan.log(2.3).is_nan()); assert_eq!(inf.log(10.0), inf); assert!(neg_inf.log(8.8).is_nan()); assert!((-2.3f32).log(0.1).is_nan()); assert_eq!((-0.0f32).log(2.0), neg_inf); assert_eq!(0.0f32.log(7.0), neg_inf); } #[test] fn test_log2() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_approx_eq!(10.0f32.log2(), 3.321928); assert_approx_eq!(2.3f32.log2(), 1.201634); assert_approx_eq!(1.0f32.exp().log2(), 1.442695); assert!(nan.log2().is_nan()); assert_eq!(inf.log2(), inf); assert!(neg_inf.log2().is_nan()); assert!((-2.3f32).log2().is_nan()); assert_eq!((-0.0f32).log2(), neg_inf); assert_eq!(0.0f32.log2(), neg_inf); } #[test] fn test_log10() { let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: 
f32 = f32::NEG_INFINITY; assert_eq!(10.0f32.log10(), 1.0); assert_approx_eq!(2.3f32.log10(), 0.361728); assert_approx_eq!(1.0f32.exp().log10(), 0.434294); assert_eq!(1.0f32.log10(), 0.0); assert!(nan.log10().is_nan()); assert_eq!(inf.log10(), inf); assert!(neg_inf.log10().is_nan()); assert!((-2.3f32).log10().is_nan()); assert_eq!((-0.0f32).log10(), neg_inf); assert_eq!(0.0f32.log10(), neg_inf); } #[test] fn test_to_degrees() { let pi: f32 = consts::PI; let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_eq!(0.0f32.to_degrees(), 0.0); assert_approx_eq!((-5.8f32).to_degrees(), -332.315521); assert_eq!(pi.to_degrees(), 180.0); assert!(nan.to_degrees().is_nan()); assert_eq!(inf.to_degrees(), inf); assert_eq!(neg_inf.to_degrees(), neg_inf); } #[test] fn test_to_radians() { let pi: f32 = consts::PI; let nan: f32 = f32::NAN; let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; assert_eq!(0.0f32.to_radians(), 0.0); assert_approx_eq!(154.6f32.to_radians(), 2.698279); assert_approx_eq!((-332.31f32).to_radians(), -5.799903); assert_eq!(180.0f32.to_radians(), pi); assert!(nan.to_radians().is_nan()); assert_eq!(inf.to_radians(), inf); assert_eq!(neg_inf.to_radians(), neg_inf); } #[test] #[allow(deprecated)] fn test_ldexp() { let f1 = 2.0f32.powi(-123); let f2 = 2.0f32.powi(-111); let f3 = 1.75 * 2.0f32.powi(-12); assert_eq!(f32::ldexp(1f32, -123), f1); assert_eq!(f32::ldexp(1f32, -111), f2); assert_eq!(f32::ldexp(1.75f32, -12), f3); assert_eq!(f32::ldexp(0f32, -123), 0f32); assert_eq!(f32::ldexp(-0f32, -123), -0f32); let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let nan: f32 = f32::NAN; assert_eq!(f32::ldexp(inf, -123), inf); assert_eq!(f32::ldexp(neg_inf, -123), neg_inf); assert!(f32::ldexp(nan, -123).is_nan()); } #[test] #[allow(deprecated)] fn test_frexp() { let f1 = 2.0f32.powi(-123); let f2 = 2.0f32.powi(-111); let f3 = 1.75 * 2.0f32.powi(-123); let (x1, exp1) = f1.frexp(); let (x2, exp2) 
= f2.frexp(); let (x3, exp3) = f3.frexp(); assert_eq!((x1, exp1), (0.5f32, -122)); assert_eq!((x2, exp2), (0.5f32, -110)); assert_eq!((x3, exp3), (0.875f32, -122)); assert_eq!(f32::ldexp(x1, exp1), f1); assert_eq!(f32::ldexp(x2, exp2), f2); assert_eq!(f32::ldexp(x3, exp3), f3); assert_eq!(0f32.frexp(), (0f32, 0)); assert_eq!((-0f32).frexp(), (-0f32, 0)); } #[test] #[cfg_attr(windows, ignore)] // FIXME #8755 #[allow(deprecated)] fn test_frexp_nowin() { let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let nan: f32 = f32::NAN; assert_eq!(match inf.frexp() { (x, _) => x, }, inf); assert_eq!(match neg_inf.frexp() { (x, _) => x, }, neg_inf); assert!(match nan.frexp() { (x, _) => x.is_nan(), }) } #[test] fn test_asinh() { assert_eq!(0.0f32.asinh(), 0.0f32); assert_eq!((-0.0f32).asinh(), -0.0f32); let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let nan: f32 = f32::NAN; assert_eq!(inf.asinh(), inf); assert_eq!(neg_inf.asinh(), neg_inf); assert!(nan.asinh().is_nan()); assert_approx_eq!(2.0f32.asinh(), 1.443635475178810342493276740273105f32); assert_approx_eq!((-2.0f32).asinh(), -1.443635475178810342493276740273105f32); } #[test] fn test_acosh() { assert_eq!(1.0f32.acosh(), 0.0f32); assert!(0.999f32.acosh().is_nan()); let inf: f32 = f32::INFINITY; let neg_inf: f32 = f32::NEG_INFINITY; let nan: f32 = f32::NAN; assert_eq!(inf.acosh(), inf); assert!(neg_inf.acosh().is_nan()); assert!(nan.acosh().is_nan()); assert_approx_eq!(2.0f32.acosh(), 1.31695789692481670862504634730796844f32); assert_approx_eq!(3.0f32.acosh(), 1.76274717403908605046521864995958461f32); } #[test] fn test_atanh() { assert_eq!(0.0f32.atanh(), 0.0f32); assert_eq!((-0.0f32).atanh(), -0.0f32); let inf32: f32 = f32::INFINITY; let neg_inf32: f32 = f32::NEG_INFINITY; assert_eq!(1.0f32.atanh(), inf32); assert_eq!((-1.0f32).atanh(), neg_inf32); assert!(2f64.atanh().atanh().is_nan()); assert!((-2f64).atanh().atanh().is_nan()); let inf64: f32 = f32::INFINITY; let neg_inf64: f32 = 
f32::NEG_INFINITY; let nan32: f32 = f32::NAN; assert!(inf64.atanh().is_nan()); assert!(neg_inf64.atanh().is_nan()); assert!(nan32.atanh().is_nan()); assert_approx_eq!(0.5f32.atanh(), 0.54930614433405484569762261846126285f32); assert_approx_eq!((-0.5f32).atanh(), -0.54930614433405484569762261846126285f32); } #[test] fn test_real_consts() { use super::consts; let pi: f32 = consts::PI; let frac_pi_2: f32 = consts::FRAC_PI_2; let frac_pi_3: f32 = consts::FRAC_PI_3; let frac_pi_4: f32 = consts::FRAC_PI_4; let frac_pi_6: f32 = consts::FRAC_PI_6; let frac_pi_8: f32 = consts::FRAC_PI_8; let frac_1_pi: f32 = consts::FRAC_1_PI; let frac_2_pi: f32 = consts::FRAC_2_PI; let frac_2_sqrtpi: f32 = consts::FRAC_2_SQRT_PI; let sqrt2: f32 = consts::SQRT_2; let frac_1_sqrt2: f32 = consts::FRAC_1_SQRT_2; let e: f32 = consts::E; let log2_e: f32 = consts::LOG2_E; let log10_e: f32 = consts::LOG10_E; let ln_2: f32 = consts::LN_2; let ln_10: f32 = consts::LN_10; assert_approx_eq!(frac_pi_2, pi / 2f32); assert_approx_eq!(frac_pi_3, pi / 3f32); assert_approx_eq!(frac_pi_4, pi / 4f32); assert_approx_eq!(frac_pi_6, pi / 6f32); assert_approx_eq!(frac_pi_8, pi / 8f32); assert_approx_eq!(frac_1_pi, 1f32 / pi); assert_approx_eq!(frac_2_pi, 2f32 / pi); assert_approx_eq!(frac_2_sqrtpi, 2f32 / pi.sqrt()); assert_approx_eq!(sqrt2, 2f32.sqrt()); assert_approx_eq!(frac_1_sqrt2, 1f32 / 2f32.sqrt()); assert_approx_eq!(log2_e, e.log2()); assert_approx_eq!(log10_e, e.log10()); assert_approx_eq!(ln_2, 2f32.ln()); assert_approx_eq!(ln_10, 10f32.ln()); } }
29.530378
88
0.513012
cc28017672b3a31d42f533bf92ceb257d4113f3d
18,065
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files.git) // DO NOT EDIT use bitflags::bitflags; use glib::translate::*; use glib::value::FromValue; use glib::value::ToValue; use glib::StaticType; use glib::Type; use std::fmt; bitflags! { #[doc(alias = "GdkAnchorHints")] pub struct AnchorHints: u32 { #[doc(alias = "GDK_ANCHOR_FLIP_X")] const FLIP_X = 1; #[doc(alias = "GDK_ANCHOR_FLIP_Y")] const FLIP_Y = 2; #[doc(alias = "GDK_ANCHOR_SLIDE_X")] const SLIDE_X = 4; #[doc(alias = "GDK_ANCHOR_SLIDE_Y")] const SLIDE_Y = 8; #[doc(alias = "GDK_ANCHOR_RESIZE_X")] const RESIZE_X = 16; #[doc(alias = "GDK_ANCHOR_RESIZE_Y")] const RESIZE_Y = 32; #[doc(alias = "GDK_ANCHOR_FLIP")] const FLIP = 3; #[doc(alias = "GDK_ANCHOR_SLIDE")] const SLIDE = 12; #[doc(alias = "GDK_ANCHOR_RESIZE")] const RESIZE = 48; } } impl fmt::Display for AnchorHints { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for AnchorHints { type GlibType = ffi::GdkAnchorHints; fn into_glib(self) -> ffi::GdkAnchorHints { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkAnchorHints> for AnchorHints { unsafe fn from_glib(value: ffi::GdkAnchorHints) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for AnchorHints { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_anchor_hints_get_type()) } } } impl glib::value::ValueType for AnchorHints { type Type = Self; } unsafe impl<'a> FromValue<'a> for AnchorHints { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for AnchorHints { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, 
self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } bitflags! { #[doc(alias = "GdkAxisFlags")] pub struct AxisFlags: u32 { #[doc(alias = "GDK_AXIS_FLAG_X")] const X = 2; #[doc(alias = "GDK_AXIS_FLAG_Y")] const Y = 4; #[doc(alias = "GDK_AXIS_FLAG_DELTA_X")] const DELTA_X = 8; #[doc(alias = "GDK_AXIS_FLAG_DELTA_Y")] const DELTA_Y = 16; #[doc(alias = "GDK_AXIS_FLAG_PRESSURE")] const PRESSURE = 32; #[doc(alias = "GDK_AXIS_FLAG_XTILT")] const XTILT = 64; #[doc(alias = "GDK_AXIS_FLAG_YTILT")] const YTILT = 128; #[doc(alias = "GDK_AXIS_FLAG_WHEEL")] const WHEEL = 256; #[doc(alias = "GDK_AXIS_FLAG_DISTANCE")] const DISTANCE = 512; #[doc(alias = "GDK_AXIS_FLAG_ROTATION")] const ROTATION = 1024; #[doc(alias = "GDK_AXIS_FLAG_SLIDER")] const SLIDER = 2048; } } impl fmt::Display for AxisFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for AxisFlags { type GlibType = ffi::GdkAxisFlags; fn into_glib(self) -> ffi::GdkAxisFlags { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkAxisFlags> for AxisFlags { unsafe fn from_glib(value: ffi::GdkAxisFlags) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for AxisFlags { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_axis_flags_get_type()) } } } impl glib::value::ValueType for AxisFlags { type Type = Self; } unsafe impl<'a> FromValue<'a> for AxisFlags { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for AxisFlags { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } bitflags! 
{ #[doc(alias = "GdkDragAction")] pub struct DragAction: u32 { #[doc(alias = "GDK_ACTION_COPY")] const COPY = 1; #[doc(alias = "GDK_ACTION_MOVE")] const MOVE = 2; #[doc(alias = "GDK_ACTION_LINK")] const LINK = 4; #[doc(alias = "GDK_ACTION_ASK")] const ASK = 8; } } impl DragAction { #[doc(alias = "gdk_drag_action_is_unique")] pub fn is_unique(self) -> bool { assert_initialized_main_thread!(); unsafe { from_glib(ffi::gdk_drag_action_is_unique(self.into_glib())) } } } impl fmt::Display for DragAction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for DragAction { type GlibType = ffi::GdkDragAction; fn into_glib(self) -> ffi::GdkDragAction { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkDragAction> for DragAction { unsafe fn from_glib(value: ffi::GdkDragAction) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for DragAction { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_drag_action_get_type()) } } } impl glib::value::ValueType for DragAction { type Type = Self; } unsafe impl<'a> FromValue<'a> for DragAction { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for DragAction { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } bitflags! 
{ #[doc(alias = "GdkFrameClockPhase")] pub struct FrameClockPhase: u32 { #[doc(alias = "GDK_FRAME_CLOCK_PHASE_NONE")] const NONE = 0; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_FLUSH_EVENTS")] const FLUSH_EVENTS = 1; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_BEFORE_PAINT")] const BEFORE_PAINT = 2; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_UPDATE")] const UPDATE = 4; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_LAYOUT")] const LAYOUT = 8; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_PAINT")] const PAINT = 16; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_RESUME_EVENTS")] const RESUME_EVENTS = 32; #[doc(alias = "GDK_FRAME_CLOCK_PHASE_AFTER_PAINT")] const AFTER_PAINT = 64; } } impl fmt::Display for FrameClockPhase { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for FrameClockPhase { type GlibType = ffi::GdkFrameClockPhase; fn into_glib(self) -> ffi::GdkFrameClockPhase { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkFrameClockPhase> for FrameClockPhase { unsafe fn from_glib(value: ffi::GdkFrameClockPhase) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for FrameClockPhase { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_frame_clock_phase_get_type()) } } } impl glib::value::ValueType for FrameClockPhase { type Type = Self; } unsafe impl<'a> FromValue<'a> for FrameClockPhase { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for FrameClockPhase { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } bitflags! 
{ #[doc(alias = "GdkModifierType")] pub struct ModifierType: u32 { #[doc(alias = "GDK_SHIFT_MASK")] const SHIFT_MASK = 1; #[doc(alias = "GDK_LOCK_MASK")] const LOCK_MASK = 2; #[doc(alias = "GDK_CONTROL_MASK")] const CONTROL_MASK = 4; #[doc(alias = "GDK_ALT_MASK")] const ALT_MASK = 8; #[doc(alias = "GDK_BUTTON1_MASK")] const BUTTON1_MASK = 256; #[doc(alias = "GDK_BUTTON2_MASK")] const BUTTON2_MASK = 512; #[doc(alias = "GDK_BUTTON3_MASK")] const BUTTON3_MASK = 1024; #[doc(alias = "GDK_BUTTON4_MASK")] const BUTTON4_MASK = 2048; #[doc(alias = "GDK_BUTTON5_MASK")] const BUTTON5_MASK = 4096; #[doc(alias = "GDK_SUPER_MASK")] const SUPER_MASK = 67108864; #[doc(alias = "GDK_HYPER_MASK")] const HYPER_MASK = 134217728; #[doc(alias = "GDK_META_MASK")] const META_MASK = 268435456; } } impl fmt::Display for ModifierType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for ModifierType { type GlibType = ffi::GdkModifierType; fn into_glib(self) -> ffi::GdkModifierType { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkModifierType> for ModifierType { unsafe fn from_glib(value: ffi::GdkModifierType) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for ModifierType { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_modifier_type_get_type()) } } } impl glib::value::ValueType for ModifierType { type Type = Self; } unsafe impl<'a> FromValue<'a> for ModifierType { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for ModifierType { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } 
bitflags! { #[doc(alias = "GdkPaintableFlags")] pub struct PaintableFlags: u32 { #[doc(alias = "GDK_PAINTABLE_STATIC_SIZE")] const SIZE = 1; #[doc(alias = "GDK_PAINTABLE_STATIC_CONTENTS")] const CONTENTS = 2; } } impl fmt::Display for PaintableFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for PaintableFlags { type GlibType = ffi::GdkPaintableFlags; fn into_glib(self) -> ffi::GdkPaintableFlags { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkPaintableFlags> for PaintableFlags { unsafe fn from_glib(value: ffi::GdkPaintableFlags) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for PaintableFlags { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_paintable_flags_get_type()) } } } impl glib::value::ValueType for PaintableFlags { type Type = Self; } unsafe impl<'a> FromValue<'a> for PaintableFlags { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for PaintableFlags { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } bitflags! 
{ #[doc(alias = "GdkSeatCapabilities")] pub struct SeatCapabilities: u32 { #[doc(alias = "GDK_SEAT_CAPABILITY_NONE")] const NONE = 0; #[doc(alias = "GDK_SEAT_CAPABILITY_POINTER")] const POINTER = 1; #[doc(alias = "GDK_SEAT_CAPABILITY_TOUCH")] const TOUCH = 2; #[doc(alias = "GDK_SEAT_CAPABILITY_TABLET_STYLUS")] const TABLET_STYLUS = 4; #[doc(alias = "GDK_SEAT_CAPABILITY_KEYBOARD")] const KEYBOARD = 8; #[doc(alias = "GDK_SEAT_CAPABILITY_TABLET_PAD")] const TABLET_PAD = 16; #[doc(alias = "GDK_SEAT_CAPABILITY_ALL_POINTING")] const ALL_POINTING = 7; #[doc(alias = "GDK_SEAT_CAPABILITY_ALL")] const ALL = 15; } } impl fmt::Display for SeatCapabilities { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Debug>::fmt(self, f) } } #[doc(hidden)] impl IntoGlib for SeatCapabilities { type GlibType = ffi::GdkSeatCapabilities; fn into_glib(self) -> ffi::GdkSeatCapabilities { self.bits() } } #[doc(hidden)] impl FromGlib<ffi::GdkSeatCapabilities> for SeatCapabilities { unsafe fn from_glib(value: ffi::GdkSeatCapabilities) -> Self { skip_assert_initialized!(); Self::from_bits_truncate(value) } } impl StaticType for SeatCapabilities { fn static_type() -> Type { unsafe { from_glib(ffi::gdk_seat_capabilities_get_type()) } } } impl glib::value::ValueType for SeatCapabilities { type Type = Self; } unsafe impl<'a> FromValue<'a> for SeatCapabilities { type Checker = glib::value::GenericValueTypeChecker<Self>; unsafe fn from_value(value: &'a glib::Value) -> Self { skip_assert_initialized!(); from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0)) } } impl ToValue for SeatCapabilities { fn to_value(&self) -> glib::Value { let mut value = glib::Value::for_value_type::<Self>(); unsafe { glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib()); } value } fn value_type(&self) -> glib::Type { Self::static_type() } } bitflags! 
{
    // NOTE(review): this `{ ... }` body is the continuation of a macro
    // invocation (presumably `bitflags!`) that begins above this chunk —
    // confirm against the lines preceding this view.
    #[doc(alias = "GdkToplevelState")]
    pub struct ToplevelState: u32 {
        // Each constant mirrors one GDK_TOPLEVEL_STATE_* C flag bit.
        #[doc(alias = "GDK_TOPLEVEL_STATE_MINIMIZED")]
        const MINIMIZED = 1;
        #[doc(alias = "GDK_TOPLEVEL_STATE_MAXIMIZED")]
        const MAXIMIZED = 2;
        #[doc(alias = "GDK_TOPLEVEL_STATE_STICKY")]
        const STICKY = 4;
        #[doc(alias = "GDK_TOPLEVEL_STATE_FULLSCREEN")]
        const FULLSCREEN = 8;
        #[doc(alias = "GDK_TOPLEVEL_STATE_ABOVE")]
        const ABOVE = 16;
        #[doc(alias = "GDK_TOPLEVEL_STATE_BELOW")]
        const BELOW = 32;
        #[doc(alias = "GDK_TOPLEVEL_STATE_FOCUSED")]
        const FOCUSED = 64;
        #[doc(alias = "GDK_TOPLEVEL_STATE_TILED")]
        const TILED = 128;
        #[doc(alias = "GDK_TOPLEVEL_STATE_TOP_TILED")]
        const TOP_TILED = 256;
        #[doc(alias = "GDK_TOPLEVEL_STATE_TOP_RESIZABLE")]
        const TOP_RESIZABLE = 512;
        #[doc(alias = "GDK_TOPLEVEL_STATE_RIGHT_TILED")]
        const RIGHT_TILED = 1024;
        #[doc(alias = "GDK_TOPLEVEL_STATE_RIGHT_RESIZABLE")]
        const RIGHT_RESIZABLE = 2048;
        #[doc(alias = "GDK_TOPLEVEL_STATE_BOTTOM_TILED")]
        const BOTTOM_TILED = 4096;
        #[doc(alias = "GDK_TOPLEVEL_STATE_BOTTOM_RESIZABLE")]
        const BOTTOM_RESIZABLE = 8192;
        #[doc(alias = "GDK_TOPLEVEL_STATE_LEFT_TILED")]
        const LEFT_TILED = 16384;
        #[doc(alias = "GDK_TOPLEVEL_STATE_LEFT_RESIZABLE")]
        const LEFT_RESIZABLE = 32768;
    }
}

// Display delegates to the derived Debug formatting of the flag set.
impl fmt::Display for ToplevelState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        <Self as fmt::Debug>::fmt(self, f)
    }
}

// Rust -> C conversion: expose the raw bit pattern to the FFI layer.
#[doc(hidden)]
impl IntoGlib for ToplevelState {
    type GlibType = ffi::GdkToplevelState;

    fn into_glib(self) -> ffi::GdkToplevelState {
        self.bits()
    }
}

// C -> Rust conversion: `from_bits_truncate` drops any bits the Rust
// side does not know about rather than panicking.
#[doc(hidden)]
impl FromGlib<ffi::GdkToplevelState> for ToplevelState {
    unsafe fn from_glib(value: ffi::GdkToplevelState) -> Self {
        skip_assert_initialized!();
        Self::from_bits_truncate(value)
    }
}

// Registers the flags type with the GObject type system.
impl StaticType for ToplevelState {
    fn static_type() -> Type {
        unsafe { from_glib(ffi::gdk_toplevel_state_get_type()) }
    }
}

impl glib::value::ValueType for ToplevelState {
    type Type = Self;
}

// Extraction from a `glib::Value` holding a flags value; the checker
// validates the value's type before `from_value` is called.
unsafe impl<'a> FromValue<'a> for ToplevelState {
    type Checker = glib::value::GenericValueTypeChecker<Self>;

    unsafe fn from_value(value: &'a glib::Value) -> Self {
        skip_assert_initialized!();
        from_glib(glib::gobject_ffi::g_value_get_flags(value.to_glib_none().0))
    }
}

// Storage into a `glib::Value` as a GObject flags value.
impl ToValue for ToplevelState {
    fn to_value(&self) -> glib::Value {
        let mut value = glib::Value::for_value_type::<Self>();
        unsafe {
            glib::gobject_ffi::g_value_set_flags(value.to_glib_none_mut().0, self.into_glib());
        }
        value
    }

    fn value_type(&self) -> glib::Type {
        Self::static_type()
    }
}
27.53811
95
0.617658
29c48311358af4edc1ed16958b19bae969f2fcf0
1,892
//! # Generic Wrapper for IO object //! `wait_io` is a function that can be used in coroutine //! context to wait on the io events //! use std::sync::atomic::Ordering; use crate::cancel::Cancel; use crate::coroutine_impl::{co_get_handle, CoroutineImpl, EventSource}; use crate::io as io_impl; use crate::yield_now::yield_with; pub struct RawIoBlock<'a> { io_data: &'a io_impl::IoData, } impl<'a> RawIoBlock<'a> { fn new(io_data: &'a io_impl::IoData) -> Self { RawIoBlock { io_data } } } impl<'a> EventSource for RawIoBlock<'a> { fn subscribe(&mut self, co: CoroutineImpl) { let handle = co_get_handle(&co); let io_data = (*self.io_data).clone(); self.io_data.co.swap(co); // there is event, re-run the coroutine if io_data.io_flag.load(Ordering::Acquire) { return io_data.schedule(); } let cancel = handle.get_cancel(); // register the cancel io data cancel.set_io(io_data); // re-check the cancel status if cancel.is_canceled() { unsafe { cancel.cancel() }; } } /// after yield back process fn yield_back(&self, cancel: &'static Cancel) { cancel.clear_cancel_bit(); } } /// This is trait that can block on io events but doing nothong about io pub trait WaitIo { /// reset the io before io operation fn reset_io(&self); /// block on read/write event fn wait_io(&self); } impl<T: io_impl::AsIoData> WaitIo for T { fn reset_io(&self) { let io_data = self.as_io_data(); io_data.reset(); } fn wait_io(&self) { let io_data = self.as_io_data(); // when io flag is set we do nothing if io_data.io_flag.load(Ordering::Relaxed) { return; } let blocker = RawIoBlock::new(self.as_io_data()); yield_with(&blocker); } }
26.647887
72
0.606765
f48851b1c997e637d4783687881738fe3fbc4c62
3,490
extern crate bellperson;
extern crate ff;
extern crate log;
extern crate paired;
extern crate rand;

use bellperson::{Circuit, ConstraintSystem, SynthesisError};
use ff::{Field, PrimeField};
use paired::Engine;

/// Synthetic benchmark circuit: a chain of `interations` squaring
/// constraints (x * x = x2), used to keep a prover busy for a tunable
/// amount of time.
#[derive(Clone)]
pub struct DummyDemo {
    // NOTE(review): field name is a typo for `iterations`, but it is part
    // of the public struct interface — renaming would break callers.
    pub interations: u64,
}

impl<E: Engine> Circuit<E> for DummyDemo {
    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        // Start the chain at the constant 2.
        let mut x_val = E::Fr::from_str("2");
        let mut x = cs.alloc(|| "", || x_val.ok_or(SynthesisError::AssignmentMissing))?;

        for _ in 0..self.interations {
            // Allocate: x * x = x2
            let x2_val = x_val.map(|mut e| {
                e.square();
                e
            });
            let x2 = cs.alloc(|| "", || x2_val.ok_or(SynthesisError::AssignmentMissing))?;

            // Enforce: x * x = x2
            cs.enforce(|| "", |lc| lc + x, |lc| lc + x, |lc| lc + x2);

            x = x2;
            x_val = x2_val;
        }

        // Final constraint pins the last variable to its computed value.
        // NOTE(review): `x_val.unwrap()` assumes `from_str("2")` succeeded;
        // it is evaluated eagerly at enforce time, unlike the lazy alloc
        // closures above.
        cs.enforce(
            || "",
            |lc| lc + (x_val.unwrap(), CS::one()),
            |lc| lc + CS::one(),
            |lc| lc + x,
        );

        Ok(())
    }
}

/// GPU-only integration test: runs a high-priority prover thread and a
/// low-priority prover concurrently and verifies every produced proof.
#[cfg(feature = "gpu")]
#[test]
pub fn test_parallel_prover() {
    use bellperson::groth16::{
        create_random_proof, create_random_proof_in_priority, generate_random_parameters,
        prepare_verifying_key, verify_proof,
    };
    use paired::bls12_381::Bls12;
    use rand::thread_rng;
    use std::thread;
    use std::time::{Duration, Instant};

    env_logger::init();
    let rng = &mut thread_rng();

    println!("Initializing circuit...");
    println!("Creating parameters...");

    // Higher prio circuit (small, fast proofs)
    let c = DummyDemo {
        interations: 10_000,
    };
    // Lower prio circuit (50x more constraints)
    let c2 = DummyDemo {
        interations: 500_000,
    };

    let params = generate_random_parameters::<Bls12, _, _>(c.clone(), rng).unwrap();
    let params2 = generate_random_parameters::<Bls12, _, _>(c2.clone(), rng).unwrap();

    // Prepare the verification key (for proof verification)
    let pvk = prepare_verifying_key(&params.vk);
    let pvk2 = prepare_verifying_key(&params2.vk);

    let higher_thread = thread::spawn(move || {
        for _ in 0..10 {
            let now = Instant::now();
            let rng = &mut thread_rng();
            let proof_higher =
                create_random_proof_in_priority(c.clone(), &params, rng).unwrap();
            assert!(verify_proof(&pvk, &proof_higher, &[]).unwrap());
            println!(
                "Higher proof gen finished in {}s and {}ms",
                now.elapsed().as_secs(),
                now.elapsed().subsec_nanos() / 1000000
            );
            // Sleep in between higher proofs so that LOWER thread can acquire GPU again
            thread::sleep(Duration::from_millis(3000));
        }
    });

    // Start lower proofs after a few seconds
    thread::sleep(Duration::from_millis(10000));

    println!("Starting low priority proof gen...");
    {
        for _ in 0..10 {
            let now = Instant::now();
            let proof_lower = create_random_proof(c2.clone(), &params2, rng).unwrap();
            assert!(verify_proof(&pvk2, &proof_lower, &[]).unwrap());
            println!(
                "Lower proof gen finished in {}s and {}ms",
                now.elapsed().as_secs(),
                now.elapsed().subsec_nanos() / 1000000
            );
        }
    }

    higher_thread.join().unwrap();
}
29.327731
97
0.555587
33b29c3c92f702ba7fe1a99a650646f6d362286e
118
// sudo.rs // restricts callable functions based on user's access level #![no_std] #[macro_use] mod fs; mod memory;
13.111111
60
0.711864
71c10eb650754699eb8d98456c7fe0a4dd2e46ca
7,609
//! List of the removed feature gates.

use super::{to_nonzero, Feature, State};
use rustc_span::symbol::sym;

// Two macro arms: one builds REMOVED_FEATURES from `removed` rows, the
// other builds STABLE_REMOVED_FEATURES from `stable_removed` rows. The
// `$(#[doc = $doc:tt])*` matcher captures the `///` comments above each
// row and concatenates them into the feature's `description` — do not
// add `///` comments inside the invocations below unless they are meant
// to become part of that description.
macro_rules! declare_features {
    ($(
        $(#[doc = $doc:tt])* (removed, $feature:ident, $ver:expr, $issue:expr, None, $reason:expr),
    )+) => {
        /// Represents unstable features which have since been removed (it was once Active)
        pub const REMOVED_FEATURES: &[Feature] = &[
            $(
                Feature {
                    state: State::Removed { reason: $reason },
                    name: sym::$feature,
                    since: $ver,
                    issue: to_nonzero($issue),
                    edition: None,
                    description: concat!($($doc,)*),
                }
            ),+
        ];
    };

    ($(
        $(#[doc = $doc:tt])* (stable_removed, $feature:ident, $ver:expr, $issue:expr, None),
    )+) => {
        /// Represents stable features which have since been removed (it was once Accepted)
        pub const STABLE_REMOVED_FEATURES: &[Feature] = &[
            $(
                Feature {
                    state: State::Stabilized { reason: None },
                    name: sym::$feature,
                    since: $ver,
                    issue: to_nonzero($issue),
                    edition: None,
                    description: concat!($($doc,)*),
                }
            ),+
        ];
    };
}

#[rustfmt::skip]
declare_features! (
    // -------------------------------------------------------------------------
    // feature-group-start: removed features
    // -------------------------------------------------------------------------

    (removed, import_shadowing, "1.0.0", None, None, None),
    (removed, managed_boxes, "1.0.0", None, None, None),
    /// Allows use of unary negate on unsigned integers, e.g., -e for e: u8
    (removed, negate_unsigned, "1.0.0", Some(29645), None, None),
    (removed, reflect, "1.0.0", Some(27749), None, None),
    /// A way to temporarily opt out of opt in copy. This will *never* be accepted.
    (removed, opt_out_copy, "1.0.0", None, None, None),
    (removed, quad_precision_float, "1.0.0", None, None, None),
    (removed, struct_inherit, "1.0.0", None, None, None),
    (removed, test_removed_feature, "1.0.0", None, None, None),
    (removed, visible_private_types, "1.0.0", None, None, None),
    (removed, unsafe_no_drop_flag, "1.0.0", None, None, None),
    /// Allows using items which are missing stability attributes
    (removed, unmarked_api, "1.0.0", None, None, None),
    (removed, allocator, "1.0.0", None, None, None),
    (removed, simd, "1.0.0", Some(27731), None,
     Some("removed in favor of `#[repr(simd)]`")),
    (removed, advanced_slice_patterns, "1.0.0", Some(62254), None,
     Some("merged into `#![feature(slice_patterns)]`")),
    (removed, macro_reexport, "1.0.0", Some(29638), None,
     Some("subsumed by `pub use`")),
    /// Allows using custom attributes (RFC 572).
    (removed, custom_attribute, "1.0.0", Some(29642), None,
     Some("removed in favor of `#![register_tool]` and `#![register_attr]`")),
    /// Allows features specific to OIBIT (now called auto traits).
    /// Renamed to `auto_traits`.
    (removed, optin_builtin_traits, "1.0.0", Some(13231), None,
     Some("renamed to `auto_traits`")),
    (removed, pushpop_unsafe, "1.2.0", None, None, None),
    (removed, needs_allocator, "1.4.0", Some(27389), None,
     Some("subsumed by `#![feature(allocator_internals)]`")),
    /// Allows identifying crates that contain sanitizer runtimes.
    (removed, sanitizer_runtime, "1.17.0", None, None, None),
    /// Allows `#[doc(spotlight)]`.
    /// The attribute was renamed to `#[doc(notable_trait)]`
    /// and the feature to `doc_notable_trait`.
    (removed, doc_spotlight, "1.22.0", Some(45040), None,
     Some("renamed to `doc_notable_trait`")),
    (removed, proc_macro_mod, "1.27.0", Some(54727), None,
     Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
    (removed, proc_macro_expr, "1.27.0", Some(54727), None,
     Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
    (removed, proc_macro_non_items, "1.27.0", Some(54727), None,
     Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
    (removed, proc_macro_gen, "1.27.0", Some(54727), None,
     Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
    (removed, panic_implementation, "1.28.0", Some(44489), None,
     Some("subsumed by `#[panic_handler]`")),
    /// Allows the use of `#[derive(Anything)]` as sugar for `#[derive_Anything]`.
    (removed, custom_derive, "1.32.0", Some(29644), None,
     Some("subsumed by `#[proc_macro_derive]`")),
    /// Paths of the form: `extern::foo::bar`
    (removed, extern_in_paths, "1.33.0", Some(55600), None,
     Some("subsumed by `::foo::bar` paths")),
    (removed, quote, "1.33.0", Some(29601), None, None),
    /// Allows `[x; N]` where `x` is a constant (RFC 2203).
    (removed, const_in_array_repeat_expressions, "1.37.0", Some(49147), None,
     Some("removed due to causing promotable bugs")),
    /// Allows using `#[unsafe_destructor_blind_to_params]` (RFC 1238).
    (removed, dropck_parametricity, "1.38.0", Some(28498), None, None),
    (removed, await_macro, "1.38.0", Some(50547), None,
     Some("subsumed by `.await` syntax")),
    /// Allows defining `existential type`s.
    (removed, existential_type, "1.38.0", Some(63063), None,
     Some("removed in favor of `#![feature(min_type_alias_impl_trait)]`")),
    /// Allows using the macros:
    /// + `__diagnostic_used`
    /// + `__register_diagnostic`
    /// +`__build_diagnostic_array`
    (removed, rustc_diagnostic_macros, "1.38.0", None, None, None),
    /// Allows using `#[on_unimplemented(..)]` on traits.
    /// (Moved to `rustc_attrs`.)
    (removed, on_unimplemented, "1.40.0", None, None, None),
    /// Allows overlapping impls of marker traits.
    (removed, overlapping_marker_traits, "1.42.0", Some(29864), None,
     Some("removed in favor of `#![feature(marker_trait_attr)]`")),
    /// Allows `#[no_debug]`.
    (removed, no_debug, "1.43.0", Some(29721), None,
     Some("removed due to lack of demand")),
    /// Allows comparing raw pointers during const eval.
    (removed, const_compare_raw_pointers, "1.46.0", Some(53020), None,
     Some("cannot be allowed in const eval in any meaningful way")),
    /// Allows using the `#[link_args]` attribute.
    (removed, link_args, "1.53.0", Some(29596), None,
     Some("removed in favor of using `-C link-arg=ARG` on command line, \
           which is available from cargo build scripts with `cargo:rustc-link-arg` now")),
    /// Allows using `#[main]` to replace the entrypoint `#[lang = "start"]` calls.
    (removed, main, "1.53.0", Some(29634), None, None),
    (removed, pub_macro_rules, "1.53.0", Some(78855), None,
     Some("removed due to being incomplete, in particular it does not work across crates")),
    /// Allows the definition of `const` functions with some advanced features.
    (removed, const_fn, "1.54.0", Some(57563), None,
     Some("split into finer-grained feature gates")),
    /// Allows `#[doc(include = "some-file")]`.
    (removed, external_doc, "1.54.0", Some(44732), None,
     Some("use #[doc = include_str!(\"filename\")] instead, which handles macro invocations")),

    // -------------------------------------------------------------------------
    // feature-group-end: removed features
    // -------------------------------------------------------------------------
);

#[rustfmt::skip]
declare_features! (
    (stable_removed, no_stack_check, "1.0.0", None, None),
);
48.775641
99
0.581942
481897e879cf8544cf47359f40603b86591d919d
48,300
use crate::model::{CachedMessage, CachedPresence}; use super::{config::ResourceType, InMemoryCache}; use dashmap::DashMap; use std::{borrow::Cow, collections::HashSet, hash::Hash, ops::Deref}; use twilight_model::{ application::interaction::Interaction, channel::{message::MessageReaction, Channel, GuildChannel, ReactionType}, gateway::{event::Event, payload::*}, id::GuildId, }; pub trait UpdateCache { // Allow this for presentation purposes in documentation. #[allow(unused_variables)] fn update(&self, cache: &InMemoryCache) {} } impl UpdateCache for Event { #[allow(clippy::cognitive_complexity)] fn update(&self, c: &InMemoryCache) { use Event::*; match self { BanAdd(_) => {} BanRemove(_) => {} ChannelCreate(v) => c.update(v), ChannelDelete(v) => c.update(v), ChannelPinsUpdate(v) => c.update(v), ChannelUpdate(v) => c.update(v), GatewayHeartbeat(_) => {} GatewayHeartbeatAck => {} GatewayHello(_) => {} GatewayInvalidateSession(_v) => {} GatewayReconnect => {} GiftCodeUpdate => {} GuildCreate(v) => c.update(v.deref()), GuildDelete(v) => c.update(v.deref()), GuildEmojisUpdate(v) => c.update(v), GuildIntegrationsUpdate(v) => c.update(v), GuildUpdate(v) => c.update(v.deref()), IntegrationCreate(v) => c.update(v.deref()), IntegrationDelete(v) => c.update(v.deref()), IntegrationUpdate(v) => c.update(v.deref()), InteractionCreate(v) => c.update(v.deref()), InviteCreate(_) => {} InviteDelete(_) => {} MemberAdd(v) => c.update(v.deref()), MemberRemove(v) => c.update(v), MemberUpdate(v) => c.update(v.deref()), MemberChunk(v) => c.update(v), MessageCreate(v) => c.update(v.deref()), MessageDelete(v) => c.update(v), MessageDeleteBulk(v) => c.update(v), MessageUpdate(v) => c.update(v.deref()), PresenceUpdate(v) => c.update(v.deref()), PresencesReplace => {} ReactionAdd(v) => c.update(v.deref()), ReactionRemove(v) => c.update(v.deref()), ReactionRemoveAll(v) => c.update(v), ReactionRemoveEmoji(v) => c.update(v), Ready(v) => c.update(v.deref()), Resumed => {} RoleCreate(v) => 
c.update(v), RoleDelete(v) => c.update(v), RoleUpdate(v) => c.update(v), ShardConnected(_) => {} ShardConnecting(_) => {} ShardDisconnected(_) => {} ShardIdentifying(_) => {} ShardReconnecting(_) => {} ShardPayload(_) => {} ShardResuming(_) => {} StageInstanceCreate(v) => c.update(v), StageInstanceDelete(v) => c.update(v), StageInstanceUpdate(v) => c.update(v), TypingStart(v) => c.update(v.deref()), UnavailableGuild(v) => c.update(v), UserUpdate(v) => c.update(v), VoiceServerUpdate(v) => c.update(v), VoiceStateUpdate(v) => c.update(v.deref()), WebhooksUpdate(v) => c.update(v), } } } impl UpdateCache for BanAdd {} impl UpdateCache for BanRemove {} impl UpdateCache for ChannelCreate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::CHANNEL) { return; } match &self.0 { Channel::Group(c) => { super::upsert_item(&cache.0.groups, c.id, c.clone()); } Channel::Guild(c) => { if let Some(gid) = c.guild_id() { cache.cache_guild_channel(gid, c.clone()); } } Channel::Private(c) => { cache.cache_private_channel(c.clone()); } } } } impl UpdateCache for ChannelDelete { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::CHANNEL) { return; } match self.0 { Channel::Group(ref c) => { cache.delete_group(c.id); } Channel::Guild(ref c) => { cache.delete_guild_channel(c.id()); } Channel::Private(ref c) => { cache.0.channels_private.remove(&c.id); } } } } impl UpdateCache for ChannelPinsUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::CHANNEL) { return; } if let Some(mut r) = cache.0.channels_guild.get_mut(&self.channel_id) { let value = r.value_mut(); if let GuildChannel::Text(ref mut text) = value.data { text.last_pin_timestamp = self.last_pin_timestamp.clone(); } return; } if let Some(mut channel) = cache.0.channels_private.get_mut(&self.channel_id) { channel.last_pin_timestamp = self.last_pin_timestamp.clone(); return; } if let Some(mut group) = cache.0.groups.get_mut(&self.channel_id) { 
group.last_pin_timestamp = self.last_pin_timestamp.clone(); } } } impl UpdateCache for ChannelUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::CHANNEL) { return; } match self.0.clone() { Channel::Group(c) => { cache.cache_group(c); } Channel::Guild(c) => { if let Some(gid) = c.guild_id() { cache.cache_guild_channel(gid, c); } } Channel::Private(c) => { cache.cache_private_channel(c); } } } } impl UpdateCache for GuildCreate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::GUILD) { return; } cache.cache_guild(self.0.clone()); } } impl UpdateCache for GuildDelete { fn update(&self, cache: &InMemoryCache) { fn remove_ids<T: Eq + Hash, U>( guild_map: &DashMap<GuildId, HashSet<T>>, container: &DashMap<T, U>, guild_id: GuildId, ) { if let Some((_, ids)) = guild_map.remove(&guild_id) { for id in ids { container.remove(&id); } } } if !cache.wants(ResourceType::GUILD) { return; } let id = self.id; cache.0.guilds.remove(&id); if cache.wants(ResourceType::CHANNEL) { remove_ids(&cache.0.guild_channels, &cache.0.channels_guild, id); } if cache.wants(ResourceType::EMOJI) { remove_ids(&cache.0.guild_emojis, &cache.0.emojis, id); } if cache.wants(ResourceType::ROLE) { remove_ids(&cache.0.guild_roles, &cache.0.roles, id); } if cache.wants(ResourceType::VOICE_STATE) { // Clear out a guilds voice states when a guild leaves cache.0.voice_state_guilds.remove(&id); } if cache.wants(ResourceType::MEMBER) { if let Some((_, ids)) = cache.0.guild_members.remove(&id) { for user_id in ids { cache.0.members.remove(&(id, user_id)); } } } if cache.wants(ResourceType::PRESENCE) { if let Some((_, ids)) = cache.0.guild_presences.remove(&id) { for user_id in ids { cache.0.presences.remove(&(id, user_id)); } } } } } impl UpdateCache for GuildEmojisUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::EMOJI) { return; } cache.cache_emojis(self.guild_id, self.emojis.clone()); } } impl UpdateCache for 
GuildIntegrationsUpdate {} impl UpdateCache for GuildUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::GUILD) { return; } if let Some(mut guild) = cache.0.guilds.get_mut(&self.0.id) { guild.afk_channel_id = self.afk_channel_id; guild.afk_timeout = self.afk_timeout; guild.banner = self.banner.clone(); guild.default_message_notifications = self.default_message_notifications; guild.description = self.description.clone(); guild.features = self.features.clone(); guild.icon = self.icon.clone(); guild.max_members = self.max_members; guild.max_presences = Some(self.max_presences.unwrap_or(25000)); guild.mfa_level = self.mfa_level; guild.name = self.name.clone(); guild.nsfw_level = self.nsfw_level; guild.owner = self.owner; guild.owner_id = self.owner_id; guild.permissions = self.permissions; guild.preferred_locale = self.preferred_locale.clone(); guild.premium_tier = self.premium_tier; guild .premium_subscription_count .replace(self.premium_subscription_count.unwrap_or_default()); guild.splash = self.splash.clone(); guild.system_channel_id = self.system_channel_id; guild.verification_level = self.verification_level; guild.vanity_url_code = self.vanity_url_code.clone(); guild.widget_channel_id = self.widget_channel_id; guild.widget_enabled = self.widget_enabled; }; } } impl UpdateCache for IntegrationCreate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::INTEGRATION) { return; } if let Some(guild_id) = self.guild_id { super::upsert_guild_item( &cache.0.integrations, guild_id, (guild_id, self.id), self.0.clone(), ); } } } impl UpdateCache for IntegrationDelete { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::INTEGRATION) { return; } cache.delete_integration(self.guild_id, self.id); } } impl UpdateCache for IntegrationUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::INTEGRATION) { return; } if let Some(guild_id) = self.guild_id { cache.cache_integration(guild_id, 
self.0.clone()); } } } impl UpdateCache for InteractionCreate { fn update(&self, cache: &InMemoryCache) { #[allow(clippy::single_match)] match &self.0 { Interaction::ApplicationCommand(command) => { if cache.wants(ResourceType::MEMBER) { if let Some(member) = &command.member { if let Some(user) = &member.user { cache.cache_user(Cow::Borrowed(user), command.guild_id); cache.cache_borrowed_partial_member( command.guild_id.unwrap(), &member, user.id, ); } } } if let Some(user) = &command.user { cache.cache_user(Cow::Borrowed(user), None); } if let Some(resolved) = &command.data.resolved { for u in &resolved.users { cache.cache_user(Cow::Borrowed(u), command.guild_id); if !cache.wants(ResourceType::MEMBER) || command.guild_id.is_none() { continue; } // This should always match, because resolved members // are guaranteed to have a matching resolved user if let Some(member) = &resolved.members.iter().find(|m| m.id == u.id) { if let Some(guild_id) = command.guild_id { cache.cache_borrowed_interaction_member(guild_id, &member); } } } if cache.wants(ResourceType::ROLE) { if let Some(guild_id) = command.guild_id { cache.cache_roles(guild_id, resolved.roles.iter().cloned()); } } } } _ => {} }; } } impl UpdateCache for MemberAdd { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MEMBER) { return; } cache.cache_member(self.guild_id, self.0.clone()); cache .0 .guild_members .entry(self.guild_id) .or_default() .insert(self.0.user.id); } } impl UpdateCache for MemberChunk { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MEMBER) { return; } if self.members.is_empty() { return; } cache.cache_members(self.guild_id, self.members.clone()); let mut guild = cache.0.guild_members.entry(self.guild_id).or_default(); guild.extend(self.members.iter().map(|member| member.user.id)); } } impl UpdateCache for MemberRemove { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MEMBER) { return; } 
cache.0.members.remove(&(self.guild_id, self.user.id)); if let Some(mut members) = cache.0.guild_members.get_mut(&self.guild_id) { members.remove(&self.user.id); } // Avoid a deadlock by mutating the user, dropping the lock to the map, // and then maybe conditionally removing the user later. let mut maybe_remove_user = false; if let Some(mut user_tuple) = cache.0.users.get_mut(&self.user.id) { user_tuple.1.remove(&self.guild_id); maybe_remove_user = true; } if maybe_remove_user { cache .0 .users .remove_if(&self.user.id, |_, guild_set| guild_set.1.is_empty()); } } } impl UpdateCache for MemberUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MEMBER) { return; } let mut member = match cache.0.members.get_mut(&(self.guild_id, self.user.id)) { Some(member) => member, None => return, }; member.deaf = self.deaf.or(member.deaf); member.mute = self.mute.or(member.mute); member.nick = self.nick.clone(); member.roles = self.roles.clone(); member.joined_at.replace(self.joined_at.clone()); member.pending = self.pending; } } impl UpdateCache for MessageCreate { fn update(&self, cache: &InMemoryCache) { if cache.wants(ResourceType::USER) { cache.cache_user(Cow::Borrowed(&self.author), self.guild_id); } if let (Some(member), Some(guild_id), true) = ( &self.member, self.guild_id, cache.wants(ResourceType::MEMBER), ) { cache.cache_borrowed_partial_member(guild_id, member, self.author.id) } if !cache.wants(ResourceType::MESSAGE) { return; } let mut channel = cache.0.messages.entry(self.0.channel_id).or_default(); if channel.len() > cache.0.config.message_cache_size() { channel.pop_back(); } channel.push_front(CachedMessage::from(self.0.clone())); } } impl UpdateCache for MessageDelete { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MESSAGE) { return; } let mut channel = cache.0.messages.entry(self.channel_id).or_default(); if let Some(idx) = channel.iter().position(|msg| msg.id == self.id) { channel.remove(idx); } } } impl 
UpdateCache for MessageDeleteBulk { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MESSAGE) { return; } let mut channel = cache.0.messages.entry(self.channel_id).or_default(); for id in &self.ids { if let Some(idx) = channel.iter().position(|msg| &msg.id == id) { channel.remove(idx); } } } } impl UpdateCache for MessageUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::MESSAGE) { return; } let mut channel = cache.0.messages.entry(self.channel_id).or_default(); if let Some(mut message) = channel.iter_mut().find(|msg| msg.id == self.id) { if let Some(attachments) = &self.attachments { message.attachments = attachments.clone(); } if let Some(content) = &self.content { message.content = content.clone(); } if let Some(edited_timestamp) = &self.edited_timestamp { message.edited_timestamp.replace(edited_timestamp.clone()); } if let Some(embeds) = &self.embeds { message.embeds = embeds.clone(); } if let Some(mention_everyone) = self.mention_everyone { message.mention_everyone = mention_everyone; } if let Some(mention_roles) = &self.mention_roles { message.mention_roles = mention_roles.clone(); } if let Some(mentions) = &self.mentions { message.mentions = mentions.iter().map(|x| x.id).collect::<Vec<_>>(); } if let Some(pinned) = self.pinned { message.pinned = pinned; } if let Some(timestamp) = &self.timestamp { message.timestamp = timestamp.clone(); } if let Some(tts) = self.tts { message.tts = tts; } } } } impl UpdateCache for PresenceUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::PRESENCE) { return; } let presence = CachedPresence { activities: self.activities.clone(), client_status: self.client_status.clone(), guild_id: self.guild_id, status: self.status, user_id: crate::presence_user_id(&self.user), }; cache.cache_presence(self.guild_id, presence); } } impl UpdateCache for ReactionAdd { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::REACTION) { return; } let 
mut channel = cache.0.messages.entry(self.0.channel_id).or_default(); let message = match channel.iter_mut().find(|msg| msg.id == self.0.message_id) { Some(message) => message, None => return, }; if let Some(reaction) = message .reactions .iter_mut() .find(|r| r.emoji == self.0.emoji) { if !reaction.me { if let Some(current_user) = cache.current_user() { if current_user.id == self.0.user_id { reaction.me = true; } } } reaction.count += 1; } else { let me = cache .current_user() .map(|user| user.id == self.0.user_id) .unwrap_or_default(); message.reactions.push(MessageReaction { count: 1, emoji: self.0.emoji.clone(), me, }); } } } impl UpdateCache for ReactionRemove { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::REACTION) { return; } let mut channel = cache.0.messages.entry(self.0.channel_id).or_default(); let message = match channel.iter_mut().find(|msg| msg.id == self.0.message_id) { Some(message) => message, None => return, }; if let Some(reaction) = message .reactions .iter_mut() .find(|r| r.emoji == self.0.emoji) { if reaction.me { if let Some(current_user) = cache.current_user() { if current_user.id == self.0.user_id { reaction.me = false; } } } if reaction.count > 1 { reaction.count -= 1; } else { message.reactions.retain(|e| !(e.emoji == self.0.emoji)); } } } } impl UpdateCache for ReactionRemoveAll { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::REACTION) { return; } let mut channel = cache.0.messages.entry(self.channel_id).or_default(); let message = match channel.iter_mut().find(|msg| msg.id == self.message_id) { Some(message) => message, None => return, }; message.reactions.clear(); } } impl UpdateCache for ReactionRemoveEmoji { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::REACTION) { return; } let mut channel = cache.0.messages.entry(self.channel_id).or_default(); let message = match channel.iter_mut().find(|msg| msg.id == self.message_id) { Some(message) => message, None => 
return, }; let maybe_index = message.reactions.iter().position(|r| { matches!(&r.emoji, ReactionType::Unicode { name, .. } | ReactionType::Custom { name: Some(name), .. } if *name == self.emoji.name ) }); if let Some(index) = maybe_index { message.reactions.remove(index); } } } impl UpdateCache for Ready { fn update(&self, cache: &InMemoryCache) { if cache.wants(ResourceType::USER_CURRENT) { cache.cache_current_user(self.user.clone()); } if cache.wants(ResourceType::GUILD) { for guild in &self.guilds { cache.unavailable_guild(guild.id); } } } } impl UpdateCache for RoleCreate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::ROLE) { return; } super::upsert_guild_item( &cache.0.roles, self.guild_id, self.role.id, self.role.clone(), ); } } impl UpdateCache for RoleDelete { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::ROLE) { return; } cache.delete_role(self.role_id); } } impl UpdateCache for RoleUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::ROLE) { return; } cache.cache_role(self.guild_id, self.role.clone()); } } impl UpdateCache for StageInstanceCreate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::STAGE_INSTANCE) { return; } cache.cache_stage_instance(self.guild_id, self.0.clone()); } } impl UpdateCache for StageInstanceDelete { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::STAGE_INSTANCE) { return; } cache.delete_stage_instance(self.id); } } impl UpdateCache for StageInstanceUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::STAGE_INSTANCE) { return; } cache.cache_stage_instance(self.guild_id, self.0.clone()); } } impl UpdateCache for TypingStart {} impl UpdateCache for UnavailableGuild { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::GUILD) { return; } cache.0.guilds.remove(&self.id); cache.0.unavailable_guilds.insert(self.id); } } impl UpdateCache for UserUpdate { fn 
update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::USER_CURRENT) { return; } cache.cache_current_user(self.0.clone()); } } impl UpdateCache for VoiceServerUpdate { fn update(&self, _: &InMemoryCache) {} } impl UpdateCache for VoiceStateUpdate { fn update(&self, cache: &InMemoryCache) { if !cache.wants(ResourceType::VOICE_STATE) { return; } cache.cache_voice_state(self.0.clone()); if let (Some(guild_id), Some(member)) = (self.0.guild_id, &self.0.member) { cache.cache_member(guild_id, member.clone()); } } } impl UpdateCache for WebhooksUpdate {} #[cfg(test)] mod tests { use super::*; use crate::config::ResourceType; use twilight_model::{ application::interaction::{ application_command::{CommandData, CommandInteractionDataResolved, InteractionMember}, ApplicationCommand, InteractionType, }, channel::{ message::{MessageFlags, MessageType}, ChannelType, GuildChannel, Message, Reaction, TextChannel, }, gateway::payload::{reaction_remove_emoji::PartialEmoji, ChannelDelete}, guild::{ DefaultMessageNotificationLevel, ExplicitContentFilter, Guild, Member, MfaLevel, NSFWLevel, PartialGuild, PartialMember, Permissions, PremiumTier, Role, SystemChannelFlags, VerificationLevel, }, id::{ ApplicationId, ChannelId, CommandId, GuildId, InteractionId, MessageId, RoleId, UserId, }, user::User, voice::VoiceState, }; fn guild_channel_text() -> (GuildId, ChannelId, GuildChannel) { let guild_id = GuildId(1); let channel_id = ChannelId(2); let channel = GuildChannel::Text(TextChannel { guild_id: Some(guild_id), id: channel_id, kind: ChannelType::GuildText, last_message_id: None, last_pin_timestamp: None, name: "test".to_owned(), nsfw: false, parent_id: None, permission_overwrites: Vec::new(), position: 3, rate_limit_per_user: None, topic: None, }); (guild_id, channel_id, channel) } fn cache_with_message_and_reactions() -> InMemoryCache { let cache = InMemoryCache::new(); let msg = Message { activity: None, application: None, application_id: None, attachments: Vec::new(), 
author: User { avatar: Some("".to_owned()), bot: false, discriminator: "0001".to_owned(), email: None, flags: None, id: UserId(3), locale: None, mfa_enabled: None, name: "test".to_owned(), premium_type: None, public_flags: None, system: None, verified: None, }, channel_id: ChannelId(2), content: "ping".to_owned(), edited_timestamp: None, embeds: Vec::new(), flags: Some(MessageFlags::empty()), guild_id: Some(GuildId(1)), id: MessageId(4), interaction: None, kind: MessageType::Regular, member: Some(PartialMember { deaf: false, joined_at: None, mute: false, nick: Some("member nick".to_owned()), permissions: None, premium_since: None, roles: Vec::new(), user: None, }), mention_channels: Vec::new(), mention_everyone: false, mention_roles: Vec::new(), mentions: Vec::new(), pinned: false, reactions: Vec::new(), reference: None, stickers: Vec::new(), referenced_message: None, timestamp: String::new(), tts: false, webhook_id: None, }; cache.update(&MessageCreate(msg)); let mut reaction = ReactionAdd(Reaction { channel_id: ChannelId(2), emoji: ReactionType::Unicode { name: "😀".to_owned(), }, guild_id: Some(GuildId(1)), member: Some(Member { deaf: false, guild_id: GuildId(1), hoisted_role: None, joined_at: None, mute: false, nick: Some("member nick".to_owned()), pending: false, premium_since: None, roles: Vec::new(), user: User { avatar: Some("".to_owned()), bot: false, discriminator: "0001".to_owned(), email: None, flags: None, id: UserId(3), locale: None, mfa_enabled: None, name: "test".to_owned(), premium_type: None, public_flags: None, system: None, verified: None, }, }), message_id: MessageId(4), user_id: UserId(3), }); cache.update(&reaction); reaction.member.replace(Member { deaf: false, guild_id: GuildId(1), hoisted_role: None, joined_at: None, mute: false, nick: None, pending: false, premium_since: None, roles: Vec::new(), user: User { avatar: Some("".to_owned()), bot: false, discriminator: "0002".to_owned(), email: None, flags: None, id: UserId(5), locale: None, 
mfa_enabled: None, name: "test".to_owned(), premium_type: None, public_flags: None, system: None, verified: None, }, }); reaction.user_id = UserId(5); cache.update(&reaction); reaction.emoji = ReactionType::Unicode { name: "🗺️".to_owned(), }; cache.update(&reaction); cache } #[test] fn test_guild_update() { let cache = InMemoryCache::new(); let guild = Guild { afk_channel_id: None, afk_timeout: 0, application_id: None, approximate_member_count: None, approximate_presence_count: None, banner: None, channels: Vec::new(), default_message_notifications: DefaultMessageNotificationLevel::Mentions, description: None, discovery_splash: None, emojis: Vec::new(), explicit_content_filter: ExplicitContentFilter::None, features: Vec::new(), icon: None, id: GuildId(1), joined_at: None, large: false, max_members: None, max_presences: None, max_video_channel_users: None, member_count: None, members: Vec::new(), mfa_level: MfaLevel::None, name: "test".to_owned(), nsfw_level: NSFWLevel::Default, owner_id: UserId(1), owner: None, permissions: None, preferred_locale: "en_us".to_owned(), premium_subscription_count: None, premium_tier: PremiumTier::None, presences: Vec::new(), roles: Vec::new(), rules_channel_id: None, splash: None, stage_instances: Vec::new(), system_channel_flags: SystemChannelFlags::empty(), system_channel_id: None, unavailable: false, vanity_url_code: None, verification_level: VerificationLevel::VeryHigh, voice_states: Vec::new(), widget_channel_id: None, widget_enabled: None, }; cache.update(&GuildCreate(guild.clone())); let mutation = PartialGuild { id: guild.id, afk_channel_id: guild.afk_channel_id, afk_timeout: guild.afk_timeout, application_id: guild.application_id, banner: guild.banner, default_message_notifications: guild.default_message_notifications, description: guild.description, discovery_splash: guild.discovery_splash, emojis: guild.emojis, explicit_content_filter: guild.explicit_content_filter, features: guild.features, icon: guild.icon, max_members: 
guild.max_members, max_presences: guild.max_presences, member_count: guild.member_count, mfa_level: guild.mfa_level, name: "test2222".to_owned(), nsfw_level: guild.nsfw_level, owner_id: UserId(2), owner: guild.owner, permissions: guild.permissions, preferred_locale: guild.preferred_locale, premium_subscription_count: guild.premium_subscription_count, premium_tier: guild.premium_tier, roles: guild.roles, rules_channel_id: guild.rules_channel_id, splash: guild.splash, system_channel_flags: guild.system_channel_flags, system_channel_id: guild.system_channel_id, verification_level: guild.verification_level, vanity_url_code: guild.vanity_url_code, widget_channel_id: guild.widget_channel_id, widget_enabled: guild.widget_enabled, }; cache.update(&GuildUpdate(mutation.clone())); assert_eq!(cache.guild(guild.id).unwrap().name, mutation.name); assert_eq!(cache.guild(guild.id).unwrap().owner_id, mutation.owner_id); assert_eq!(cache.guild(guild.id).unwrap().id, mutation.id); } #[test] fn test_channel_delete_guild() { let cache = InMemoryCache::new(); let (guild_id, channel_id, channel) = guild_channel_text(); cache.cache_guild_channel(guild_id, channel.clone()); assert_eq!(1, cache.0.channels_guild.len()); assert!(cache .0 .guild_channels .get(&guild_id) .unwrap() .contains(&channel_id)); cache.update(&ChannelDelete(Channel::Guild(channel))); assert!(cache.0.channels_guild.is_empty()); assert!(cache.0.guild_channels.get(&guild_id).unwrap().is_empty()); } #[test] fn test_channel_update_guild() { let cache = InMemoryCache::new(); let (guild_id, channel_id, channel) = guild_channel_text(); cache.update(&ChannelUpdate(Channel::Guild(channel))); assert_eq!(1, cache.0.channels_guild.len()); assert!(cache .0 .guild_channels .get(&guild_id) .unwrap() .contains(&channel_id)); } #[test] fn test_voice_states_with_no_cached_guilds() { let cache = InMemoryCache::builder() .resource_types(ResourceType::VOICE_STATE) .build(); cache.update(&VoiceStateUpdate(VoiceState { channel_id: None, 
deaf: false, guild_id: Some(GuildId(1)), member: None, mute: false, self_deaf: false, self_mute: false, self_stream: false, session_id: "38fj3jfkh3pfho3prh2".to_string(), suppress: false, token: None, user_id: UserId(1), request_to_speak_timestamp: Some("2021-04-21T22:16:50+0000".to_owned()), })); } #[test] fn test_voice_states_members() { use twilight_model::{guild::member::Member, user::User}; let cache = InMemoryCache::new(); let mutation = VoiceStateUpdate(VoiceState { channel_id: Some(ChannelId(4)), deaf: false, guild_id: Some(GuildId(2)), member: Some(Member { deaf: false, guild_id: GuildId(2), hoisted_role: None, joined_at: None, mute: false, nick: None, pending: false, premium_since: None, roles: Vec::new(), user: User { avatar: Some("".to_owned()), bot: false, discriminator: "0001".to_owned(), email: None, flags: None, id: UserId(3), locale: None, mfa_enabled: None, name: "test".to_owned(), premium_type: None, public_flags: None, system: None, verified: None, }, }), mute: false, self_deaf: false, self_mute: false, self_stream: false, session_id: "".to_owned(), suppress: false, token: None, user_id: UserId(3), request_to_speak_timestamp: Some("2021-04-21T22:16:50+0000".to_owned()), }); cache.update(&mutation); assert_eq!(cache.0.members.len(), 1); { let entry = cache.0.users.get(&UserId(3)).unwrap(); assert_eq!(entry.value().1.len(), 1); } assert_eq!( cache.member(GuildId(2), UserId(3)).unwrap().user_id, UserId(3), ); } #[test] fn test_message_create() { let cache = InMemoryCache::builder() .resource_types(ResourceType::MESSAGE | ResourceType::MEMBER | ResourceType::USER) .message_cache_size(1) .build(); let msg = Message { activity: None, application: None, application_id: None, attachments: Vec::new(), author: User { avatar: Some("".to_owned()), bot: false, discriminator: "0001".to_owned(), email: None, flags: None, id: UserId(3), locale: None, mfa_enabled: None, name: "test".to_owned(), premium_type: None, public_flags: None, system: None, verified: 
None, }, channel_id: ChannelId(2), content: "ping".to_owned(), edited_timestamp: None, embeds: Vec::new(), flags: Some(MessageFlags::empty()), guild_id: Some(GuildId(1)), id: MessageId(4), interaction: None, kind: MessageType::Regular, member: Some(PartialMember { deaf: false, joined_at: None, mute: false, nick: Some("member nick".to_owned()), permissions: None, premium_since: None, roles: Vec::new(), user: None, }), mention_channels: Vec::new(), mention_everyone: false, mention_roles: Vec::new(), mentions: Vec::new(), pinned: false, reactions: Vec::new(), reference: None, stickers: Vec::new(), referenced_message: None, timestamp: String::new(), tts: false, webhook_id: None, }; cache.update(&MessageCreate(msg)); { let entry = cache.0.users.get(&UserId(3)).unwrap(); assert_eq!(entry.value().1.len(), 1); } assert_eq!( cache.member(GuildId(1), UserId(3)).unwrap().user_id, UserId(3), ); { let entry = cache.0.messages.get(&ChannelId(2)).unwrap(); assert_eq!(entry.value().len(), 1); } } #[test] fn test_reaction_add() { let cache = cache_with_message_and_reactions(); let msg = cache.message(ChannelId(2), MessageId(4)).unwrap(); assert_eq!(msg.reactions.len(), 2); let world_react = msg .reactions .iter() .find(|&r| matches!(&r.emoji, ReactionType::Unicode {name} if name == "🗺️")); let smiley_react = msg .reactions .iter() .find(|&r| matches!(&r.emoji, ReactionType::Unicode {name} if name == "😀")); assert!(world_react.is_some()); assert_eq!(world_react.unwrap().count, 1); assert!(smiley_react.is_some()); assert_eq!(smiley_react.unwrap().count, 2); } #[test] fn test_reaction_remove() { let cache = cache_with_message_and_reactions(); cache.update(&ReactionRemove(Reaction { channel_id: ChannelId(2), emoji: ReactionType::Unicode { name: "😀".to_owned(), }, guild_id: Some(GuildId(1)), member: None, message_id: MessageId(4), user_id: UserId(5), })); let msg = cache.message(ChannelId(2), MessageId(4)).unwrap(); assert_eq!(msg.reactions.len(), 2); let world_react = msg .reactions 
.iter() .find(|&r| matches!(&r.emoji, ReactionType::Unicode {name} if name == "🗺️")); let smiley_react = msg .reactions .iter() .find(|&r| matches!(&r.emoji, ReactionType::Unicode {name} if name == "😀")); assert!(world_react.is_some()); assert_eq!(world_react.unwrap().count, 1); assert!(smiley_react.is_some()); assert_eq!(smiley_react.unwrap().count, 1); } #[test] fn test_reaction_remove_emoji() { let cache = cache_with_message_and_reactions(); cache.update(&ReactionRemoveEmoji { channel_id: ChannelId(2), emoji: PartialEmoji { id: None, name: "😀".to_owned(), }, guild_id: GuildId(1), message_id: MessageId(4), }); let msg = cache.message(ChannelId(2), MessageId(4)).unwrap(); assert_eq!(msg.reactions.len(), 1); let world_react = msg .reactions .iter() .find(|&r| matches!(&r.emoji, ReactionType::Unicode {name} if name == "🗺️")); let smiley_react = msg .reactions .iter() .find(|&r| matches!(&r.emoji, ReactionType::Unicode {name} if name == "😀")); assert!(world_react.is_some()); assert_eq!(world_react.unwrap().count, 1); assert!(smiley_react.is_none()); } #[test] fn test_reaction_remove_all() { let cache = cache_with_message_and_reactions(); cache.update(&ReactionRemoveAll { channel_id: ChannelId(2), message_id: MessageId(4), guild_id: Some(GuildId(1)), }); let msg = cache.message(ChannelId(2), MessageId(4)).unwrap(); assert_eq!(msg.reactions.len(), 0); } #[test] fn test_interaction_create() { let cache = InMemoryCache::new(); cache.update(&InteractionCreate(Interaction::ApplicationCommand( Box::new(ApplicationCommand { application_id: ApplicationId(1), channel_id: ChannelId(2), data: CommandData { id: CommandId(5), name: "command name".into(), options: Vec::new(), resolved: Some(CommandInteractionDataResolved { channels: Vec::new(), members: vec![InteractionMember { hoisted_role: None, id: UserId(7), joined_at: Some("joined at date".into()), nick: None, premium_since: None, roles: vec![RoleId(8)], }], roles: vec![Role { color: 0u32, hoist: false, id: RoleId(8), managed: 
false, mentionable: true, name: "role name".into(), permissions: Permissions::empty(), position: 2i64, tags: None, }], users: vec![User { avatar: Some("different avatar".into()), bot: false, discriminator: "5678".into(), email: None, flags: None, id: UserId(7), locale: None, mfa_enabled: None, name: "different name".into(), premium_type: None, public_flags: None, system: None, verified: None, }], }), }, guild_id: Some(GuildId(3)), id: InteractionId(4), kind: InteractionType::ApplicationCommand, member: Some(PartialMember { deaf: false, joined_at: Some("joined at".into()), mute: false, nick: None, permissions: Some(Permissions::empty()), premium_since: None, roles: Vec::new(), user: Some(User { avatar: Some("avatar string".into()), bot: false, discriminator: "1234".into(), email: None, flags: None, id: UserId(6), locale: None, mfa_enabled: None, name: "username".into(), premium_type: None, public_flags: None, system: None, verified: None, }), }), token: "token".into(), user: None, }), ))); { let guild_members = cache.guild_members(GuildId(3)).unwrap(); assert_eq!(guild_members.len(), 2); } { let member = cache.member(GuildId(3), UserId(6)).unwrap(); let user = cache.user(member.user_id).unwrap(); assert_eq!(user.avatar.clone().unwrap(), "avatar string"); } { let member = cache.member(GuildId(3), UserId(7)).unwrap(); let user = cache.user(member.user_id).unwrap(); assert_eq!(user.avatar.clone().unwrap(), "different avatar"); } { let guild_roles = cache.guild_roles(GuildId(3)).unwrap(); assert_eq!(guild_roles.len(), 1); } } }
31.818182
99
0.506687
569953e13bf70cb24d899f15fc765d1df717b7ea
9,862
//! A thread-safe reference-counted slice type. use core::{cmp, fmt, ops}; use core::hash::{Hash, Hasher}; use alloc::sync::{Arc, Weak}; use alloc::boxed::Box; /// A reference-counted slice type. /// /// This is exactly like `&[T]` except without lifetimes, so the /// allocation only disappears once all `ArcSlice`s have disappeared. /// /// NB. this can lead to applications effectively leaking memory if a /// short subslice of a long `ArcSlice` is held. /// /// # Examples /// /// ```rust /// use shared_slice::arc::ArcSlice; /// /// let x = ArcSlice::new(Box::new(["foo", "bar", "baz"])); /// println!("{:?}", x); // ["foo", "bar", "baz"] /// println!("{:?}", x.slice(1, 3)); // ["bar", "baz"] /// ``` /// /// Constructing with a dynamic number of elements: /// /// ```rust /// # #![allow(unstable)] /// use shared_slice::arc::ArcSlice; /// /// let n = 5; /// /// let v: Vec<u8> = (0u8..n).collect(); // 0, ..., 4 /// /// let x = ArcSlice::new(v.into_boxed_slice()); /// assert_eq!(&*x, [0, 1, 2, 3, 4]); /// ``` pub struct ArcSlice<T> { data: *const [T], counts: Arc<Box<[T]>>, } unsafe impl<T: Send + Sync> Send for ArcSlice<T> {} unsafe impl<T: Send + Sync> Sync for ArcSlice<T> {} /// A non-owning reference-counted slice type. /// /// This is to `ArcSlice` as `std::sync::Weak` is to `std::sync::Arc`, and /// allows one to have cyclic references without stopping memory from /// being deallocated. pub struct WeakSlice<T> { data: *const [T], counts: Weak<Box<[T]>>, } unsafe impl<T: Send + Sync> Send for WeakSlice<T> {} unsafe impl<T: Send + Sync> Sync for WeakSlice<T> {} impl<T> ArcSlice<T> { /// Construct a new `ArcSlice` containing the elements of `slice`. /// /// This reuses the allocation of `slice`. pub fn new(slice: Box<[T]>) -> ArcSlice<T> { ArcSlice { data: &*slice, counts: Arc::new(slice), } } /// Downgrade self into a weak slice. 
pub fn downgrade(&self) -> WeakSlice<T> { WeakSlice { data: self.data, counts: Arc::downgrade(&self.counts) } } /// Construct a new `ArcSlice` that only points to elements at /// indices `lo` (inclusive) through `hi` (exclusive). /// /// This consumes `self` to avoid unnecessary reference-count /// modifications. Use `.clone()` if it is necessary to refer to /// `self` after calling this. /// /// # Panics /// /// Panics if `lo > hi` or if either are strictly greater than /// `self.len()`. pub fn slice(mut self, lo: usize, hi: usize) -> ArcSlice<T> { self.data = &self[lo..hi]; self } /// Construct a new `ArcSlice` that only points to elements at /// indices up to `hi` (exclusive). /// /// This consumes `self` to avoid unnecessary reference-count /// modifications. Use `.clone()` if it is necessary to refer to /// `self` after calling this. /// /// # Panics /// /// Panics if `hi > self.len()`. pub fn slice_to(self, hi: usize) -> ArcSlice<T> { self.slice(0, hi) } /// Construct a new `ArcSlice` that only points to elements at /// indices starting at `lo` (inclusive). /// /// This consumes `self` to avoid unnecessary reference-count /// modifications. Use `.clone()` if it is necessary to refer to /// `self` after calling this. /// /// # Panics /// /// Panics if `lo > self.len()`. 
pub fn slice_from(self, lo: usize) -> ArcSlice<T> { let hi = self.len(); self.slice(lo, hi) } } impl<T> Clone for ArcSlice<T> { fn clone(&self) -> ArcSlice<T> { ArcSlice { data: self.data, counts: self.counts.clone() } } } impl<T> ops::Deref for ArcSlice<T> { type Target = [T]; fn deref<'a>(&'a self) -> &'a [T] { unsafe {&*self.data} } } impl<T> AsRef<[T]> for ArcSlice<T> { fn as_ref(&self) -> &[T] { &**self } } impl<T: PartialEq> PartialEq for ArcSlice<T> { fn eq(&self, other: &ArcSlice<T>) -> bool { **self == **other } fn ne(&self, other: &ArcSlice<T>) -> bool { **self != **other } } impl<T: Eq> Eq for ArcSlice<T> {} impl<T: PartialOrd> PartialOrd for ArcSlice<T> { fn partial_cmp(&self, other: &ArcSlice<T>) -> Option<cmp::Ordering> { (**self).partial_cmp(&**other) } fn lt(&self, other: &ArcSlice<T>) -> bool { **self < **other } fn le(&self, other: &ArcSlice<T>) -> bool { **self <= **other } fn gt(&self, other: &ArcSlice<T>) -> bool { **self > **other } fn ge(&self, other: &ArcSlice<T>) -> bool { **self >= **other } } impl<T: Ord> Ord for ArcSlice<T> { fn cmp(&self, other: &ArcSlice<T>) -> cmp::Ordering { (**self).cmp(&**other) } } impl<T: Hash> Hash for ArcSlice<T> { fn hash<H: Hasher>(&self, state: &mut H) { Hash::hash(&**self, state) } } impl<T: fmt::Debug> fmt::Debug for ArcSlice<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<T> WeakSlice<T> { /// Attempt to upgrade `self` to a strongly-counted `ArcSlice`. /// /// Returns `None` if this is not possible (the data has already /// been freed). 
pub fn upgrade(&self) -> Option<ArcSlice<T>> { self.counts.upgrade().map(|counts| { ArcSlice { data: self.data, counts: counts } }) } } #[cfg(test)] mod tests { use super::{ArcSlice, WeakSlice}; use std::sync::{Arc, Mutex}; use std::cell::Cell; use std::cmp::Ordering; #[test] fn clone() { let x = ArcSlice::new(Box::new([Cell::new(false)])); let y = x.clone(); assert_eq!(x[0].get(), false); assert_eq!(y[0].get(), false); x[0].set(true); assert_eq!(x[0].get(), true); assert_eq!(y[0].get(), true); } #[test] fn test_upgrade_downgrade() { let x = ArcSlice::new(Box::new([1])); let y: WeakSlice<_> = x.downgrade(); assert_eq!(y.upgrade(), Some(x.clone())); drop(x); assert!(y.upgrade().is_none()) } #[test] fn test_total_cmp() { let x = ArcSlice::new(Box::new([1, 2, 3])); let y = ArcSlice::new(Box::new([1, 2, 3])); let z = ArcSlice::new(Box::new([1, 2, 4])); assert_eq!(x, x); assert_eq!(x, y); assert!(x != z); assert!(y != z); assert!(x < z); assert!(x <= z); assert!(!(x > z)); assert!(!(x >= z)); assert!(!(z < x)); assert!(!(z <= x)); assert!(z > x); assert!(z >= x); assert_eq!(x.partial_cmp(&x), Some(Ordering::Equal)); assert_eq!(x.partial_cmp(&y), Some(Ordering::Equal)); assert_eq!(x.partial_cmp(&z), Some(Ordering::Less)); assert_eq!(z.partial_cmp(&y), Some(Ordering::Greater)); assert_eq!(x.cmp(&x), Ordering::Equal); assert_eq!(x.cmp(&y), Ordering::Equal); assert_eq!(x.cmp(&z), Ordering::Less); assert_eq!(z.cmp(&y), Ordering::Greater); } #[test] fn test_partial_cmp() { use std::f64; let x = ArcSlice::new(Box::new([1.0, f64::NAN])); let y = ArcSlice::new(Box::new([1.0, f64::NAN])); let z = ArcSlice::new(Box::new([2.0, f64::NAN])); let w = ArcSlice::new(Box::new([f64::NAN, 1.0])); assert!(!(x == y)); assert!(x != y); assert!(!(x < y)); assert!(!(x <= y)); assert!(!(x > y)); assert!(!(x >= y)); assert!(x < z); assert!(x <= z); assert!(!(x > z)); assert!(!(x >= z)); assert!(!(z < w)); assert!(!(z <= w)); assert!(!(z > w)); assert!(!(z >= w)); assert_eq!(x.partial_cmp(&x), 
None); assert_eq!(x.partial_cmp(&y), None); assert_eq!(x.partial_cmp(&z), Some(Ordering::Less)); assert_eq!(z.partial_cmp(&x), Some(Ordering::Greater)); assert_eq!(x.partial_cmp(&w), None); assert_eq!(y.partial_cmp(&w), None); assert_eq!(z.partial_cmp(&w), None); assert_eq!(w.partial_cmp(&w), None); } #[test] fn test_show() { let x = ArcSlice::new(Box::new([1, 2])); assert_eq!(format!("{:?}", x), "[1, 2]"); let y: ArcSlice<i32> = ArcSlice::new(Box::new([])); assert_eq!(format!("{:?}", y), "[]"); } #[test] fn test_slice() { let x = ArcSlice::new(Box::new([1, 2, 3])); let real = [1, 2, 3]; for i in (0..3 + 1) { for j in (i..3 + 1) { let slice: ArcSlice<_> = x.clone().slice(i, j); assert_eq!(&*slice, &real[i..j]); } assert_eq!(&*x.clone().slice_to(i), &real[..i]); assert_eq!(&*x.clone().slice_from(i), &real[i..]); } } #[test] fn test_send_sync() { fn assert_send<T: Send>() {} fn assert_sync<T: Send>() {} assert_send::<ArcSlice<u8>>(); assert_sync::<ArcSlice<u8>>(); assert_send::<WeakSlice<u8>>(); assert_sync::<WeakSlice<u8>>(); } #[test] fn test_drop() { let drop_flag = Arc::new(Mutex::new(0)); struct Foo(Arc<Mutex<i32>>); impl Drop for Foo { fn drop(&mut self) { let mut n = self.0.lock().unwrap(); *n += 1; } } let whole = ArcSlice::new(Box::new([Foo(drop_flag.clone()), Foo(drop_flag.clone())])); drop(whole); assert_eq!(*drop_flag.lock().unwrap(), 2); *drop_flag.lock().unwrap() = 0; let whole = ArcSlice::new(Box::new([Foo(drop_flag.clone()), Foo(drop_flag.clone())])); let part = whole.slice(1, 2); drop(part); assert_eq!(*drop_flag.lock().unwrap(), 2); } }
28.25788
94
0.518759
1ea28ad6f4e88f36aaf022330d3b51a7256bf4e2
36,470
use std::str::FromStr; #[allow(unused_imports)] use tract_itertools::Itertools; use tract_core::internal::*; use tract_core::model::TypedModel; use tract_hir::internal::*; #[cfg(feature = "pulse")] use tract_pulse::internal::*; #[cfg(feature = "tf")] use tract_tensorflow::tfpb::tensorflow::GraphDef; use crate::display_params::DisplayParams; use crate::CliResult; use readings_probe::*; use super::display_params; use super::{info_usage, tensor}; use super::model::Model; use std::convert::*; #[derive(Debug)] pub enum SomeGraphDef { NoGraphDef, #[cfg(feature = "kaldi")] Kaldi(tract_kaldi::KaldiProtoModel), Nnef(tract_nnef::ProtoModel), #[cfg(feature = "onnx")] Onnx(tract_onnx::pb::ModelProto, tract_onnx::model::ParseResult), #[cfg(feature = "tf")] Tf(GraphDef), } #[derive(Debug)] pub struct ModelBuildingError(pub Box<dyn Model>, pub Box<dyn std::error::Error + Send + Sync>); impl std::fmt::Display for ModelBuildingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&*self.1, f) } } impl std::error::Error for ModelBuildingError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { Some(&*self.1) } } #[cfg(not(feature = "pulse"))] type PulsedModel = (); /// Structure holding the parsed parameters. 
pub struct Parameters { pub graph: SomeGraphDef, pub pulsed_model: Option<Arc<PulsedModel>>, pub tract_model: Arc<dyn Model>, pub reference_model: Option<Arc<dyn Model>>, #[cfg(feature = "conform")] pub tf_model: Option<tract_tensorflow::conform::tf::Tensorflow>, #[cfg(not(feature = "conform"))] #[allow(dead_code)] pub tf_model: (), pub input_values: HashMap<String, Vec<Arc<Tensor>>>, pub assertions: Assertions, pub machine_friendly: bool, pub multiturn: bool, } #[cfg(feature = "tf")] type TfExt = tract_tensorflow::model::TfModelExtensions; #[cfg(not(feature = "tf"))] type TfExt = (); impl Parameters { fn disco_model(matches: &clap::ArgMatches) -> CliResult<(std::path::PathBuf, bool)> { let filename = matches.value_of("model").context("Model argument required")?; let filename = std::path::PathBuf::from(filename); let (filename, onnx_tc) = if !filename.exists() { bail!("model not found: {:?}", filename) } else if std::fs::metadata(&filename)?.is_dir() && filename.join("graph.nnef").exists() { (filename, false) } else if std::fs::metadata(&filename)?.is_dir() && filename.join("model.onnx").exists() { (filename.join("model.onnx"), true) } else { (filename, false) }; Ok((filename, onnx_tc)) } fn load_model( matches: &clap::ArgMatches, probe: Option<&Probe>, filename: &std::path::Path, ) -> CliResult<(SomeGraphDef, Box<dyn Model>, Option<TfExt>)> { let need_graph = matches.is_present("proto") || matches.subcommand_name() == Some("compare-pbdir"); let format = matches.value_of("format").unwrap_or( if filename.extension().map(|s| s == "onnx").unwrap_or(false) { "onnx" } else if filename.extension().map(|s| s == "raw" || s == "txt").unwrap_or(false) { "kaldi" } else if filename.is_dir() || filename.to_string_lossy().ends_with(".tar") || filename.to_string_lossy().ends_with(".tar.gz") || filename.extension().map(|s| s == "tgz").unwrap_or(false) { "nnef" } else { "tf" }, ); let triplet: (SomeGraphDef, Box<dyn Model>, Option<TfExt>) = match format { #[cfg(feature = "kaldi")] 
"kaldi" => { let kaldi = tract_kaldi::kaldi(); info_usage("loaded framework (kaldi)", probe); let mut graph = kaldi.proto_model_for_path(&filename)?; info_usage("proto model loaded", probe); if let Some(i) = matches.value_of("kaldi_adjust_final_offset") { graph.adjust_final_offset = i.parse()?; } let parsed = kaldi.model_for_proto_model(&graph)?; if need_graph { (SomeGraphDef::Kaldi(graph), Box::new(parsed), Option::<TfExt>::None) } else { (SomeGraphDef::NoGraphDef, Box::new(parsed), Option::<TfExt>::None) } } "nnef" => { let nnef = super::nnef(&matches); let mut file = std::fs::File::open(&filename)?; let proto_model = if filename.to_string_lossy().ends_with("gz") { nnef.proto_model_for_read(&mut flate2::read::GzDecoder::new(file))? } else { nnef.proto_model_for_read(&mut file)? }; info_usage("proto model loaded", probe); if need_graph { ( SomeGraphDef::Nnef(proto_model.clone()), Box::new( nnef.translate(&proto_model) .map_err(|(g, e)| ModelBuildingError(Box::new(g), e.into()))?, ), Option::<TfExt>::None, ) } else { ( SomeGraphDef::NoGraphDef, Box::new( nnef.translate(&proto_model) .map_err(|(g, e)| ModelBuildingError(Box::new(g), e.into()))?, ), Option::<TfExt>::None, ) } } #[cfg(feature = "onnx")] "onnx" => { let onnx = tract_onnx::onnx(); info_usage("loaded framework (onnx)", probe); let graph = onnx.proto_model_for_path(&filename)?; info_usage("proto model loaded", probe); let parsed = onnx.parse(&graph)?; if need_graph { ( SomeGraphDef::Onnx(graph, parsed.clone()), Box::new(parsed.model), Option::<TfExt>::None, ) } else { (SomeGraphDef::NoGraphDef, Box::new(parsed.model), Option::<TfExt>::None) } } #[cfg(feature = "tf")] "tf" => { let tf = tract_tensorflow::tensorflow(); info_usage("loaded framework (tf)", probe); let mut graph = tf.proto_model_for_path(&filename)?; info_usage("proto model loaded", probe); if matches.is_present("determinize") { tract_tensorflow::Tensorflow::determinize(&mut graph)?; } let mut model_and_ext = tf.parse_graph(&graph)?; 
model_and_ext.1.initializing_nodes = matches .values_of("tf_initializer_output_node") .map(|values| { values .map(|name| model_and_ext.0.node_id_by_name(name)) .collect::<TractResult<Vec<usize>>>() }) .transpose()? .unwrap_or(vec![]); if need_graph { (SomeGraphDef::Tf(graph), Box::new(model_and_ext.0), Some(model_and_ext.1)) } else { (SomeGraphDef::NoGraphDef, Box::new(model_and_ext.0), Some(model_and_ext.1)) } } _ => bail!( "Format {} not supported. You may need to recompile tract with the right features.", format ), }; Ok(triplet) } fn kaldi_downsample<F, O>(raw_model: &mut Graph<F, O>, period: isize) -> CliResult<()> where F: std::fmt::Debug + Clone + Hash + Fact, O: std::fmt::Debug + std::fmt::Display + AsRef<dyn Op> + AsMut<dyn Op> + Clone + Hash, Graph<F, O>: SpecialOps<F, O>, tract_core::ops::Downsample: Into<O>, { if period != 1 { let mut outputs = raw_model.output_outlets()?.to_vec(); let output_name = raw_model.node(outputs[0].node).name.clone(); raw_model.node_mut(outputs[0].node).name = format!("{}-old", output_name); let id = raw_model.wire_node( output_name, tract_core::ops::Downsample::new(0, period as _, 0), &outputs[0..1], )?[0]; outputs[0] = id; raw_model.set_output_outlets(&*outputs)?; } Ok(()) } fn kaldi_context<F, O>(raw_model: &mut Graph<F, O>, left: usize, right: usize) -> CliResult<()> where F: std::fmt::Debug + Clone + Hash + Fact, O: std::fmt::Debug + std::fmt::Display + AsRef<dyn Op> + AsMut<dyn Op> + Clone + Hash, Graph<F, O>: SpecialOps<F, O>, tract_hir::ops::array::Pad: Into<O>, { let op = tract_core::ops::array::Pad::new( vec![(left, right), (0, 0)], tract_core::ops::array::PadMode::Edge, ); let mut patch = ModelPatch::default(); for input in raw_model.input_outlets()? 
{ let tap = patch.tap_model(raw_model, *input)?; let pad = patch.wire_node( format!("{}-pad", raw_model.node(input.node).name), op.clone(), &[tap], )?[0]; patch.shunt_outside(&raw_model, *input, pad)?; } patch.apply(raw_model)?; Ok(()) } fn use_onnx_test_case_data_set<F, O, E>( raw_model: &mut Graph<F, O>, input_values: &mut HashMap<String, Vec<Arc<Tensor>>>, assertions: &mut Assertions, inputs_dir: &std::path::Path, ) -> CliResult<()> where F: std::fmt::Debug + Clone + Hash + Fact + for<'a> TryFrom<&'a InferenceFact, Error = E>, O: std::fmt::Debug + std::fmt::Display + AsRef<dyn Op> + AsMut<dyn Op> + Clone + Hash, Graph<F, O>: SpecialOps<F, O>, E: std::fmt::Debug, { let files = inputs_dir .read_dir()? .map(|file| { let file = file?; let filename = file .file_name() .into_string() .map_err(|s| format_err!("Can't convert OSString to String ({:?})", s))?; let is_input = filename.starts_with("input_"); if is_input || filename.starts_with("output_") { let ix = filename .split("_") .nth(1) .unwrap() .split(".") .nth(0) .unwrap() .parse::<usize>()?; let (name, tensor) = tensor::for_data(file.path().to_str().unwrap())?; Ok(Some(( ix, is_input, filename, name.unwrap_or_else(|| { let nodes = if is_input { raw_model.input_outlets().unwrap() } else { raw_model.output_outlets().unwrap() }; raw_model.node(nodes[0].node).name.clone() }), tensor, ))) } else { Ok(None) } }) .collect::<CliResult<Vec<Option<_>>>>()?; let files = files.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let (inputs, outputs) = files.iter().partition::<Vec<_>, _>(|f| f.1); let inputs = inputs.into_iter().sorted_by_key(|f| f.0).collect::<Vec<_>>(); let outputs = outputs.into_iter().sorted_by_key(|f| f.0).collect::<Vec<_>>(); let input_names = inputs.iter().map(|i| &*i.3).collect::<Vec<_>>(); let output_names = outputs.iter().map(|i| &*i.3).collect::<Vec<_>>(); debug!("input_names from files: {:?}", input_names); debug!("output_names from files: {:?}", output_names); if input_names.iter().all(|n| n.len() 
> 0) { raw_model.set_input_names(input_names)?; } if output_names.iter().all(|n| n.len() > 0) { raw_model.set_output_names(output_names)?; } for (ix, _, filename, name, tensor) in inputs.into_iter() { debug!("Using {} as input {} ({}): {:?}", filename, ix, name, tensor); if let Some(v) = tensor.value.concretize() { input_values.insert(name.to_string(), vec![v]); } raw_model.set_input_fact(*ix, (&tensor.clone().without_value()).try_into().unwrap())?; } let outputs = outputs .into_iter() .inspect(|(ix, _, filename, name, tensor)| { debug!("Using {} as output {} ({}): {:?}", filename, ix, name, tensor); }) .map(|(_, _, _, _, tensor)| tensor.concretize()) .collect(); assertions.assert_outputs = outputs; Ok(()) } fn inputs<F, O, E>( raw_model: &mut Graph<F, O>, assertions: &mut Assertions, matches: &clap::ArgMatches, filename: &std::path::Path, onnx_tc: bool, ) -> CliResult<HashMap<String, Vec<Arc<Tensor>>>> where F: std::fmt::Debug + Clone + Hash + Fact + for<'a> TryFrom<&'a InferenceFact, Error = E>, O: std::fmt::Debug + std::fmt::Display + AsRef<dyn Op> + AsMut<dyn Op> + Clone + Hash + Send + Sync, Graph<F, O>: SpecialOps<F, O> + Send, tract_core::ops::konst::Const: Into<O>, E: std::fmt::Debug, { let mut input_values = HashMap::new(); if let Some(inputs) = matches.values_of("input") { for (ix, v) in inputs.enumerate() { let (name, t) = tensor::for_string(v)?; let fact = t.clone().without_value(); let fact: F = (&fact).try_into().unwrap(); let outlet = if let Some(name) = name.filter(|s| s.len() > 0) { let node = raw_model.node_by_name(&*name)?; OutletId::new(node.id, 0) } else { raw_model.input_outlets()?[ix] }; if let Some(v) = t.value.concretize() { input_values.insert( raw_model.node(raw_model.inputs[ix].node).name.to_string(), vec![v], ); } if !raw_model.inputs.contains(&outlet) { // shed edges from parents to us for input in raw_model.node(outlet.node).inputs.clone() { raw_model.node_mut(input.node).outputs[input.slot] .successors .retain(|s| s.node != 
outlet.node); } // clear our inputs and change ourselves to a source raw_model.node_mut(outlet.node).inputs.clear(); raw_model.node_mut(outlet.node).op = raw_model.create_source(fact.clone()) } info!("Input #{}: {:?}", ix, t); raw_model.set_outlet_fact(outlet, fact)?; } } if let Some(bundle) = matches.values_of("input_bundle") { for input in bundle { let mut npz = ndarray_npy::NpzReader::new( std::fs::File::open(input).with_context(|| format!("opening {:?}", input))?, )?; for name in npz.names()? { match tensor::for_npz(&mut npz, &*name) { Ok(t) => debug!("{} contains {}: {:?}", input, name, t), Err(r) => warn!("Could not read {} from {} ({})", name, input, r), } } let input_outlets = raw_model.input_outlets()?.to_vec(); let last_turn = if matches.is_present("multiturn") { npz.names()? .iter() .map(|name| { name.split('/') .nth(0) .unwrap() .split('_') .nth(1) .unwrap() .parse::<usize>() .unwrap() }) .max() .unwrap() } else { 0 }; for (ix, input) in input_outlets.iter().enumerate() { let mut values = vec![]; let name = raw_model.node(input.node).name.clone(); for turn in 0..=last_turn { let filename = if matches.is_present("multiturn") { format!("turn_{}/{}", turn, name) } else { name.to_string() }; let npy_name = format!("{}.npy", filename); if let Ok(t) = tensor::for_npz(&mut npz, &filename) .or_else(|_| tensor::for_npz(&mut npz, &npy_name)) { let shape = t.shape().to_vec(); let fact = InferenceFact::dt_shape(t.datum_type(), shape); raw_model.set_input_fact(ix, (&fact).try_into().unwrap())?; values.push(t.into_arc_tensor()); } } if values.len() > 0 { input_values.insert(name, values); } } } } if onnx_tc { Self::use_onnx_test_case_data_set( raw_model, &mut input_values, assertions, filename.parent().unwrap().join("test_data_set_0").as_path(), )? } if let Some(tc) = matches.value_of("onnx_test_data_set") { Self::use_onnx_test_case_data_set( raw_model, &mut input_values, assertions, &std::path::Path::new(tc), )? 
} let const_inputs = matches.values_of("const_input").map(|c| c.collect()).unwrap_or(vec![]); for i in (0..raw_model.inputs.len()).rev() { let input = raw_model.inputs[i]; let name = raw_model.node_name(input.node); if const_inputs.contains(&raw_model.node_name(input.node)) { if let Some(v) = input_values.remove(name) { raw_model.node_mut(input.node).op = tract_core::ops::konst::Const::new(v[0].clone()).into(); raw_model.node_mut(input.node).outputs[0].fact = F::try_from(&InferenceFact::from(v[0].clone().into_tensor())).unwrap(); } else { bail!( "Don't have value for input {}, can't make it const", raw_model.node_name(input.node) ); } raw_model.inputs.remove(i); } } Ok(input_values) } #[allow(unused_variables)] fn pipeline( matches: &clap::ArgMatches, probe: Option<&readings_probe::Probe>, raw_model: Box<dyn Model>, tf_model_extensions: Option<TfExt>, reference_stage: Option<&str>, ) -> CliResult<(Arc<dyn Model>, Option<Arc<PulsedModel>>, Option<Arc<dyn Model>>)> { let keep_last = matches.is_present("verbose"); #[cfg(feature = "pulse")] let pulse: Option<usize> = matches.value_of("pulse").map(|s| s.parse::<usize>()).transpose()?; #[cfg(feature = "pulse")] let concretize_stream_dim: Option<usize> = matches.value_of("concretize_stream_dim").map(|s| s.parse()).transpose()?; let stop_at = matches.value_of("pass").unwrap_or(if matches.is_present("optimize") { "optimize" } else { "before-optimize" }); let nnef_cycle = matches.is_present("nnef_cycle"); info!("Will stop at {}", stop_at); if stop_at == "load" { return Ok((raw_model.into(), None, None)); } let mut inference_model: Option<Arc<InferenceModel>> = None; let mut typed_model: Option<Arc<TypedModel>> = None; #[allow(unused_mut)] let mut pulsed_model: Option<Arc<PulsedModel>> = None; let mut reference_model: Option<Arc<dyn Model>> = None; if raw_model.is::<InferenceModel>() { inference_model = Some(raw_model.downcast::<InferenceModel>().unwrap().into()); } else if raw_model.is::<TypedModel>() { typed_model = 
Some(raw_model.downcast::<TypedModel>().unwrap().into()); } macro_rules! stage { ($name:expr, $from:ident -> $to:ident, $block:expr) => { if let Some(from) = $from.take() { info!(concat!("Running '", $name, "'")); let mut last_model: Option<Box<dyn Model>> = if keep_last { Some(Box::new(from.as_ref().clone())) } else { None }; let block: &dyn Fn(_) -> CliResult<_> = &$block; let owned_model = Arc::try_unwrap(from).unwrap_or_else(|from| from.as_ref().clone()); match block(owned_model) { Ok(it) => { $to = Some(Arc::new(it)); } Err(e) => { if let Some(last_model) = last_model.take() { return Err(ModelBuildingError(last_model, e.into()))?; } else { return Err(e); } } } info_usage(concat!("after ", $name), probe); if reference_stage.as_deref() == Some($name) { reference_model = Some($to.as_ref().unwrap().clone()); } if stop_at == $name { return Ok(( $to.take().expect("returnable model"), pulsed_model, reference_model, )); } } }; } stage!("analyse", inference_model -> inference_model, |mut m:InferenceModel| -> TractResult<_> { let result = m.analyse(matches.is_present("analyse_fail_fast")); match result { Ok(_) => Ok(m), Err(e) => Err(ModelBuildingError(Box::new(m), e.into()).into()) }}); if let Some(ext) = tf_model_extensions { #[cfg(feature = "tf")] stage!("tf-preproc", inference_model -> inference_model, |m:InferenceModel| Ok(ext.preproc(m)?)); } stage!("incorporate", inference_model -> inference_model, |m:InferenceModel| { Ok(m.incorporate()?)}); stage!("type", inference_model -> typed_model, |m:InferenceModel| Ok(m.into_typed()?)); stage!("declutter", typed_model -> typed_model, |m:TypedModel| { let mut dec = tract_core::optim::Optimizer::declutter(); if let Some(steps) = matches.value_of("declutter_step") { dec = dec.stopping_at(steps.parse()?); } dec.optimize(&m) }); #[cfg(feature = "pulse")] { if let Some(dim) = concretize_stream_dim { stage!("concretize-stream-dim", typed_model -> typed_model, |m:TypedModel| 
Ok(m.concretize_dims(&SymbolValues::default().with(stream_symbol(), dim as _))?)); stage!("concretize-stream-dim-declutter", typed_model -> typed_model, |m:TypedModel| Ok(m.declutter()?)); } else if let Some(pulse) = pulse { stage!("pulse", typed_model -> pulsed_model, |m:TypedModel| Ok(PulsedModel::new(&m, pulse)?)); stage!("pulse-to-type", pulsed_model -> typed_model, |m:PulsedModel| Ok(m.into_typed()?)); stage!("pulse-declutter", typed_model -> typed_model, |m:TypedModel| Ok(m.declutter()?)); } } if nnef_cycle { stage!("nnef-cycle", typed_model -> typed_model, |m:TypedModel| { let nnef = super::nnef(&matches); let mut vec = vec!(); nnef.write(&m, &mut vec)?; Ok(nnef.model_for_read(&mut &*vec)?) }); stage!("nnef-declutter", typed_model -> typed_model, |m:TypedModel| Ok(m.declutter()?)); } if let Some(sub) = matches.value_of("extract_decluttered_sub") { stage!("extract", typed_model -> typed_model, |m:TypedModel| { let node = m.node_id_by_name(sub)?; Ok(m.nested_models(node)[0].1.downcast_ref::<TypedModel>().unwrap().clone()) }); } stage!("before-optimize", typed_model -> typed_model, |m:TypedModel| Ok(m)); stage!("optimize", typed_model -> typed_model, |m:TypedModel| { let mut opt = tract_core::optim::Optimizer::codegen(); if let Some(steps) = matches.value_of("optimize_step") { opt = opt.stopping_at(steps.parse()?); } opt.optimize(&m) }); Ok((typed_model.clone().unwrap(), pulsed_model, reference_model)) } #[allow(unused_variables)] /// Parses the command-line arguments. 
pub fn from_clap(matches: &clap::ArgMatches, probe: Option<&Probe>) -> CliResult<Parameters> { let (filename, onnx_tc) = Self::disco_model(matches)?; let (mut graph, mut raw_model, tf_model_extensions) = Self::load_model(matches, probe, &filename)?; info!("Model {:?} loaded", filename); info_usage("model loaded", probe); let (need_tensorflow_model, need_reference_model) = match matches.subcommand() { ("compare", Some(sm)) => { if let Some(with) = sm.value_of("stage") { (false, Some(with)) } else { (true, None) } } _ => (false, None), }; #[cfg(not(feature = "conform"))] let tf_model = (); #[cfg(feature = "conform")] let tf_model = if need_tensorflow_model { info!("Tensorflow version: {}", tract_tensorflow::conform::tf::version()); if matches.is_present("determinize") { if let SomeGraphDef::Tf(ref graph) = graph { let graph = graph.write_to_bytes().unwrap(); Some(tract_tensorflow::conform::tf::for_slice(&graph)?) } else { unreachable!() } } else { Some(tract_tensorflow::conform::tf::for_path(&filename)?) 
} } else { None }; let need_proto = matches.is_present("proto") || (matches.subcommand_matches("compare").map(|sc| sc.is_present("pbdir"))) .unwrap_or(false); if !need_proto { graph = SomeGraphDef::NoGraphDef; } if let Some(inputs) = matches.values_of("input") { let names = inputs .map(|t| Ok(tensor::for_string(t)?.0)) .collect::<CliResult<Vec<Option<String>>>>()?; if names.iter().all(|s| s.is_some() && s.as_ref().unwrap().len() > 0) { let names: Vec<&str> = names.iter().map(|s| &**s.as_ref().unwrap()).collect(); raw_model.set_input_names(&*names)?; } } if let Some(inputs) = matches.values_of("input_node") { let inputs: Vec<&str> = inputs.map(|s| s).collect(); raw_model.set_input_names(&inputs)?; }; if let Some(outputs) = matches.values_of("output_node") { let outputs: Vec<&str> = outputs.map(|s| s).collect(); raw_model.set_output_names(&outputs)?; }; if let Some(override_facts) = matches.values_of("override_fact") { for fact in override_facts { let (name, fact) = tensor::for_string(fact)?; let node = raw_model.node_id_by_name(&name.unwrap())?; if let Some(inf) = raw_model.downcast_mut::<InferenceModel>() { inf.set_outlet_fact(OutletId::new(node, 0), fact)?; } else if let Some(typ) = raw_model.downcast_mut::<TypedModel>() { typ.set_outlet_fact(OutletId::new(node, 0), (&fact).try_into()?)?; } } }; let output_names_and_labels: Vec<Vec<String>> = raw_model .output_outlets() .iter() .map(|o| { let mut v = vec![format!("{}:{}", raw_model.node_name(o.node), o.slot)]; if o.slot == 0 { v.push(raw_model.node_name(o.node).to_string()); } if let Some(l) = raw_model.outlet_label(*o) { v.push(l.to_string()); } v }) .collect(); let mut assertions = Assertions::from_clap(matches, &*output_names_and_labels)?; if let Some(sub) = matches.value_of("kaldi_downsample") { dispatch_model_mut_no_pulse!(raw_model, |m| Self::kaldi_downsample(m, sub.parse()?))?; } if matches.value_of("kaldi_left_context").is_some() || matches.value_of("kaldi_right_context").is_some() { let left = 
matches.value_of("kaldi_left_context").unwrap_or("0").parse()?; let right = matches.value_of("kaldi_right_context").unwrap_or("0").parse()?; dispatch_model_mut_no_pulse!(raw_model, |m| Self::kaldi_context(m, left, right))?; } let input_values = dispatch_model_mut_no_pulse!(raw_model, |m| Self::inputs( m, &mut assertions, matches, &filename, onnx_tc ))?; if matches.is_present("partial") { if let Some(m) = raw_model.downcast_ref::<InferenceModel>() { raw_model = Box::new(m.compact()?); } else if let Some(m) = raw_model.downcast_ref::<TypedModel>() { raw_model = Box::new(m.compact()?); } } Self::pipeline( matches, probe, raw_model, tf_model_extensions, need_reference_model.as_deref(), ) .map(|(tract_model, pulsed_model, reference_model)| { info!("Model ready"); info_usage("model ready", probe); Parameters { graph, pulsed_model, tract_model, reference_model, tf_model, input_values, assertions, machine_friendly: matches.is_present("machine_friendly"), multiturn: matches.is_present("multiturn"), } }) } } pub struct BenchLimits { pub max_iters: usize, pub max_time: std::time::Duration, } impl BenchLimits { pub fn from_clap(matches: &clap::ArgMatches) -> CliResult<BenchLimits> { let max_iters = matches.value_of("max_iters").map(usize::from_str).transpose()?.unwrap_or(100_000); let max_time = matches .value_of("max-time") .map(u64::from_str) .transpose()? 
.map(std::time::Duration::from_millis) .unwrap_or(std::time::Duration::from_secs(5)); Ok(BenchLimits { max_iters, max_time }) } } pub fn display_params_from_clap( root_matches: &clap::ArgMatches, matches: &clap::ArgMatches, ) -> CliResult<DisplayParams> { Ok(DisplayParams { konst: matches.is_present("const"), cost: matches.is_present("cost"), profile: matches.is_present("profile"), left_column_width: 0, invariants: matches.is_present("invariants"), quiet: matches.is_present("quiet"), natural_order: matches.is_present("natural-order"), debug_op: matches.is_present("debug-op"), node_ids: matches.values_of("node_id").map(|values| { values.map(|id| tvec!((id.parse::<usize>().unwrap(), "".to_string()))).collect() }), node_name: matches.value_of("node_name").map(String::from), op_name: matches.value_of("op_name").map(String::from), // successors: matches.value_of("successors").map(|id| id.parse().unwrap()), expect_core: root_matches.value_of("pass").unwrap_or("declutter") == "declutter" && !root_matches.is_present("optimize"), outlet_labels: matches.is_present("outlet-labels"), io: if matches.is_present("io-long") { display_params::Io::Long } else if matches.is_present("io-none") { display_params::Io::None } else { display_params::Io::Short }, info: matches.is_present("info"), json: matches.is_present("json"), }) } #[derive(Debug)] pub struct Assertions { pub assert_outputs: Vec<Option<Arc<Tensor>>>, pub assert_output_facts: Option<Vec<InferenceFact>>, } impl Assertions { fn from_clap( matches: &clap::ArgMatches, output_names: &[Vec<String>], ) -> CliResult<Assertions> { if let Some(sub) = matches.subcommand.as_ref().map(|sub| &sub.matches) { let mut assert_outputs: Vec<Option<Arc<Tensor>>> = vec![None; output_names.len()]; if let Some(values) = sub.values_of("assert-output") { for (ix, o) in values.enumerate() { assert_outputs[ix] = tensor::for_string(o).unwrap().1.value.concretize(); } } if let Some(bundles) = sub.values_of("assert-output-bundle") { for bundle in 
bundles { let mut npz = ndarray_npy::NpzReader::new(std::fs::File::open(bundle)?)?; for (ix, labels) in output_names.iter().enumerate() { for label in labels { if assert_outputs[ix].is_some() { continue; } let npy_name = format!("{}.npy", label); if let Ok(t) = tensor::for_npz(&mut npz, &npy_name) { assert_outputs[ix] = Some(t.into_arc_tensor()) } } } } } if sub.values_of("assert_output").is_some() || sub.values_of("assert-output-bundle").is_some() { if assert_outputs.contains(&None) { bail!("Could not find assertions for all outputs: names and aliases are {:?}, found {:?}", output_names, assert_outputs); } } let assert_output_facts: Option<Vec<InferenceFact>> = matches .values_of("assert-output-fact") .map(|vs| vs.map(|v| tensor::for_string(v).unwrap().1).collect()); Ok(Assertions { assert_outputs, assert_output_facts }) } else { Ok(Assertions { assert_outputs: vec![None; output_names.len()], assert_output_facts: None, }) } } }
40.748603
173
0.490321
c1c9d472a1e336a75be7493181896bd04f1a3a44
1,058
use libloading::Library; use core::{Plugin, PluginRegistrar}; struct Registrar { plugins: Vec<Box<dyn Plugin>>, } impl PluginRegistrar for Registrar { fn register_plugin(&mut self, plugin: Box<dyn Plugin>) { self.plugins.push(plugin); } } fn main() { let mut registrar = Registrar { plugins: Vec::new(), }; for path in std::env::args_os().skip(1) { // In this code, we never close the shared library - if you need to be able to unload the // library, that will require more work. let lib = Box::leak(Box::new(Library::new(path).unwrap())); // NOTE: You need to do something to ensure you're only loading "safe" code. Out of scope // for this code. unsafe { let func: libloading::Symbol<unsafe extern "C" fn(&mut dyn PluginRegistrar) -> ()> = lib.get(b"plugin_entry").unwrap(); func(&mut registrar); } } for plugin in registrar.plugins { plugin.callback1(); dbg!(plugin.callback2(7)); } }
27.842105
97
0.593573
26296706d3e422034766d12456d75c48c06a33cf
3,624
// Copyright 2019 Alex Ostrovski // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Example how to store a `SecretTree` seed in the passphrase-encrypted form and use it //! to derive heterogeneous keys. use ed25519::Keypair; use pwbox::{ rcrypto::{RustCrypto, Scrypt}, Eraser, Suite, }; use rand6::thread_rng; use secret_tree::{Name, SecretTree}; use std::fmt; struct Keys { consensus_keys: Keypair, service_keys: Keypair, other_secrets: Vec<u128>, } impl Keys { pub fn new(tree: &SecretTree) -> Self { let consensus = tree.child(Name::new("consensus")); let service = tree.child(Name::new("service")); let other = tree.child(Name::new("other")); Keys { consensus_keys: Keypair::generate(&mut consensus.rng()), service_keys: Keypair::generate(&mut service.rng()), other_secrets: (0..5) .map(|i| { let mut buffer = [0_u128]; other.index(i).fill(&mut buffer); buffer[0] }) .collect(), } } } impl fmt::Display for Keys { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut debug_struct = f.debug_struct("Keys"); debug_struct.field( "consensus", &hex::encode(self.consensus_keys.public.as_bytes()), ); debug_struct.field("service", &hex::encode(self.service_keys.public.as_bytes())); for (i, secret) in self.other_secrets.iter().enumerate() { debug_struct.field(&format!("other/{}", i), secret); } debug_struct.finish() } } fn main() { // Generate a RNG tree randomly. 
let mut rng = thread_rng(); let tree = SecretTree::new(&mut rng); let keys = Keys::new(&tree); println!("Original keys: {:#}\n", keys); let public_keys = (keys.consensus_keys.public, keys.service_keys.public); // Assume that we have securely persisted the RNG tree (e.g., with passphrase encryption). let passphrase = "correct horse battery staple"; let secured_store = RustCrypto::build_box(&mut rng) .kdf(if cfg!(debug_assertions) { // Ultra-light parameters to get the test run fast in the debug mode. Scrypt::custom(6, 16) } else { Scrypt::default() }) .seal(passphrase, tree.seed()) .unwrap(); drop(tree); let mut eraser = Eraser::new(); eraser.add_suite::<RustCrypto>(); let secured_store = eraser.erase(&secured_store).unwrap(); println!( "Passphrase-encrypted RNG tree (TOML):\n{}", toml::to_string(&secured_store).unwrap() ); // ...Then, we can restore all keys by deserializing the RNG tree. let seed = eraser .restore(&secured_store) .unwrap() .open(passphrase) .unwrap(); let tree = SecretTree::from_seed(&seed).unwrap(); let keys = Keys::new(&tree); assert_eq!( public_keys, (keys.consensus_keys.public, keys.service_keys.public) ); println!("Restored keys: {:#}", keys); }
32.070796
94
0.610375
48b5bcd1d33c517ab4d6438bb1b6f8c71c3011be
6,648
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn serialize_operation_crate_operation_create_notification_rule( input: &crate::input::CreateNotificationRuleInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_create_notification_rule_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_delete_notification_rule( input: &crate::input::DeleteNotificationRuleInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_delete_notification_rule_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_delete_target( input: &crate::input::DeleteTargetInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_delete_target_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_describe_notification_rule( input: &crate::input::DescribeNotificationRuleInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_describe_notification_rule_input( &mut object, input, )?; object.finish(); 
Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_event_types( input: &crate::input::ListEventTypesInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_event_types_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_notification_rules( input: &crate::input::ListNotificationRulesInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_notification_rules_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_tags_for_resource( input: &crate::input::ListTagsForResourceInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_tags_for_resource_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_targets( input: &crate::input::ListTargetsInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_targets_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_subscribe( input: 
&crate::input::SubscribeInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_subscribe_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_tag_resource( input: &crate::input::TagResourceInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_tag_resource_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_unsubscribe( input: &crate::input::UnsubscribeInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_unsubscribe_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_untag_resource( input: &crate::input::UntagResourceInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_untag_resource_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_update_notification_rule( input: &crate::input::UpdateNotificationRuleInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut 
object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_update_notification_rule_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) }
44.61745
97
0.747894
eb3640cfe15cd5fa2351295bbbb8d936ac6fdc1b
22,415
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::model::{ error::ModelError, realm::{Realm, WeakRealm}, resolver::{ Resolver, ResolverError, ResolverFut, ResolverRegistrationError, ResolverRegistry, }, }, fidl_fuchsia_sys2 as fsys, std::{collections::HashMap, sync::Arc, time::Duration}, thiserror::Error, }; /// A realm's environment, populated from a component's [`EnvironmentDecl`]. /// An environment defines intrinsic behaviors of a component's realm. Components /// can define an environment, but do not interact with it directly. /// /// [`EnvironmentDecl`]: fidl_fuchsia_sys2::EnvironmentDecl pub struct Environment { /// The parent that created or inherited the environment. parent: Option<WeakRealm>, /// The extension mode of this environment. extends: EnvironmentExtends, /// The runners available in this environment. runner_registry: RunnerRegistry, /// The resolvers in this environment, mapped to URL schemes. resolver_registry: ResolverRegistry, /// The deadline for runners to respond to `ComponentController.Stop` calls. stop_timeout: Duration, } pub const DEFAULT_STOP_TIMEOUT: Duration = Duration::from_secs(5); #[derive(Debug, Error, Clone)] pub enum EnvironmentError { #[error( "stop timeout could not be set, environment has no parent and does not specify a value" )] StopTimeoutUnknown, #[error("failed to register resolvers")] ResolverRegistration(#[from] ResolverRegistrationError), } /// How this environment extends its parent's. #[derive(Debug, Clone)] pub enum EnvironmentExtends { /// This environment extends the environment of its parent's. Realm, /// This environment was created from scratch. 
None, } impl From<fsys::EnvironmentExtends> for EnvironmentExtends { fn from(e: fsys::EnvironmentExtends) -> Self { match e { fsys::EnvironmentExtends::Realm => Self::Realm, fsys::EnvironmentExtends::None => Self::None, } } } impl Environment { /// Creates a new empty environment without a parent. pub fn empty() -> Environment { Environment { parent: None, extends: EnvironmentExtends::None, runner_registry: RunnerRegistry::default(), resolver_registry: ResolverRegistry::new(), stop_timeout: DEFAULT_STOP_TIMEOUT, } } /// Creates a new root environment with a resolver registry and no parent. pub fn new_root( runner_registry: RunnerRegistry, resolver_registry: ResolverRegistry, ) -> Environment { Environment { parent: None, extends: EnvironmentExtends::None, runner_registry, resolver_registry, stop_timeout: DEFAULT_STOP_TIMEOUT, } } /// Creates an environment from `env_decl`, using `parent` as the parent realm. pub fn from_decl( parent: &Arc<Realm>, env_decl: &cm_rust::EnvironmentDecl, ) -> Result<Environment, EnvironmentError> { Ok(Environment { parent: Some(parent.into()), extends: env_decl.extends.into(), runner_registry: RunnerRegistry::from_decl(&env_decl.runners), resolver_registry: ResolverRegistry::from_decl(&env_decl.resolvers, parent)?, stop_timeout: match env_decl.stop_timeout_ms { Some(timeout) => Duration::from_millis(timeout.into()), None => match env_decl.extends { fsys::EnvironmentExtends::Realm => parent.environment.stop_timeout(), fsys::EnvironmentExtends::None => { return Err(EnvironmentError::StopTimeoutUnknown); } }, }, }) } /// Creates a new environment with `parent` as the parent. 
pub fn new_inheriting(parent: &Arc<Realm>) -> Environment { Environment { parent: Some(parent.into()), extends: EnvironmentExtends::Realm, runner_registry: RunnerRegistry::default(), resolver_registry: ResolverRegistry::new(), stop_timeout: parent.environment.stop_timeout(), } } pub fn stop_timeout(&self) -> Duration { self.stop_timeout } /// Returns the runner registered to `name` and the realm that created the environment the /// runner was registered to (`None` for component manager's realm). Returns `None` if there /// was no match. pub fn get_registered_runner( &self, name: &cm_rust::CapabilityName, ) -> Result<Option<(Option<Arc<Realm>>, RunnerRegistration)>, ModelError> { let parent = self.parent.as_ref().map(|p| p.upgrade()).transpose()?; match self.runner_registry.get_runner(name) { Some(reg) => Ok(Some((parent, reg.clone()))), None => match self.extends { EnvironmentExtends::Realm => { parent.unwrap().environment.get_registered_runner(name) } EnvironmentExtends::None => { return Ok(None); } }, } } } impl Resolver for Environment { fn resolve<'a>(&'a self, component_url: &'a str) -> ResolverFut<'a> { Box::pin(async move { match self.resolver_registry.resolve(component_url).await { Err(ResolverError::SchemeNotRegistered) => match &self.extends { EnvironmentExtends::Realm => { self.parent .as_ref() .unwrap() .upgrade() .map_err(|_| ResolverError::SchemeNotRegistered)? .environment .resolve(component_url) .await } EnvironmentExtends::None => Err(ResolverError::SchemeNotRegistered), }, result => result, } }) } } /// The set of runners available in a realm's environment. 
/// /// [`RunnerRegistration`]: fidl_fuchsia_sys2::RunnerRegistration pub struct RunnerRegistry { runners: HashMap<cm_rust::CapabilityName, RunnerRegistration>, } impl RunnerRegistry { pub fn default() -> Self { Self { runners: HashMap::new() } } pub fn new(runners: HashMap<cm_rust::CapabilityName, RunnerRegistration>) -> Self { Self { runners } } pub fn from_decl(regs: &Vec<cm_rust::RunnerRegistration>) -> Self { let mut runners = HashMap::new(); for reg in regs { runners.insert( reg.target_name.clone(), RunnerRegistration { source_name: reg.source_name.clone(), source: reg.source.clone(), }, ); } Self { runners } } pub fn get_runner(&self, name: &cm_rust::CapabilityName) -> Option<&RunnerRegistration> { self.runners.get(name) } } /// A single runner registered in an environment. /// /// [`RunnerRegistration`]: fidl_fuchsia_sys2::RunnerRegistration #[derive(Debug, Clone, PartialEq, Eq)] pub struct RunnerRegistration { pub source: cm_rust::RegistrationSource, pub source_name: cm_rust::CapabilityName, } #[cfg(test)] mod tests { use { super::*, crate::model::{ binding::Binder, error::ModelError, model::{Model, ModelParams}, moniker::AbsoluteMoniker, realm::BindReason, testing::{ mocks::MockResolver, test_helpers::{ ChildDeclBuilder, CollectionDeclBuilder, ComponentDeclBuilder, EnvironmentDeclBuilder, }, }, }, fuchsia_async as fasync, maplit::hashmap, matches::assert_matches, std::sync::Weak, }; #[test] fn test_from_decl() { let realm = Arc::new(Realm::new_root_realm( Environment::empty(), Weak::new(), "test:///root".to_string(), )); let environment = Environment::from_decl( &realm, &EnvironmentDeclBuilder::new() .name("env") .extends(fsys::EnvironmentExtends::None) .stop_timeout(1234) .build(), ) .expect("environment construction failed"); assert_matches!(environment.parent, Some(_)); let environment = Environment::from_decl( &realm, &EnvironmentDeclBuilder::new() .name("env") .extends(fsys::EnvironmentExtends::Realm) .build(), ) .expect("environment constuction 
failed"); assert_matches!(environment.parent, Some(_)); } // Each component declares an environment for their child that inherits from the realm's // environment. The leaf component should be able to access the resolvers of the root realm. #[fasync::run_singlethreaded(test)] async fn test_inherit_root() -> Result<(), ModelError> { let runner_reg = RunnerRegistration { source: cm_rust::RegistrationSource::Parent, source_name: "test-src".into(), }; let runners: HashMap<cm_rust::CapabilityName, RunnerRegistration> = hashmap! { "test".into() => runner_reg.clone() }; let mut resolver = MockResolver::new(); resolver.add_component( "root", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("a").url("test:///a").environment("env_a")) .add_environment( EnvironmentDeclBuilder::new() .name("env_a") .extends(fsys::EnvironmentExtends::Realm), ) .build(), ); resolver.add_component( "a", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("b").url("test:///b").environment("env_b")) .add_environment( EnvironmentDeclBuilder::new() .name("env_b") .extends(fsys::EnvironmentExtends::Realm), ) .build(), ); resolver.add_component("b", ComponentDeclBuilder::new_empty_component().build()); let resolvers = { let mut registry = ResolverRegistry::new(); registry.register("test".to_string(), Box::new(resolver)).unwrap(); registry }; let model = Arc::new(Model::new(ModelParams { root_component_url: "test:///root".to_string(), root_environment: Environment::new_root(RunnerRegistry::new(runners), resolvers), namespace_capabilities: vec![], })); let realm = model.bind(&vec!["a:0", "b:0"].into(), &BindReason::Eager).await?; assert_eq!(realm.component_url, "test:///b"); let registered_runner = realm.environment.get_registered_runner(&"test".into()).unwrap(); assert_matches!(registered_runner.as_ref(), Some((None, r)) if r == &runner_reg); assert_matches!(realm.environment.get_registered_runner(&"foo".into()), Ok(None)); Ok(()) } // A 
component declares an environment that inherits from realm, and the realm's environment // added something that should be available in the component's realm. #[fasync::run_singlethreaded(test)] async fn test_inherit_parent() -> Result<(), ModelError> { let runner_reg = RunnerRegistration { source: cm_rust::RegistrationSource::Parent, source_name: "test-src".into(), }; let runners: HashMap<cm_rust::CapabilityName, RunnerRegistration> = hashmap! { "test".into() => runner_reg.clone() }; let mut resolver = MockResolver::new(); resolver.add_component( "root", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("a").url("test:///a").environment("env_a")) .add_environment( EnvironmentDeclBuilder::new() .name("env_a") .extends(fsys::EnvironmentExtends::Realm) .add_runner(cm_rust::RunnerRegistration { source: cm_rust::RegistrationSource::Parent, source_name: "test-src".into(), target_name: "test".into(), }), ) .build(), ); resolver.add_component( "a", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("b").url("test:///b").environment("env_b")) .add_environment( EnvironmentDeclBuilder::new() .name("env_b") .extends(fsys::EnvironmentExtends::Realm), ) .build(), ); resolver.add_component("b", ComponentDeclBuilder::new_empty_component().build()); let resolvers = { let mut registry = ResolverRegistry::new(); registry.register("test".to_string(), Box::new(resolver)).unwrap(); registry }; let model = Arc::new(Model::new(ModelParams { root_component_url: "test:///root".to_string(), root_environment: Environment::new_root(RunnerRegistry::new(runners), resolvers), namespace_capabilities: vec![], })); let realm = model.bind(&vec!["a:0", "b:0"].into(), &BindReason::Eager).await?; assert_eq!(realm.component_url, "test:///b"); let registered_runner = realm.environment.get_registered_runner(&"test".into()).unwrap(); assert_matches!(registered_runner.as_ref(), Some((Some(_), r)) if r == &runner_reg); let parent_moniker = 
&registered_runner.unwrap().0.unwrap().abs_moniker; assert_eq!(parent_moniker, &AbsoluteMoniker::root()); assert_matches!(realm.environment.get_registered_runner(&"foo".into()), Ok(None)); Ok(()) } // A component in a collection declares an environment that inherits from realm, and the // realm's environment added something that should be available in the component's realm. #[fasync::run_singlethreaded(test)] async fn test_inherit_in_collection() -> Result<(), ModelError> { let runner_reg = RunnerRegistration { source: cm_rust::RegistrationSource::Parent, source_name: "test-src".into(), }; let runners: HashMap<cm_rust::CapabilityName, RunnerRegistration> = hashmap! { "test".into() => runner_reg.clone() }; let mut resolver = MockResolver::new(); resolver.add_component( "root", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("a").url("test:///a").environment("env_a")) .add_environment( EnvironmentDeclBuilder::new() .name("env_a") .extends(fsys::EnvironmentExtends::Realm) .add_runner(cm_rust::RunnerRegistration { source: cm_rust::RegistrationSource::Parent, source_name: "test-src".into(), target_name: "test".into(), }), ) .build(), ); resolver.add_component( "a", ComponentDeclBuilder::new_empty_component() .add_collection( CollectionDeclBuilder::new_transient_collection("coll").environment("env_b"), ) .add_environment( EnvironmentDeclBuilder::new() .name("env_b") .extends(fsys::EnvironmentExtends::Realm), ) .build(), ); resolver.add_component("b", ComponentDeclBuilder::new_empty_component().build()); let resolvers = { let mut registry = ResolverRegistry::new(); registry.register("test".to_string(), Box::new(resolver)).unwrap(); registry }; let model = Arc::new(Model::new(ModelParams { root_component_url: "test:///root".to_string(), root_environment: Environment::new_root(RunnerRegistry::new(runners), resolvers), namespace_capabilities: vec![], })); // Add instance to collection. 
{ let parent_realm = model.bind(&vec!["a:0"].into(), &BindReason::Eager).await?; let child_decl = ChildDeclBuilder::new_lazy_child("b").build(); parent_realm .add_dynamic_child("coll".into(), &child_decl) .await .expect("failed to add child"); } let realm = model.bind(&vec!["a:0", "coll:b:1"].into(), &BindReason::Eager).await?; assert_eq!(realm.component_url, "test:///b"); let registered_runner = realm.environment.get_registered_runner(&"test".into()).unwrap(); assert_matches!(registered_runner.as_ref(), Some((Some(_), r)) if r == &runner_reg); let parent_moniker = &registered_runner.unwrap().0.unwrap().abs_moniker; assert_eq!(parent_moniker, &AbsoluteMoniker::root()); assert_matches!(realm.environment.get_registered_runner(&"foo".into()), Ok(None)); Ok(()) } // One of the components does not declare or specify an environment for the leaf child. The // leaf child component should still be able to access the resolvers of the root realm, as an // implicit inheriting environment is assumed. #[fasync::run_singlethreaded(test)] async fn test_auto_inheritance() -> Result<(), ModelError> { let runner_reg = RunnerRegistration { source: cm_rust::RegistrationSource::Parent, source_name: "test-src".into(), }; let runners: HashMap<cm_rust::CapabilityName, RunnerRegistration> = hashmap! 
{ "test".into() => runner_reg.clone() }; let mut resolver = MockResolver::new(); resolver.add_component( "root", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("a").url("test:///a").environment("env_a")) .add_environment( EnvironmentDeclBuilder::new() .name("env_a") .extends(fsys::EnvironmentExtends::Realm), ) .build(), ); resolver.add_component( "a", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("b").url("test:///b")) .build(), ); resolver.add_component("b", ComponentDeclBuilder::new_empty_component().build()); let resolvers = { let mut registry = ResolverRegistry::new(); registry.register("test".to_string(), Box::new(resolver)).unwrap(); registry }; let model = Arc::new(Model::new(ModelParams { root_component_url: "test:///root".to_string(), root_environment: Environment::new_root(RunnerRegistry::new(runners), resolvers), namespace_capabilities: vec![], })); let realm = model.bind(&vec!["a:0", "b:0"].into(), &BindReason::Eager).await?; assert_eq!(realm.component_url, "test:///b"); let registered_runner = realm.environment.get_registered_runner(&"test".into()).unwrap(); assert_matches!(registered_runner.as_ref(), Some((None, r)) if r == &runner_reg); assert_matches!(realm.environment.get_registered_runner(&"foo".into()), Ok(None)); Ok(()) } // One of the components declares an environment that does not inherit from the realm. This // means that any child components of this component cannot be resolved. 
#[fasync::run_singlethreaded(test)] async fn test_resolver_no_inheritance() -> Result<(), ModelError> { let mut resolver = MockResolver::new(); resolver.add_component( "root", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("a").url("test:///a").environment("env_a")) .add_environment( EnvironmentDeclBuilder::new() .name("env_a") .extends(fsys::EnvironmentExtends::Realm), ) .build(), ); resolver.add_component( "a", ComponentDeclBuilder::new_empty_component() .add_child(ChildDeclBuilder::new().name("b").url("test:///b").environment("env_b")) .add_environment( EnvironmentDeclBuilder::new() .name("env_b") .extends(fsys::EnvironmentExtends::None) .stop_timeout(1234), ) .build(), ); resolver.add_component("b", ComponentDeclBuilder::new_empty_component().build()); let registry = { let mut registry = ResolverRegistry::new(); registry.register("test".to_string(), Box::new(resolver)).unwrap(); registry }; let model = Arc::new(Model::new(ModelParams { root_component_url: "test:///root".to_string(), root_environment: Environment::new_root(RunnerRegistry::default(), registry), namespace_capabilities: vec![], })); assert_matches!( model.bind(&vec!["a:0", "b:0"].into(), &BindReason::Eager).await, Err(ModelError::ResolverError { .. }) ); Ok(()) } }
39.602473
99
0.568102
d93ed85ac57eaa264cd77844a772ea34f55c9d0b
23,291
//! Config program use crate::ConfigKeys; use bincode::deserialize; use solana_sdk::{ account::{ReadableAccount, WritableAccount}, feature_set, ic_msg, instruction::InstructionError, keyed_account::keyed_account_at_index, process_instruction::InvokeContext, program_utils::limited_deserialize, pubkey::Pubkey, }; pub fn process_instruction( _program_id: &Pubkey, data: &[u8], invoke_context: &mut dyn InvokeContext, ) -> Result<(), InstructionError> { let keyed_accounts = invoke_context.get_keyed_accounts()?; let key_list: ConfigKeys = limited_deserialize(data)?; let config_keyed_account = &mut keyed_account_at_index(keyed_accounts, 0)?; let current_data: ConfigKeys = { let config_account = config_keyed_account.try_account_ref_mut()?; if invoke_context.is_feature_active(&feature_set::check_program_owner::id()) && config_account.owner() != &crate::id() { return Err(InstructionError::InvalidAccountOwner); } deserialize(&config_account.data()).map_err(|err| { ic_msg!( invoke_context, "Unable to deserialize config account: {}", err ); InstructionError::InvalidAccountData })? 
}; let current_signer_keys: Vec<Pubkey> = current_data .keys .iter() .filter(|(_, is_signer)| *is_signer) .map(|(pubkey, _)| *pubkey) .collect(); if current_signer_keys.is_empty() { // Config account keypair must be a signer on account initialization, // or when no signers specified in Config data if config_keyed_account.signer_key().is_none() { return Err(InstructionError::MissingRequiredSignature); } } let mut counter = 0; let mut keyed_accounts_iter = keyed_accounts.iter().skip(1); for (signer, _) in key_list.keys.iter().filter(|(_, is_signer)| *is_signer) { counter += 1; if signer != config_keyed_account.unsigned_key() { let signer_account = keyed_accounts_iter.next(); if signer_account.is_none() { ic_msg!( invoke_context, "account {:?} is not in account list", signer ); return Err(InstructionError::MissingRequiredSignature); } let signer_key = signer_account.unwrap().signer_key(); if signer_key.is_none() { ic_msg!( invoke_context, "account {:?} signer_key().is_none()", signer ); return Err(InstructionError::MissingRequiredSignature); } if signer_key.unwrap() != signer { ic_msg!( invoke_context, "account[{:?}].signer_key() does not match Config data)", counter + 1 ); return Err(InstructionError::MissingRequiredSignature); } // If Config account is already initialized, update signatures must match Config data if !current_data.keys.is_empty() && !current_signer_keys.iter().any(|pubkey| pubkey == signer) { ic_msg!( invoke_context, "account {:?} is not in stored signer list", signer ); return Err(InstructionError::MissingRequiredSignature); } } else if config_keyed_account.signer_key().is_none() { ic_msg!(invoke_context, "account[0].signer_key().is_none()"); return Err(InstructionError::MissingRequiredSignature); } } // Check for Config data signers not present in incoming account update if current_signer_keys.len() > counter { ic_msg!( invoke_context, "too few signers: {:?}; expected: {:?}", counter, current_signer_keys.len() ); return 
Err(InstructionError::MissingRequiredSignature); } if config_keyed_account.data_len()? < data.len() { ic_msg!(invoke_context, "instruction data too large"); return Err(InstructionError::InvalidInstructionData); } config_keyed_account .try_account_ref_mut()? .data_as_mut_slice()[..data.len()] .copy_from_slice(&data); Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::{config_instruction, get_config_data, id, ConfigKeys, ConfigState}; use bincode::serialized_size; use serde_derive::{Deserialize, Serialize}; use solana_sdk::{ account::{Account, AccountSharedData}, keyed_account::create_keyed_accounts_unified, process_instruction::MockInvokeContext, signature::{Keypair, Signer}, system_instruction::SystemInstruction, }; use std::cell::RefCell; #[derive(Serialize, Deserialize, Debug, PartialEq)] struct MyConfig { pub item: u64, } impl Default for MyConfig { fn default() -> Self { Self { item: 123_456_789 } } } impl MyConfig { pub fn new(item: u64) -> Self { Self { item } } pub fn deserialize(input: &[u8]) -> Option<Self> { deserialize(input).ok() } } impl ConfigState for MyConfig { fn max_space() -> u64 { serialized_size(&Self::default()).unwrap() } } fn create_config_account(keys: Vec<(Pubkey, bool)>) -> (Keypair, RefCell<AccountSharedData>) { let from_pubkey = solana_sdk::pubkey::new_rand(); let config_keypair = Keypair::new(); let config_pubkey = config_keypair.pubkey(); let instructions = config_instruction::create_account::<MyConfig>(&from_pubkey, &config_pubkey, 1, keys); let system_instruction = limited_deserialize(&instructions[0].data).unwrap(); let space = match system_instruction { SystemInstruction::CreateAccount { lamports: _, space, owner: _, } => space, _ => panic!("Not a CreateAccount system instruction"), }; let config_account = RefCell::new(AccountSharedData::from(Account { data: vec![0; space as usize], owner: id(), ..Account::default() })); let accounts = vec![(true, false, &config_pubkey, &config_account)]; let keyed_accounts = 
create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instructions[1].data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); (config_keypair, config_account) } #[test] fn test_process_create_ok() { solana_logger::setup(); let keys = vec![]; let (_, config_account) = create_config_account(keys); assert_eq!( Some(MyConfig::default()), deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() ); } #[test] fn test_process_store_ok() { solana_logger::setup(); let keys = vec![]; let (config_keypair, config_account) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, true, keys, &my_config); let accounts = vec![(true, false, &config_pubkey, &config_account)]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); assert_eq!( Some(my_config), deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() ); } #[test] fn test_process_store_fail_instruction_data_too_large() { solana_logger::setup(); let keys = vec![]; let (config_keypair, config_account) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let mut instruction = config_instruction::store(&config_pubkey, true, keys, &my_config); instruction.data = vec![0; 123]; // <-- Replace data with a vector that's too large let accounts = vec![(true, false, &config_pubkey, &config_account)]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::InvalidInstructionData) ); } #[test] fn test_process_store_fail_account0_not_signer() { solana_logger::setup(); let keys = vec![]; let (config_keypair, config_account) = 
create_config_account(keys); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let mut instruction = config_instruction::store(&config_pubkey, true, vec![], &my_config); instruction.accounts[0].is_signer = false; // <----- not a signer let accounts = vec![(false, false, &config_pubkey, &config_account)]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::MissingRequiredSignature) ); } #[test] fn test_process_store_with_additional_signers() { solana_logger::setup(); let pubkey = solana_sdk::pubkey::new_rand(); let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer1_pubkey = solana_sdk::pubkey::new_rand(); let keys = vec![ (pubkey, false), (signer0_pubkey, true), (signer1_pubkey, true), ]; let (config_keypair, config_account) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config); let signer0_account = RefCell::new(AccountSharedData::default()); let signer1_account = RefCell::new(AccountSharedData::default()); let accounts = vec![ (true, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), (true, false, &signer1_pubkey, &signer1_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( Some(my_config), deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() ); } #[test] fn test_process_store_without_config_signer() { solana_logger::setup(); let pubkey = solana_sdk::pubkey::new_rand(); let signer0_pubkey = 
solana_sdk::pubkey::new_rand(); let keys = vec![(pubkey, false), (signer0_pubkey, true)]; let (config_keypair, _) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, false, keys, &my_config); let signer0_account = RefCell::new(AccountSharedData::from(Account { owner: id(), ..Account::default() })); let accounts = vec![(true, false, &signer0_pubkey, &signer0_account)]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::InvalidAccountData) ); } #[test] fn test_process_store_with_bad_additional_signer() { solana_logger::setup(); let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer1_pubkey = solana_sdk::pubkey::new_rand(); let signer0_account = RefCell::new(AccountSharedData::default()); let signer1_account = RefCell::new(AccountSharedData::default()); let keys = vec![(signer0_pubkey, true)]; let (config_keypair, config_account) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, true, keys, &my_config); // Config-data pubkey doesn't match signer let accounts = vec![ (true, false, &config_pubkey, &config_account), (true, false, &signer1_pubkey, &signer1_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::MissingRequiredSignature) ); // Config-data pubkey not a signer let accounts = vec![ (true, false, &config_pubkey, &config_account), (false, false, &signer0_pubkey, &signer0_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut 
MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::MissingRequiredSignature) ); } #[test] fn test_config_updates() { solana_logger::setup(); let pubkey = solana_sdk::pubkey::new_rand(); let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer1_pubkey = solana_sdk::pubkey::new_rand(); let signer2_pubkey = solana_sdk::pubkey::new_rand(); let signer0_account = RefCell::new(AccountSharedData::default()); let signer1_account = RefCell::new(AccountSharedData::default()); let signer2_account = RefCell::new(AccountSharedData::default()); let keys = vec![ (pubkey, false), (signer0_pubkey, true), (signer1_pubkey, true), ]; let (config_keypair, config_account) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config); let accounts = vec![ (true, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), (true, false, &signer1_pubkey, &signer1_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); // Update with expected signatures let new_config = MyConfig::new(84); let instruction = config_instruction::store(&config_pubkey, false, keys.clone(), &new_config); let accounts = vec![ (false, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), (true, false, &signer1_pubkey, &signer1_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) .unwrap() ); // 
Attempt update with incomplete signatures let keys = vec![(pubkey, false), (signer0_pubkey, true)]; let instruction = config_instruction::store(&config_pubkey, false, keys, &my_config); let accounts = vec![ (false, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), (false, false, &signer1_pubkey, &signer1_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::MissingRequiredSignature) ); // Attempt update with incorrect signatures let keys = vec![ (pubkey, false), (signer0_pubkey, true), (signer2_pubkey, true), ]; let instruction = config_instruction::store(&config_pubkey, false, keys, &my_config); let accounts = vec![ (false, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), (true, false, &signer2_pubkey, &signer2_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::MissingRequiredSignature) ); } #[test] fn test_config_updates_requiring_config() { solana_logger::setup(); let pubkey = solana_sdk::pubkey::new_rand(); let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer0_account = RefCell::new(AccountSharedData::default()); let keys = vec![ (pubkey, false), (signer0_pubkey, true), (signer0_pubkey, true), ]; // Dummy keys for account sizing let (config_keypair, config_account) = create_config_account(keys); let config_pubkey = config_keypair.pubkey(); let my_config = MyConfig::new(42); let keys = vec![ (pubkey, false), (signer0_pubkey, true), (config_keypair.pubkey(), true), ]; let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config); let accounts = vec![ (true, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, 
&signer0_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); // Update with expected signatures let new_config = MyConfig::new(84); let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &new_config); let accounts = vec![ (true, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Ok(()) ); let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) .unwrap() ); // Attempt update with incomplete signatures let keys = vec![(pubkey, false), (config_keypair.pubkey(), true)]; let instruction = config_instruction::store(&config_pubkey, true, keys, &my_config); let accounts = vec![(true, false, &config_pubkey, &config_account)]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::MissingRequiredSignature) ); } #[test] fn test_config_initialize_no_panic() { let from_pubkey = solana_sdk::pubkey::new_rand(); let config_pubkey = solana_sdk::pubkey::new_rand(); let instructions = config_instruction::create_account::<MyConfig>(&from_pubkey, &config_pubkey, 1, vec![]); let accounts = vec![]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instructions[1].data, &mut MockInvokeContext::new(keyed_accounts) ), Err(InstructionError::NotEnoughAccountKeys) ); } #[test] fn test_config_bad_owner() { let from_pubkey = solana_sdk::pubkey::new_rand(); let config_pubkey = 
solana_sdk::pubkey::new_rand(); let new_config = MyConfig::new(84); let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer0_account = RefCell::new(AccountSharedData::default()); let config_account = RefCell::new(AccountSharedData::default()); let keys = vec![ (from_pubkey, false), (signer0_pubkey, true), (config_pubkey, true), ]; let instruction = config_instruction::store(&config_pubkey, true, keys, &new_config); let accounts = vec![ (true, false, &config_pubkey, &config_account), (true, false, &signer0_pubkey, &signer0_account), ]; let keyed_accounts = create_keyed_accounts_unified(&accounts); assert_eq!( process_instruction( &id(), &instruction.data, &mut MockInvokeContext::new(keyed_accounts), ), Err(InstructionError::InvalidAccountOwner) ); } }
37.566129
100
0.580954
5041dd58a935abeea398b97a83ec40f1c96b1702
14,037
// LNP/BP client-side-validation foundation libraries implementing LNPBP // specifications & standards (LNPBP-4, 7, 8, 9, 42, 81) // // Written in 2019-2021 by // Dr. Maxim Orlovsky <[email protected]> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the Apache 2.0 License along with this // software. If not, see <https://opensource.org/licenses/Apache-2.0>. use amplify::proc_attr::ParametrizedAttr; use proc_macro2::{Span, TokenStream as TokenStream2}; use quote::{ToTokens, TokenStreamExt}; use syn::spanned::Spanned; use syn::{ Data, DataEnum, DataStruct, DeriveInput, Error, Field, Fields, Ident, ImplGenerics, Index, LitStr, Result, TypeGenerics, WhereClause, }; use crate::param::{EncodingDerive, TlvDerive, CRATE, REPR, USE_TLV}; use crate::TlvEncoding; /// Performs actual derivation of the decode trait using the provided /// information about trait parameters and requirements for TLV support (see /// [`TlvEncoding`] description). /// /// You will find example of the function use in the /// [crate top-level documentation][crate]. 
pub fn decode_derive( attr_name: &'static str, crate_name: Ident, trait_name: Ident, decode_name: Ident, deserialize_name: Ident, input: DeriveInput, tlv_encoding: TlvEncoding, ) -> Result<TokenStream2> { let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); let ident_name = &input.ident; let global_param = ParametrizedAttr::with(attr_name, &input.attrs)?; match input.data { Data::Struct(data) => decode_struct_impl( attr_name, &crate_name, &trait_name, &decode_name, &deserialize_name, data, ident_name, global_param, impl_generics, ty_generics, where_clause, tlv_encoding, ), Data::Enum(data) => decode_enum_impl( attr_name, &crate_name, &trait_name, &decode_name, &deserialize_name, data, ident_name, global_param, impl_generics, ty_generics, where_clause, ), Data::Union(_) => Err(Error::new_spanned( &input, format!("Deriving `{}` is not supported in unions", trait_name), )), } } #[allow(clippy::too_many_arguments)] fn decode_struct_impl( attr_name: &'static str, crate_name: &Ident, trait_name: &Ident, decode_name: &Ident, deserialize_name: &Ident, data: DataStruct, ident_name: &Ident, mut global_param: ParametrizedAttr, impl_generics: ImplGenerics, ty_generics: TypeGenerics, where_clause: Option<&WhereClause>, tlv_encoding: TlvEncoding, ) -> Result<TokenStream2> { let encoding = EncodingDerive::with( &mut global_param, crate_name, true, false, false, )?; if tlv_encoding == TlvEncoding::Denied && encoding.tlv.is_some() { return Err(Error::new( ident_name.span(), format!("TLV extensions are not allowed in `{}`", attr_name), )); } let inner_impl = match data.fields { Fields::Named(ref fields) => decode_fields_impl( attr_name, crate_name, trait_name, decode_name, deserialize_name, ident_name, &fields.named, global_param, false, tlv_encoding, )?, Fields::Unnamed(ref fields) => decode_fields_impl( attr_name, crate_name, trait_name, decode_name, deserialize_name, ident_name, &fields.unnamed, global_param, false, tlv_encoding, )?, Fields::Unit => 
quote! {}, }; let import = encoding.use_crate; Ok(quote! { impl #impl_generics #import::#trait_name for #ident_name #ty_generics #where_clause { #[inline] fn #decode_name<D: ::std::io::Read>(mut d: D) -> ::core::result::Result<Self, #import::Error> { use #import::#trait_name; #inner_impl } } }) } #[allow(clippy::too_many_arguments)] fn decode_enum_impl( attr_name: &'static str, crate_name: &Ident, trait_name: &Ident, decode_name: &Ident, deserialize_name: &Ident, data: DataEnum, ident_name: &Ident, mut global_param: ParametrizedAttr, impl_generics: ImplGenerics, ty_generics: TypeGenerics, where_clause: Option<&WhereClause>, ) -> Result<TokenStream2> { let encoding = EncodingDerive::with(&mut global_param, crate_name, true, true, false)?; let repr = encoding.repr; let mut inner_impl = TokenStream2::new(); for (order, variant) in data.variants.iter().enumerate() { let mut local_param = ParametrizedAttr::with(attr_name, &variant.attrs)?; // First, test individual attribute let _ = EncodingDerive::with( &mut local_param, crate_name, false, true, false, )?; // Second, combine global and local together let mut combined = global_param.clone().merged(local_param.clone())?; combined.args.remove(REPR); combined.args.remove(CRATE); let encoding = EncodingDerive::with( &mut combined, crate_name, false, true, false, )?; if encoding.skip { continue; } let field_impl = match variant.fields { Fields::Named(ref fields) => decode_fields_impl( attr_name, crate_name, trait_name, decode_name, deserialize_name, ident_name, &fields.named, local_param, true, TlvEncoding::Denied, )?, Fields::Unnamed(ref fields) => decode_fields_impl( attr_name, crate_name, trait_name, decode_name, deserialize_name, ident_name, &fields.unnamed, local_param, true, TlvEncoding::Denied, )?, Fields::Unit => TokenStream2::new(), }; let ident = &variant.ident; let value = match (encoding.value, encoding.by_order) { (Some(val), _) => val.to_token_stream(), (None, true) => Index::from(order as 
usize).to_token_stream(), (None, false) => quote! { Self::#ident as #repr }, }; inner_impl.append_all(quote_spanned! { variant.span() => x if x == #value => { Self::#ident { #field_impl } } }); } let import = encoding.use_crate; let enum_name = LitStr::new(&ident_name.to_string(), Span::call_site()); Ok(quote! { impl #impl_generics #import::#trait_name for #ident_name #ty_generics #where_clause { fn #decode_name<D: ::std::io::Read>(mut d: D) -> ::core::result::Result<Self, #import::Error> { use #import::#trait_name; Ok(match #repr::#decode_name(&mut d)? { #inner_impl unknown => Err(#import::Error::EnumValueNotKnown(#enum_name, unknown as usize))? }) } } }) } #[allow(clippy::too_many_arguments)] fn decode_fields_impl<'a>( attr_name: &'static str, crate_name: &Ident, trait_name: &Ident, decode_name: &Ident, deserialize_name: &Ident, ident_name: &Ident, fields: impl IntoIterator<Item = &'a Field>, mut parent_param: ParametrizedAttr, is_enum: bool, tlv_encoding: TlvEncoding, ) -> Result<TokenStream2> { let mut stream = TokenStream2::new(); let use_tlv = parent_param.args.contains_key(USE_TLV); parent_param.args.remove(CRATE); parent_param.args.remove(USE_TLV); let parent_attr = EncodingDerive::with( &mut parent_param.clone(), crate_name, false, is_enum, false, )?; let import = parent_attr.use_crate; let mut skipped_fields = vec![]; let mut strict_fields = vec![]; let mut tlv_fields = bmap! 
{}; let mut tlv_aggregator = None; for (index, field) in fields.into_iter().enumerate() { let mut local_param = ParametrizedAttr::with(attr_name, &field.attrs)?; // First, test individual attribute let _ = EncodingDerive::with( &mut local_param, crate_name, false, is_enum, use_tlv, )?; // Second, combine global and local together let mut combined = parent_param.clone().merged(local_param)?; let encoding = EncodingDerive::with( &mut combined, crate_name, false, is_enum, use_tlv, )?; let name = field .ident .as_ref() .map(Ident::to_token_stream) .unwrap_or_else(|| Index::from(index).to_token_stream()); if encoding.skip { skipped_fields.push(name); continue; } encoding.tlv.unwrap_or(TlvDerive::None).process( field, name, &mut strict_fields, &mut tlv_fields, &mut tlv_aggregator, )?; } for name in strict_fields { stream.append_all(quote_spanned! { Span::call_site() => #name: #import::#trait_name::#decode_name(&mut d)?, }); } let mut default_fields = skipped_fields; default_fields.extend(tlv_fields.values().map(|(n, _)| n).cloned()); default_fields.extend(tlv_aggregator.clone()); for name in default_fields { stream.append_all(quote_spanned! { Span::call_site() => #name: Default::default(), }); } if !is_enum { if use_tlv { let mut inner = TokenStream2::new(); for (type_no, (name, optional)) in tlv_fields { if optional { inner.append_all(quote_spanned! { Span::call_site() => #type_no => s.#name = Some(#import::#trait_name::#deserialize_name(bytes)?), }); } else { inner.append_all(quote_spanned! { Span::call_site() => #type_no => s.#name = #import::#trait_name::#deserialize_name(bytes)?, }); } } let aggregator = if let Some(ref tlv_aggregator) = tlv_aggregator { quote_spanned! { Span::call_site() => _ if type_no % 2 == 0 => return Err(#import::TlvError::UnknownEvenType(type_no).into()), _ => { s.#tlv_aggregator.insert(type_no, bytes); }, } } else { quote_spanned! 
{ Span::call_site() => _ if type_no % 2 == 0 => return Err(#import::TlvError::UnknownEvenType(type_no).into()), _ => {} } }; stream = match tlv_encoding { TlvEncoding::Count => quote_spanned! { Span::call_site() => let mut s = #ident_name { #stream }; let tlvs = ::std::collections::BTreeMap::<usize, Box<[u8]>>::#decode_name(&mut d)?; }, TlvEncoding::Length => quote_spanned! { Span::call_site() => let mut s = #ident_name { #stream }; let mut tlvs: ::std::collections::BTreeMap<usize, Box<[u8]>> = Default::default(); let data = Box::<[u8]>::#decode_name(&mut d)?; let iter = data.into_iter(); while iter.len() > 0 { let type_no = usize::#decode_name(&mut d)?; let len = usize::#decode_name(&mut d)?; let bytes: Box<[u8]> = iter.clone().take(len).copied().collect(); let max = tlvs.keys().max().copied().unwrap_or_default(); if type_no > max { return Err(#import::TlvError::Order { read: type_no, max }.into()); } if bytes.len() != len { return Err(#import::TlvError::Len { expected: len, actual: bytes.len() }.into()); } if tlvs.insert(type_no, bytes).is_some() { return Err(#import::TlvError::Repeated(type_no).into()); } } }, TlvEncoding::Denied => unreachable!( "denied TLV encoding is already checked in the caller \ method" ), }; stream.append_all(quote_spanned! { Span::call_site() => for (type_no, bytes) in tlvs { match type_no { #inner #aggregator } } Ok(s) }); } else { stream = quote_spanned! { Span::call_site() => Ok(#ident_name { #stream }) }; } } Ok(stream) }
32.194954
108
0.521479
ccd7c7cba53ce18aa2c5bcd3905bf019dbb22d84
6,759
extern crate tokio; extern crate env_logger; extern crate futures; use futures::sync::oneshot; use std::sync::{Arc, Mutex}; use std::thread; use tokio::io; use tokio::net::{TcpStream, TcpListener}; use tokio::prelude::future::lazy; use tokio::prelude::*; use tokio::runtime::Runtime; macro_rules! t { ($e:expr) => (match $e { Ok(e) => e, Err(e) => panic!("{} failed with {:?}", stringify!($e), e), }) } fn create_client_server_future() -> Box<Future<Item=(), Error=()> + Send> { let server = t!(TcpListener::bind(&"127.0.0.1:0".parse().unwrap())); let addr = t!(server.local_addr()); let client = TcpStream::connect(&addr); let server = server.incoming().take(1) .map_err(|e| panic!("accept err = {:?}", e)) .for_each(|socket| { tokio::spawn({ io::write_all(socket, b"hello") .map(|_| ()) .map_err(|e| panic!("write err = {:?}", e)) }) }) .map(|_| ()); let client = client .map_err(|e| panic!("connect err = {:?}", e)) .and_then(|client| { // Read all io::read_to_end(client, vec![]) .map(|_| ()) .map_err(|e| panic!("read err = {:?}", e)) }); let future = server.join(client) .map(|_| ()); Box::new(future) } #[test] fn runtime_tokio_run() { let _ = env_logger::init(); tokio::run(create_client_server_future()); } #[test] fn runtime_single_threaded() { let _ = env_logger::init(); let mut runtime = tokio::runtime::current_thread::Runtime::new() .unwrap(); runtime.block_on(create_client_server_future()).unwrap(); runtime.run().unwrap(); } #[test] fn runtime_single_threaded_block_on() { let _ = env_logger::init(); tokio::runtime::current_thread::block_on_all(create_client_server_future()).unwrap(); } #[test] fn runtime_single_threaded_block_on_all() { let cnt = Arc::new(Mutex::new(0)); let c = cnt.clone(); let msg = tokio::runtime::current_thread::block_on_all(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } // Spawn! 
tokio::spawn(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } Ok::<(), ()>(()) })); Ok::<_, ()>("hello") })).unwrap(); assert_eq!(2, *cnt.lock().unwrap()); assert_eq!(msg, "hello"); } #[test] fn runtime_single_threaded_racy_spawn() { let (trigger, exit) = futures::sync::oneshot::channel(); let (handle_tx, handle_rx) = ::std::sync::mpsc::channel(); let jh = ::std::thread::spawn(move || { let mut rt = tokio::runtime::current_thread::Runtime::new().unwrap(); handle_tx.send(rt.handle()).unwrap(); // don't exit until we are told to rt.block_on(exit.map_err(|_| ())).unwrap(); // run until all spawned futures (incl. the "exit" signal future) have completed. rt.run().unwrap(); }); let (tx, rx) = futures::sync::oneshot::channel(); let handle = handle_rx.recv().unwrap(); handle .spawn(futures::future::lazy(move || { tx.send(()).unwrap(); Ok(()) })) .unwrap(); // signal runtime thread to exit trigger.send(()).unwrap(); // wait for runtime thread to exit jh.join().unwrap(); assert_eq!(rx.wait().unwrap(), ()); } #[test] fn runtime_multi_threaded() { let _ = env_logger::init(); let mut runtime = tokio::runtime::Builder::new() .build() .unwrap(); runtime.spawn(create_client_server_future()); runtime.shutdown_on_idle().wait().unwrap(); } #[test] fn block_on_timer() { use std::time::{Duration, Instant}; use tokio::timer::{Delay, Error}; fn after_1s<T>(x: T) -> Box<Future<Item = T, Error = Error> + Send> where T: Send + 'static, { Box::new(Delay::new(Instant::now() + Duration::from_millis(100)).map(move |_| x)) } let mut runtime = Runtime::new().unwrap(); assert_eq!(runtime.block_on(after_1s(42)).unwrap(), 42); runtime.shutdown_on_idle().wait().unwrap(); } #[test] fn spawn_from_block_on() { let cnt = Arc::new(Mutex::new(0)); let c = cnt.clone(); let mut runtime = Runtime::new().unwrap(); let msg = runtime .block_on(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } // Spawn! 
tokio::spawn(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } Ok::<(), ()>(()) })); Ok::<_, ()>("hello") })) .unwrap(); runtime.shutdown_on_idle().wait().unwrap(); assert_eq!(2, *cnt.lock().unwrap()); assert_eq!(msg, "hello"); } #[test] fn block_waits() { let (tx, rx) = oneshot::channel(); thread::spawn(|| { use std::time::Duration; thread::sleep(Duration::from_millis(1000)); tx.send(()).unwrap(); }); let cnt = Arc::new(Mutex::new(0)); let c = cnt.clone(); let mut runtime = Runtime::new().unwrap(); runtime .block_on(rx.then(move |_| { { let mut x = c.lock().unwrap(); *x = 1 + *x; } Ok::<_, ()>(()) })) .unwrap(); assert_eq!(1, *cnt.lock().unwrap()); runtime.shutdown_on_idle().wait().unwrap(); } #[test] fn spawn_many() { const ITER: usize = 200; let cnt = Arc::new(Mutex::new(0)); let mut runtime = Runtime::new().unwrap(); for _ in 0..ITER { let c = cnt.clone(); runtime.spawn(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } Ok::<(), ()>(()) })); } runtime.shutdown_on_idle().wait().unwrap(); assert_eq!(ITER, *cnt.lock().unwrap()); } #[test] fn spawn_from_block_on_all() { let cnt = Arc::new(Mutex::new(0)); let c = cnt.clone(); let runtime = Runtime::new().unwrap(); let msg = runtime .block_on_all(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } // Spawn! tokio::spawn(lazy(move || { { let mut x = c.lock().unwrap(); *x = 1 + *x; } Ok::<(), ()>(()) })); Ok::<_, ()>("hello") })) .unwrap(); assert_eq!(2, *cnt.lock().unwrap()); assert_eq!(msg, "hello"); }
24.667883
89
0.490605
91ef5a7c98d6dbc7ed93ff1036453772c119b7dc
1,384
// run-pass // Demonstrate the use of the unguarded escape hatch with a type param in negative position // to assert that destructor will not access any dead data. // // Compare with compile-fail/issue28498-reject-lifetime-param.rs // Demonstrate that a type param in negative position causes dropck to reject code // that might indirectly access previously dropped value. // // Compare with run-pass/issue28498-ugeh-with-passed-to-fn.rs #![feature(dropck_eyepatch)] #[derive(Debug)] struct ScribbleOnDrop(String); impl Drop for ScribbleOnDrop { fn drop(&mut self) { self.0 = format!("DROPPED"); } } struct Foo<T>(u32, T, Box<for <'r> fn(&'r T) -> String>); unsafe impl<#[may_dangle] T> Drop for Foo<T> { fn drop(&mut self) { // Use of `may_dangle` is sound, because destructor never passes a `self.1` // to the callback (in `self.2`) despite having it available. println!("Dropping Foo({}, _)", self.0); } } fn callback(s: & &ScribbleOnDrop) -> String { format!("{:?}", s) } fn main() { let (last_dropped, foo0); let (foo1, first_dropped); last_dropped = ScribbleOnDrop(format!("last")); first_dropped = ScribbleOnDrop(format!("first")); foo0 = Foo(0, &last_dropped, Box::new(callback)); foo1 = Foo(1, &first_dropped, Box::new(callback)); println!("foo0.1: {:?} foo1.1: {:?}", foo0.1, foo1.1); }
29.446809
91
0.657514
03007cc89ec0cc75fcc1fe608c50a86f4398b03a
112
pub mod element; pub mod example; pub mod icon; pub mod item; pub mod library; pub mod module; pub mod package;
14
16
0.75
ebb924ebafe32f24c90753ed785dad36e6c1b247
7,672
//! This is full-featured modern JSON implementation according to ECMA-404 standard. //! //! This crate allows deserialization of JSON `Iterator<u8>` stream into primitive types (`bool`, `i32`, etc.), //! Strings and any other types that implement special trait called [TryFromJson](trait.TryFromJson.html), which can be implemented //! automatically through `#[derive(TryFromJson)]` for your structs and enums. //! //! And serialization back to JSON through [DebugToJson](trait.DebugToJson.html) trait, that acts like [Debug](https://doc.rust-lang.org/std/fmt/trait.Debug.html), allowing to //! print your objects with `println!()` and such. Or through [WriteToJson](trait.WriteToJson.html) trait that allows to write //! to a `io::Write` stream. //! //! This crate allows to read whitespece-separated JSON values from stream in sequence. It also allows to pipe blob strings to a writer. //! //! # Installation //! //! In `Cargo.toml` of your project add: //! //! ```toml //! [dependencies] //! nop-json = "2.0" //! ``` //! //! # Examples //! //! ## Creating Reader object //! //! First need to create a [Reader](struct.Reader.html) object giving it something that implements `Iterator<Item=u8>`. //! We can read from a string like this: //! //! ``` //! use nop_json::Reader; //! //! let mut reader = Reader::new(r#" "a JSON string" "#.bytes()); //! ``` //! //! To read from a file we need to convert `std::io::Read` to `Iterator<Item=u8>`. We can use `read_iter` crate for this. //! //! ```no_run //! use std::fs::File; //! use read_iter::ReadIter; // also add dependency to Cargo.toml //! use nop_json::Reader; //! //! let mut file = ReadIter::new(File::open("/tmp/test.json").unwrap()); //! let mut reader = Reader::new(&mut file); //! ``` //! //! See [Reader::new()](struct.Reader.html#method.new) for more details. //! //! ## Deserializing simple values //! //! 
To read JSON values from an input stream, call `reader.read()` method, and assign the result to a variable that implements `TryFromJson`. //! This crate adds implementation of `TryFromJson` to many primitive types, `Vec`, `HashMap`, and more. //! //! ``` //! use nop_json::Reader; //! //! let mut reader = Reader::new(r#" true 100.5 "Hello" "Infinity" [true, false] "#.bytes()); //! //! let the_true: bool = reader.read().unwrap(); //! let the_hundred_point_five: f32 = reader.read().unwrap(); //! let the_hello: String = reader.read().unwrap(); //! let the_infinity: f32 = reader.read().unwrap(); //! let the_array: Vec<bool> = reader.read().unwrap(); //! //! assert_eq!(the_true, true); //! assert_eq!(the_hundred_point_five, 100.5); //! assert_eq!(the_hello, "Hello"); //! assert!(the_infinity.is_infinite()); //! assert_eq!(the_array, vec![true, false]); //! ``` //! //! ## Deserializing any JSON values //! //! We have generic [Value](enum.Value.html) type that can hold any JSON node. //! //! ``` //! use nop_json::{Reader, Value}; //! use std::convert::TryInto; //! //! let mut reader = Reader::new(r#" true 100.5 "Hello" [true, false] "#.bytes()); //! //! let the_true: Value = reader.read().unwrap(); //! let the_hundred_point_five: Value = reader.read().unwrap(); //! let the_hello: Value = reader.read().unwrap(); //! let the_array: Value = reader.read().unwrap(); //! //! assert_eq!(the_true, Value::Bool(true)); //! let the_hundred_point_five: f32 = the_hundred_point_five.try_into().unwrap(); //! assert_eq!(the_hundred_point_five, 100.5f32); //! assert_eq!(the_hello, Value::String("Hello".to_string())); //! assert_eq!(the_array, Value::Array(vec![Value::Bool(true), Value::Bool(false)])); //! ``` //! //! You can parse any JSON document to [Value](enum.Value.html). //! //! ``` //! use nop_json::{Reader, Value}; //! //! let mut reader = Reader::new(r#" {"array": [{"x": 1}, "a string"]} "#.bytes()); //! let doc: Value = reader.read().unwrap(); //! 
assert_eq!(doc.to_string(), r#"{"array":[{"x":1},"a string"]}"#); //! ``` //! //! ## Deserializing/serializing structs and enums //! //! To deserialize a struct or an enum, your struct needs to implement [TryFromJson](trait.TryFromJson.html) and [ValidateJson](trait.ValidateJson.html) traits. //! To serialize - [DebugToJson](trait.DebugToJson.html) and/or [WriteToJson](trait.WriteToJson.html). //! //! ``` //! use nop_json::{Reader, TryFromJson, ValidateJson, DebugToJson}; //! //! #[derive(TryFromJson, ValidateJson, DebugToJson, PartialEq)] //! struct Point {x: i32, y: i32} //! //! #[derive(TryFromJson, ValidateJson, DebugToJson, PartialEq)] //! enum Geometry //! { #[json(point)] Point(Point), //! #[json(cx, cy, r)] Circle(i32, i32, i32), //! Nothing, //! } //! //! let mut reader = Reader::new(r#" {"point": {"x": 0, "y": 0}} "#.bytes()); //! let obj: Geometry = reader.read().unwrap(); //! println!("Serialized back to JSON: {:?}", obj); //! ``` //! See [TryFromJson](trait.TryFromJson.html), [ValidateJson](trait.ValidateJson.html), [DebugToJson](trait.DebugToJson.html), [WriteToJson](trait.WriteToJson.html). //! //! ## Serializing scalar values //! //! You can println!() word "true" or "false" to serialize a boolean. Also numbers can be printed as println!() does by default. //! The format is JSON-compatible. To serialize a &str, you can use [escape](fn.escape.html) function. //! //! Alternatively you can create a [Value](enum.Value.html) object, and serialize with it any scalar/nonscalar value. //! ``` //! use std::convert::TryInto; //! use nop_json::Value; //! //! let the_true: Value = true.try_into().unwrap(); //! println!("Serialized to JSON: {:?}", the_true); //! # assert_eq!(format!("{:?}", the_true), "true") //! ``` //! //! ## Skipping a value from stream //! //! To skip current value without storing it (and allocating memory), read it to the `()` type. //! ``` //! use nop_json::Reader; //! //! 
let mut reader = Reader::new(r#" true 100.5 "Hello" [true, false] "#.bytes()); //! //! let _: () = reader.read().unwrap(); //! let _: () = reader.read().unwrap(); //! let _: () = reader.read().unwrap(); //! let _: () = reader.read().unwrap(); //! ``` //! //! ## Reading binary data //! See [read_blob](struct.Reader.html#method.read_blob). //! //! ## Null, NaN, infinity and -0 //! //! Reading to a variable of type `Option<T>` can read either `T` or `null`. //! //! ``` //! use nop_json::Reader; //! //! let mut reader = Reader::new(r#" "non-null" null "#.bytes()); //! //! let str_or_null_1: Option<String> = reader.read().unwrap(); //! let str_or_null_2: Option<String> = reader.read().unwrap(); //! //! assert_eq!(str_or_null_1, Some("non-null".to_string())); //! assert_eq!(str_or_null_2, None); //! ``` //! //! Reading junk to `f32` or `f64` type will read NaN. Reading string "Infinity", "-Infinity" and "-0" will read corresponding floating point numbers. //! //! ``` //! use nop_json::Reader; //! //! let mut reader = Reader::new(r#" "Hello all!" "Infinity" "-Infinity" "0" "-0" "#.bytes()); //! //! let nan: f32 = reader.read().unwrap(); //! let inf: f32 = reader.read().unwrap(); //! let minf: f32 = reader.read().unwrap(); //! let zero: f32 = reader.read().unwrap(); //! let mzero: f32 = reader.read().unwrap(); //! //! assert!(nan.is_nan()); //! assert_eq!(inf, f32::INFINITY); //! assert_eq!(minf, f32::NEG_INFINITY); //! assert!(zero==0.0 && !zero.is_sign_negative()); //! assert!(mzero==0.0 && mzero.is_sign_negative()); //! ``` mod nop_json; mod value; mod debug_to_json; mod write_to_json; mod validate_json; mod escape; pub use crate::nop_json::{Reader, TryFromJson}; pub use crate::debug_to_json::DebugToJson; pub use crate::write_to_json::WriteToJson; pub use crate::validate_json::ValidateJson; pub use crate::escape::{escape, escape_bytes}; pub use value::Value;
36.884615
175
0.644552
e903cbdc029188d7b08178f93073c538996b33a3
116
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] fn main() { doukutsu_rs::init().unwrap(); }
19.333333
66
0.672414
ff089e4e9abf8020be9e7fcd6e985b6f6cd17ab8
13,666
use crate::distribution::{Continuous, ContinuousCDF}; use crate::statistics::*; use crate::{Result, StatsError}; use rand::distributions::OpenClosed01; use rand::Rng; use std::f64; /// Implements the [Pareto](https://en.wikipedia.org/wiki/Pareto_distribution) /// distribution /// /// # Examples /// /// ``` /// use statrs::distribution::{Pareto, Continuous}; /// use statrs::statistics::Distribution; /// use statrs::prec; /// /// let p = Pareto::new(1.0, 2.0).unwrap(); /// assert_eq!(p.mean().unwrap(), 2.0); /// assert!(prec::almost_eq(p.pdf(2.0), 0.25, 1e-15)); /// ``` #[derive(Debug, Copy, Clone, PartialEq)] pub struct Pareto { scale: f64, shape: f64, } impl Pareto { /// Constructs a new Pareto distribution with scale `scale`, and `shape` /// shape. /// /// # Errors /// /// Returns an error if any of `scale` or `shape` are `NaN`. /// Returns an error if `scale <= 0.0` or `shape <= 0.0` /// /// # Examples /// /// ``` /// use statrs::distribution::Pareto; /// /// let mut result = Pareto::new(1.0, 2.0); /// assert!(result.is_ok()); /// /// result = Pareto::new(0.0, 0.0); /// assert!(result.is_err()); /// ``` pub fn new(scale: f64, shape: f64) -> Result<Pareto> { let is_nan = scale.is_nan() || shape.is_nan(); if is_nan || scale <= 0.0 || shape <= 0.0 { Err(StatsError::BadParams) } else { Ok(Pareto { scale, shape }) } } /// Returns the scale of the Pareto distribution /// /// # Examples /// /// ``` /// use statrs::distribution::Pareto; /// /// let n = Pareto::new(1.0, 2.0).unwrap(); /// assert_eq!(n.scale(), 1.0); /// ``` pub fn scale(&self) -> f64 { self.scale } /// Returns the shape of the Pareto distribution /// /// # Examples /// /// ``` /// use statrs::distribution::Pareto; /// /// let n = Pareto::new(1.0, 2.0).unwrap(); /// assert_eq!(n.shape(), 2.0); /// ``` pub fn shape(&self) -> f64 { self.shape } } impl ::rand::distributions::Distribution<f64> for Pareto { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 { // Inverse transform sampling let u: f64 = 
rng.sample(OpenClosed01); self.scale * u.powf(-1.0 / self.shape) } } impl ContinuousCDF<f64, f64> for Pareto { /// Calculates the cumulative distribution function for the Pareto /// distribution at `x` /// /// # Formula /// /// ```ignore /// if x < x_m { /// 0 /// } else { /// 1 - (x_m/x)^α /// } /// ``` /// /// where `x_m` is the scale and `α` is the shape fn cdf(&self, x: f64) -> f64 { if x < self.scale { 0.0 } else { 1.0 - (self.scale / x).powf(self.shape) } } } impl Min<f64> for Pareto { /// Returns the minimum value in the domain of the Pareto distribution /// representable by a double precision float /// /// # Formula /// /// ```ignore /// x_m /// ``` /// /// where `x_m` is the scale fn min(&self) -> f64 { self.scale } } impl Max<f64> for Pareto { /// Returns the maximum value in the domain of the Pareto distribution /// representable by a double precision float /// /// # Formula /// /// ```ignore /// INF /// ``` fn max(&self) -> f64 { f64::INFINITY } } impl Distribution<f64> for Pareto { /// Returns the mean of the Pareto distribution /// /// # Formula /// /// ```ignore /// if α <= 1 { /// INF /// } else { /// (α * x_m)/(α - 1) /// } /// ``` /// /// where `x_m` is the scale and `α` is the shape fn mean(&self) -> Option<f64> { if self.shape <= 1.0 { None } else { Some((self.shape * self.scale) / (self.shape - 1.0)) } } /// Returns the variance of the Pareto distribution /// /// # Formula /// /// ```ignore /// if α <= 2 { /// INF /// } else { /// (x_m/(α - 1))^2 * (α/(α - 2)) /// } /// ``` /// /// where `x_m` is the scale and `α` is the shape fn variance(&self) -> Option<f64> { if self.shape <= 2.0 { None } else { let a = self.scale / (self.shape - 1.0); // just a temporary variable Some(a * a * self.shape / (self.shape - 2.0)) } } /// Returns the entropy for the Pareto distribution /// /// # Formula /// /// ```ignore /// ln(α/x_m) - 1/α - 1 /// ``` /// /// where `x_m` is the scale and `α` is the shape fn entropy(&self) -> Option<f64> { Some(self.shape.ln() - 
self.scale.ln() - (1.0 / self.shape) - 1.0) } /// Returns the skewness of the Pareto distribution /// /// # Panics /// /// If `α <= 3.0` /// /// where `α` is the shape /// /// # Formula /// /// ```ignore /// (2*(α + 1)/(α - 3))*sqrt((α - 2)/α) /// ``` /// /// where `α` is the shape fn skewness(&self) -> Option<f64> { if self.shape <= 3.0 { None } else { Some( (2.0 * (self.shape + 1.0) / (self.shape - 3.0)) * ((self.shape - 2.0) / self.shape).sqrt(), ) } } } impl Median<f64> for Pareto { /// Returns the median of the Pareto distribution /// /// # Formula /// /// ```ignore /// x_m*2^(1/α) /// ``` /// /// where `x_m` is the scale and `α` is the shape fn median(&self) -> f64 { self.scale * (2f64.powf(1.0 / self.shape)) } } impl Mode<Option<f64>> for Pareto { /// Returns the mode of the Pareto distribution /// /// # Formula /// /// ```ignore /// x_m /// ``` /// /// where `x_m` is the scale fn mode(&self) -> Option<f64> { Some(self.scale) } } impl Continuous<f64, f64> for Pareto { /// Calculates the probability density function for the Pareto distribution /// at `x` /// /// # Formula /// /// ```ignore /// if x < x_m { /// 0 /// } else { /// (α * x_m^α)/(x^(α + 1)) /// } /// ``` /// /// where `x_m` is the scale and `α` is the shape fn pdf(&self, x: f64) -> f64 { if x < self.scale { 0.0 } else { (self.shape * self.scale.powf(self.shape)) / x.powf(self.shape + 1.0) } } /// Calculates the log probability density function for the Pareto /// distribution at `x` /// /// # Formula /// /// ```ignore /// if x < x_m { /// -INF /// } else { /// ln(α) + α*ln(x_m) - (α + 1)*ln(x) /// } /// ``` /// /// where `x_m` is the scale and `α` is the shape fn ln_pdf(&self, x: f64) -> f64 { if x < self.scale { f64::NEG_INFINITY } else { self.shape.ln() + self.shape * self.scale.ln() - (self.shape + 1.0) * x.ln() } } } #[rustfmt::skip] #[cfg(test)] mod tests { use crate::statistics::*; use crate::distribution::{ContinuousCDF, Continuous, Pareto}; use crate::distribution::internal::*; use 
crate::consts::ACC; fn try_create(scale: f64, shape: f64) -> Pareto { let p = Pareto::new(scale, shape); assert!(p.is_ok()); p.unwrap() } fn create_case(scale: f64, shape: f64) { let p = try_create(scale, shape); assert_eq!(scale, p.scale()); assert_eq!(shape, p.shape()); } fn bad_create_case(scale: f64, shape: f64) { let p = Pareto::new(scale, shape); assert!(p.is_err()); } fn get_value<T, F>(scale: f64, shape: f64, eval: F) -> T where F: Fn(Pareto) -> T { let p = try_create(scale, shape); eval(p) } fn test_case<F>(scale: f64, shape: f64, expected: f64, eval: F) where F: Fn(Pareto) -> f64 { let x = get_value(scale, shape, eval); assert_eq!(expected, x); } fn test_almost<F>(scale: f64, shape: f64, expected: f64, acc: f64, eval: F) where F: Fn(Pareto) -> f64 { let p = try_create(scale, shape); let x = eval(p); assert_almost_eq!(expected, x, acc); } #[test] fn test_create() { create_case(10.0, 0.1); create_case(5.0, 1.0); create_case(0.1, 10.0); create_case(10.0, 100.0); create_case(1.0, f64::INFINITY); create_case(f64::INFINITY, f64::INFINITY); } #[test] fn test_bad_create() { bad_create_case(0.0, 0.0); bad_create_case(1.0, -1.0); bad_create_case(-1.0, 1.0); bad_create_case(-1.0, -1.0); bad_create_case(f64::NAN, 1.0); bad_create_case(1.0, f64::NAN); bad_create_case(f64::NAN, f64::NAN); } #[test] fn test_variance() { let variance = |x: Pareto| x.variance().unwrap(); test_case(1.0, 3.0, 0.75, variance); test_almost(10.0, 10.0, 125.0 / 81.0, 1e-13, variance); } #[test] #[should_panic] fn test_variance_degen() { let variance = |x: Pareto| x.variance().unwrap(); test_case(1.0, 1.0, f64::INFINITY, variance); // shape <= 2.0 } #[test] fn test_entropy() { let entropy = |x: Pareto| x.entropy().unwrap(); test_case(0.1, 0.1, -11.0, entropy); test_case(1.0, 1.0, -2.0, entropy); test_case(10.0, 10.0, -1.1, entropy); test_case(3.0, 1.0, -2.0 - 3f64.ln(), entropy); test_case(1.0, 3.0, -4.0/3.0 + 3f64.ln(), entropy); } #[test] fn test_skewness() { let skewness = |x: Pareto| 
x.skewness().unwrap(); test_case(1.0, 4.0, 5.0*2f64.sqrt(), skewness); test_case(1.0, 100.0, (707.0/485.0)*2f64.sqrt(), skewness); } #[test] #[should_panic] fn test_skewness_invalid_shape() { let skewness = |x: Pareto| x.skewness().unwrap(); get_value(1.0, 3.0, skewness); } #[test] fn test_mode() { let mode = |x: Pareto| x.mode().unwrap(); test_case(0.1, 1.0, 0.1, mode); test_case(2.0, 1.0, 2.0, mode); test_case(10.0, f64::INFINITY, 10.0, mode); test_case(f64::INFINITY, 1.0, f64::INFINITY, mode); } #[test] fn test_median() { let median = |x: Pareto| x.median(); test_case(0.1, 0.1, 102.4, median); test_case(1.0, 1.0, 2.0, median); test_case(10.0, 10.0, 10.0*2f64.powf(0.1), median); test_case(3.0, 0.5, 12.0, median); test_case(10.0, f64::INFINITY, 10.0, median); } #[test] fn test_min_max() { let min = |x: Pareto| x.min(); let max = |x: Pareto| x.max(); test_case(0.2, f64::INFINITY, 0.2, min); test_case(10.0, f64::INFINITY, 10.0, min); test_case(f64::INFINITY, 1.0, f64::INFINITY, min); test_case(1.0, 0.1, f64::INFINITY, max); test_case(3.0, 10.0, f64::INFINITY, max); } #[test] fn test_pdf() { let pdf = |arg: f64| move |x: Pareto| x.pdf(arg); test_case(1.0, 1.0, 0.0, pdf(0.1)); test_case(1.0, 1.0, 1.0, pdf(1.0)); test_case(1.0, 1.0, 4.0/9.0, pdf(1.5)); test_case(1.0, 1.0, 1.0/25.0, pdf(5.0)); test_case(1.0, 1.0, 1.0/2500.0, pdf(50.0)); test_case(1.0, 4.0, 4.0, pdf(1.0)); test_case(1.0, 4.0, 128.0/243.0, pdf(1.5)); test_case(1.0, 4.0, 1.0/78125000.0, pdf(50.0)); test_case(3.0, 2.0, 2.0/3.0, pdf(3.0)); test_case(3.0, 2.0, 18.0/125.0, pdf(5.0)); test_almost(25.0, 100.0, 1.5777218104420236e-30, 1e-50, pdf(50.0)); test_almost(100.0, 25.0, 6.6003546737276816e-6, 1e-16, pdf(150.0)); test_case(1.0, 2.0, 0.0, pdf(f64::INFINITY)); } #[test] fn test_ln_pdf() { let ln_pdf = |arg: f64| move |x: Pareto| x.ln_pdf(arg); test_case(1.0, 1.0, f64::NEG_INFINITY, ln_pdf(0.1)); test_case(1.0, 1.0, 0.0, ln_pdf(1.0)); test_almost(1.0, 1.0, 4f64.ln() - 9f64.ln(), 1e-14, ln_pdf(1.5)); 
test_almost(1.0, 1.0, -(25f64.ln()), 1e-14, ln_pdf(5.0)); test_almost(1.0, 1.0, -(2500f64.ln()), 1e-14, ln_pdf(50.0)); test_almost(1.0, 4.0, 4f64.ln(), 1e-14, ln_pdf(1.0)); test_almost(1.0, 4.0, 128f64.ln() - 243f64.ln(), 1e-14, ln_pdf(1.5)); test_almost(1.0, 4.0, -(78125000f64.ln()), 1e-14, ln_pdf(50.0)); test_almost(3.0, 2.0, 2f64.ln() - 3f64.ln(), 1e-14, ln_pdf(3.0)); test_almost(3.0, 2.0, 18f64.ln() - 125f64.ln(), 1e-14, ln_pdf(5.0)); test_almost(25.0, 100.0, 1.5777218104420236e-30f64.ln(), 1e-12, ln_pdf(50.0)); test_almost(100.0, 25.0, 6.6003546737276816e-6f64.ln(), 1e-12, ln_pdf(150.0)); test_case(1.0, 2.0, f64::NEG_INFINITY, ln_pdf(f64::INFINITY)); } #[test] fn test_cdf() { let cdf = |arg: f64| move |x: Pareto| x.cdf(arg); test_case(0.1, 0.1, 0.0, cdf(0.1)); test_case(1.0, 1.0, 0.0, cdf(1.0)); test_case(5.0, 5.0, 0.0, cdf(2.0)); test_case(7.0, 7.0, 0.9176457, cdf(10.0)); test_case(10.0, 10.0, 50700551.0/60466176.0, cdf(12.0)); test_case(5.0, 1.0, 0.5, cdf(10.0)); test_case(3.0, 10.0, 1023.0/1024.0, cdf(6.0)); test_case(1.0, 1.0, 1.0, cdf(f64::INFINITY)); } #[test] fn test_continuous() { test::check_continuous_distribution(&try_create(1.0, 10.0), 1.0, 10.0); test::check_continuous_distribution(&try_create(0.1, 2.0), 0.1, 100.0); } }
27.552419
88
0.499854
017caa7d04e57b7d05e57323925429282d30adc7
63,875
/* AUTO GENERATED FILE DO NOT EDIT codegen/elder_dragon_quicktype.py */ use serde::{Serialize, Deserialize}; extern crate serde_json; use self::serde_json::Error; pub fn serialize(json: &str) -> Result<Aatrox,Error>{ serde_json::from_str(json) } use std::collections::HashMap; #[derive(Serialize, Deserialize)] pub struct Aatrox { #[serde(rename = "type")] aatrox_type: GroupEnum, format: String, version: String, data: AatroxData, } #[derive(Serialize, Deserialize)] pub struct AatroxData { #[serde(rename = "Aatrox")] aatrox: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct PuneHedgehog { id: String, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: String, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<PurpleRecommended>, } #[derive(Serialize, Deserialize)] pub struct Image { full: String, sprite: Sprite, group: GroupEnum, x: i64, y: i64, w: i64, h: i64, } #[derive(Serialize, Deserialize)] pub struct Info { attack: i64, defense: i64, magic: i64, difficulty: i64, } #[derive(Serialize, Deserialize)] pub struct Passive { name: String, description: String, image: Image, } #[derive(Serialize, Deserialize)] pub struct PurpleRecommended { champion: String, title: String, map: Map, mode: Mode, #[serde(rename = "type")] recommended_type: RecommendedType, #[serde(rename = "customTag")] custom_tag: String, sortrank: Option<i64>, #[serde(rename = "extensionPage")] extension_page: Option<bool>, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, #[serde(rename = "customPanel")] custom_panel: Option<serde_json::Value>, blocks: Vec<Block>, #[serde(rename = "requiredPerk")] required_perk: Option<String>, #[serde(rename = "extenOrnnPage")] exten_ornn_page: Option<bool>, } #[derive(Serialize, Deserialize)] pub struct Block { #[serde(rename = "type")] block_type: BlockType, 
#[serde(rename = "recMath")] rec_math: Option<bool>, #[serde(rename = "recSteps")] rec_steps: Option<bool>, #[serde(rename = "minSummonerLevel")] min_summoner_level: Option<i64>, #[serde(rename = "maxSummonerLevel")] max_summoner_level: Option<i64>, #[serde(rename = "showIfSummonerSpell")] show_if_summoner_spell: Option<IfSummonerSpell>, #[serde(rename = "hideIfSummonerSpell")] hide_if_summoner_spell: Option<IfSummonerSpell>, #[serde(rename = "appendAfterSection")] append_after_section: Option<String>, #[serde(rename = "visibleWithAllOf")] visible_with_all_of: Option<Vec<Of>>, #[serde(rename = "hiddenWithAnyOf")] hidden_with_any_of: Option<Vec<Of>>, items: Vec<Item>, } #[derive(Serialize, Deserialize)] pub struct Item { id: String, count: i64, #[serde(rename = "hideCount")] hide_count: Option<bool>, hidecount: Option<bool>, } #[derive(Serialize, Deserialize)] pub struct Skin { id: String, num: i64, name: String, chromas: bool, } #[derive(Serialize, Deserialize)] pub struct Spell { id: String, name: String, description: String, tooltip: String, leveltip: Option<Leveltip>, maxrank: i64, cooldown: Vec<f64>, #[serde(rename = "cooldownBurn")] cooldown_burn: String, cost: Vec<i64>, #[serde(rename = "costBurn")] cost_burn: String, datavalues: Datavalues, effect: Vec<Option<Vec<f64>>>, #[serde(rename = "effectBurn")] effect_burn: Vec<Option<String>>, vars: Vec<Var>, #[serde(rename = "costType")] cost_type: CostType, maxammo: String, range: Vec<i64>, #[serde(rename = "rangeBurn")] range_burn: String, image: Image, resource: Option<String>, } #[derive(Serialize, Deserialize)] pub struct Datavalues { } #[derive(Serialize, Deserialize)] pub struct Leveltip { label: Vec<String>, effect: Vec<String>, } #[derive(Serialize, Deserialize)] pub struct Var { link: Link, coeff: Coeff, key: Key, } #[derive(Serialize, Deserialize)] pub struct Ahri { #[serde(rename = "type")] ahri_type: GroupEnum, format: String, version: String, data: AhriData, } #[derive(Serialize, Deserialize)] pub 
struct AhriData { #[serde(rename = "Ahri")] ahri: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct HammerfestPonies { id: String, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: Partype, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<FluffyRecommended>, } #[derive(Serialize, Deserialize)] pub struct FluffyRecommended { champion: String, title: String, map: Map, mode: Mode, #[serde(rename = "type")] recommended_type: RecommendedType, #[serde(rename = "customTag")] custom_tag: Option<String>, sortrank: Option<i64>, #[serde(rename = "extensionPage")] extension_page: Option<bool>, #[serde(rename = "customPanel")] custom_panel: Option<serde_json::Value>, blocks: Vec<Block>, #[serde(rename = "requiredPerk")] required_perk: Option<String>, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, priority: Option<bool>, } #[derive(Serialize, Deserialize)] pub struct Akali { #[serde(rename = "type")] akali_type: GroupEnum, format: String, version: String, data: AkaliData, } #[derive(Serialize, Deserialize)] pub struct AkaliData { #[serde(rename = "Akali")] akali: AkaliClass, } #[derive(Serialize, Deserialize)] pub struct AkaliClass { id: String, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: Partype, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<AkaliRecommended>, } #[derive(Serialize, Deserialize)] pub struct AkaliRecommended { champion: String, title: String, map: Map, mode: Mode, #[serde(rename = "type")] recommended_type: RecommendedType, #[serde(rename = "customTag")] custom_tag: String, #[serde(rename = "requiredPerk")] required_perk: Option<String>, sortrank: Option<i64>, 
#[serde(rename = "extensionPage")] extension_page: Option<bool>, #[serde(rename = "customPanel")] custom_panel: Option<String>, blocks: Vec<Block>, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, #[serde(rename = "customPanelCurrencyType")] custom_panel_currency_type: Option<String>, #[serde(rename = "customPanelBuffCurrencyName")] custom_panel_buff_currency_name: Option<String>, } #[derive(Serialize, Deserialize)] pub struct Alistar { #[serde(rename = "type")] alistar_type: GroupEnum, format: String, version: String, data: AlistarData, } #[derive(Serialize, Deserialize)] pub struct AlistarData { #[serde(rename = "Alistar")] alistar: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Amumu { #[serde(rename = "type")] amumu_type: GroupEnum, format: String, version: String, data: AmumuData, } #[derive(Serialize, Deserialize)] pub struct AmumuData { #[serde(rename = "Amumu")] amumu: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Anivia { #[serde(rename = "type")] anivia_type: GroupEnum, format: String, version: String, data: AniviaData, } #[derive(Serialize, Deserialize)] pub struct AniviaData { #[serde(rename = "Anivia")] anivia: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct PurpleAnivia { id: PurpleId, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: Partype, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<AniviaRecommended>, } #[derive(Serialize, Deserialize)] pub struct AniviaRecommended { champion: PurpleId, title: String, map: Map, mode: Mode, #[serde(rename = "type")] recommended_type: RecommendedType, #[serde(rename = "customTag")] custom_tag: String, sortrank: Option<i64>, #[serde(rename = "extensionPage")] extension_page: bool, #[serde(rename = "customPanel")] custom_panel: Option<serde_json::Value>, blocks: 
Vec<Block>, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, #[serde(rename = "requiredPerk")] required_perk: Option<String>, } #[derive(Serialize, Deserialize)] pub struct Annie { #[serde(rename = "type")] annie_type: GroupEnum, format: String, version: String, data: AnnieData, } #[derive(Serialize, Deserialize)] pub struct AnnieData { #[serde(rename = "Annie")] annie: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct Ashe { #[serde(rename = "type")] ashe_type: GroupEnum, format: String, version: String, data: AsheData, } #[derive(Serialize, Deserialize)] pub struct AsheData { #[serde(rename = "Ashe")] ashe: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct AurelionSol { #[serde(rename = "type")] aurelion_sol_type: GroupEnum, format: String, version: String, data: AurelionSolData, } #[derive(Serialize, Deserialize)] pub struct AurelionSolData { #[serde(rename = "AurelionSol")] aurelion_sol: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct Azir { #[serde(rename = "type")] azir_type: GroupEnum, format: String, version: String, data: AzirData, } #[derive(Serialize, Deserialize)] pub struct AzirData { #[serde(rename = "Azir")] azir: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Bard { #[serde(rename = "type")] bard_type: GroupEnum, format: String, version: String, data: BardData, } #[derive(Serialize, Deserialize)] pub struct BardData { #[serde(rename = "Bard")] bard: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Blitzcrank { #[serde(rename = "type")] blitzcrank_type: GroupEnum, format: String, version: String, data: BlitzcrankData, } #[derive(Serialize, Deserialize)] pub struct BlitzcrankData { #[serde(rename = "Blitzcrank")] blitzcrank: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Brand { #[serde(rename = "type")] brand_type: GroupEnum, format: String, version: String, data: BrandData, } #[derive(Serialize, Deserialize)] pub struct BrandData { 
#[serde(rename = "Brand")] brand: BrandClass, } #[derive(Serialize, Deserialize)] pub struct BrandClass { id: BrandId, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: Partype, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<BrandRecommended>, } #[derive(Serialize, Deserialize)] pub struct BrandRecommended { champion: BrandId, title: String, #[serde(rename = "type")] recommended_type: RecommendedType, map: Map, mode: Mode, priority: Option<bool>, blocks: Vec<Block>, #[serde(rename = "customTag")] custom_tag: Option<String>, sortrank: Option<i64>, #[serde(rename = "extensionPage")] extension_page: Option<bool>, #[serde(rename = "customPanel")] custom_panel: Option<serde_json::Value>, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, } #[derive(Serialize, Deserialize)] pub struct Braum { #[serde(rename = "type")] braum_type: GroupEnum, format: String, version: String, data: BraumData, } #[derive(Serialize, Deserialize)] pub struct BraumData { #[serde(rename = "Braum")] braum: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Caitlyn { #[serde(rename = "type")] caitlyn_type: GroupEnum, format: String, version: String, data: CaitlynData, } #[derive(Serialize, Deserialize)] pub struct CaitlynData { #[serde(rename = "Caitlyn")] caitlyn: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Camille { #[serde(rename = "type")] camille_type: GroupEnum, format: String, version: String, data: CamilleData, } #[derive(Serialize, Deserialize)] pub struct CamilleData { #[serde(rename = "Camille")] camille: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Cassiopeia { #[serde(rename = "type")] cassiopeia_type: GroupEnum, format: String, version: String, data: CassiopeiaData, } #[derive(Serialize, Deserialize)] pub struct CassiopeiaData { 
#[serde(rename = "Cassiopeia")] cassiopeia: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct Chogath { #[serde(rename = "type")] chogath_type: GroupEnum, format: String, version: String, data: ChogathData, } #[derive(Serialize, Deserialize)] pub struct ChogathData { #[serde(rename = "Chogath")] chogath: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Corki { #[serde(rename = "type")] corki_type: GroupEnum, format: String, version: String, data: CorkiData, } #[derive(Serialize, Deserialize)] pub struct CorkiData { #[serde(rename = "Corki")] corki: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Darius { #[serde(rename = "type")] darius_type: GroupEnum, format: String, version: String, data: DariusData, } #[derive(Serialize, Deserialize)] pub struct DariusData { #[serde(rename = "Darius")] darius: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Diana { #[serde(rename = "type")] diana_type: GroupEnum, format: String, version: String, data: DianaData, } #[derive(Serialize, Deserialize)] pub struct DianaData { #[serde(rename = "Diana")] diana: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Draven { #[serde(rename = "type")] draven_type: GroupEnum, format: String, version: String, data: DravenData, } #[derive(Serialize, Deserialize)] pub struct DravenData { #[serde(rename = "Draven")] draven: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct DrMundo { #[serde(rename = "type")] dr_mundo_type: GroupEnum, format: String, version: String, data: DrMundoData, } #[derive(Serialize, Deserialize)] pub struct DrMundoData { #[serde(rename = "DrMundo")] dr_mundo: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Ekko { #[serde(rename = "type")] ekko_type: GroupEnum, format: String, version: String, data: EkkoData, } #[derive(Serialize, Deserialize)] pub struct EkkoData { #[serde(rename = "Ekko")] ekko: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Elise { #[serde(rename 
= "type")] elise_type: GroupEnum, format: String, version: String, data: EliseData, } #[derive(Serialize, Deserialize)] pub struct EliseData { #[serde(rename = "Elise")] elise: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Evelynn { #[serde(rename = "type")] evelynn_type: GroupEnum, format: String, version: String, data: EvelynnData, } #[derive(Serialize, Deserialize)] pub struct EvelynnData { #[serde(rename = "Evelynn")] evelynn: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Ezreal { #[serde(rename = "type")] ezreal_type: GroupEnum, format: String, version: String, data: EzrealData, } #[derive(Serialize, Deserialize)] pub struct EzrealData { #[serde(rename = "Ezreal")] ezreal: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Fiddlesticks { #[serde(rename = "type")] fiddlesticks_type: GroupEnum, format: String, version: String, data: FiddlesticksData, } #[derive(Serialize, Deserialize)] pub struct FiddlesticksData { #[serde(rename = "Fiddlesticks")] fiddlesticks: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Fiora { #[serde(rename = "type")] fiora_type: GroupEnum, format: String, version: String, data: FioraData, } #[derive(Serialize, Deserialize)] pub struct FioraData { #[serde(rename = "Fiora")] fiora: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Fizz { #[serde(rename = "type")] fizz_type: GroupEnum, format: String, version: String, data: FizzData, } #[derive(Serialize, Deserialize)] pub struct FizzData { #[serde(rename = "Fizz")] fizz: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Galio { #[serde(rename = "type")] galio_type: GroupEnum, format: String, version: String, data: GalioData, } #[derive(Serialize, Deserialize)] pub struct GalioData { #[serde(rename = "Galio")] galio: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Gangplank { #[serde(rename = "type")] gangplank_type: GroupEnum, format: String, version: String, data: GangplankData, } 
#[derive(Serialize, Deserialize)] pub struct GangplankData { #[serde(rename = "Gangplank")] gangplank: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Garen { #[serde(rename = "type")] garen_type: GroupEnum, format: String, version: String, data: GarenData, } #[derive(Serialize, Deserialize)] pub struct GarenData { #[serde(rename = "Garen")] garen: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Gnar { #[serde(rename = "type")] gnar_type: GroupEnum, format: String, version: String, data: GnarData, } #[derive(Serialize, Deserialize)] pub struct GnarData { #[serde(rename = "Gnar")] gnar: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Gragas { #[serde(rename = "type")] gragas_type: GroupEnum, format: String, version: String, data: GragasData, } #[derive(Serialize, Deserialize)] pub struct GragasData { #[serde(rename = "Gragas")] gragas: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Graves { #[serde(rename = "type")] graves_type: GroupEnum, format: String, version: String, data: GravesData, } #[derive(Serialize, Deserialize)] pub struct GravesData { #[serde(rename = "Graves")] graves: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Hecarim { #[serde(rename = "type")] hecarim_type: GroupEnum, format: String, version: String, data: HecarimData, } #[derive(Serialize, Deserialize)] pub struct HecarimData { #[serde(rename = "Hecarim")] hecarim: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Heimerdinger { #[serde(rename = "type")] heimerdinger_type: GroupEnum, format: String, version: String, data: HeimerdingerData, } #[derive(Serialize, Deserialize)] pub struct HeimerdingerData { #[serde(rename = "Heimerdinger")] heimerdinger: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Illaoi { #[serde(rename = "type")] illaoi_type: GroupEnum, format: String, version: String, data: IllaoiData, } #[derive(Serialize, Deserialize)] pub struct IllaoiData { #[serde(rename = "Illaoi")] 
illaoi: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Irelia { #[serde(rename = "type")] irelia_type: GroupEnum, format: String, version: String, data: IreliaData, } #[derive(Serialize, Deserialize)] pub struct IreliaData { #[serde(rename = "Irelia")] irelia: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Ivern { #[serde(rename = "type")] ivern_type: GroupEnum, format: String, version: String, data: IvernData, } #[derive(Serialize, Deserialize)] pub struct IvernData { #[serde(rename = "Ivern")] ivern: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Janna { #[serde(rename = "type")] janna_type: GroupEnum, format: String, version: String, data: JannaData, } #[derive(Serialize, Deserialize)] pub struct JannaData { #[serde(rename = "Janna")] janna: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct JarvanIv { #[serde(rename = "type")] jarvan_iv_type: GroupEnum, format: String, version: String, data: JarvanIvData, } #[derive(Serialize, Deserialize)] pub struct JarvanIvData { #[serde(rename = "JarvanIV")] jarvan_iv: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Jax { #[serde(rename = "type")] jax_type: GroupEnum, format: String, version: String, data: JaxData, } #[derive(Serialize, Deserialize)] pub struct JaxData { #[serde(rename = "Jax")] jax: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Jayce { #[serde(rename = "type")] jayce_type: GroupEnum, format: String, version: String, data: JayceData, } #[derive(Serialize, Deserialize)] pub struct JayceData { #[serde(rename = "Jayce")] jayce: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Jhin { #[serde(rename = "type")] jhin_type: GroupEnum, format: String, version: String, data: JhinData, } #[derive(Serialize, Deserialize)] pub struct JhinData { #[serde(rename = "Jhin")] jhin: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Jinx { #[serde(rename = "type")] jinx_type: GroupEnum, format: String, version: 
String, data: JinxData, } #[derive(Serialize, Deserialize)] pub struct JinxData { #[serde(rename = "Jinx")] jinx: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kaisa { #[serde(rename = "type")] kaisa_type: GroupEnum, format: String, version: String, data: KaisaData, } #[derive(Serialize, Deserialize)] pub struct KaisaData { #[serde(rename = "Kaisa")] kaisa: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kalista { #[serde(rename = "type")] kalista_type: GroupEnum, format: String, version: String, data: KalistaData, } #[derive(Serialize, Deserialize)] pub struct KalistaData { #[serde(rename = "Kalista")] kalista: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Karma { #[serde(rename = "type")] karma_type: GroupEnum, format: String, version: String, data: KarmaData, } #[derive(Serialize, Deserialize)] pub struct KarmaData { #[serde(rename = "Karma")] karma: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Karthus { #[serde(rename = "type")] karthus_type: GroupEnum, format: String, version: String, data: KarthusData, } #[derive(Serialize, Deserialize)] pub struct KarthusData { #[serde(rename = "Karthus")] karthus: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Kassadin { #[serde(rename = "type")] kassadin_type: GroupEnum, format: String, version: String, data: KassadinData, } #[derive(Serialize, Deserialize)] pub struct KassadinData { #[serde(rename = "Kassadin")] kassadin: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct Katarina { #[serde(rename = "type")] katarina_type: GroupEnum, format: String, version: String, data: KatarinaData, } #[derive(Serialize, Deserialize)] pub struct KatarinaData { #[serde(rename = "Katarina")] katarina: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kayle { #[serde(rename = "type")] kayle_type: GroupEnum, format: String, version: String, data: KayleData, } #[derive(Serialize, Deserialize)] pub struct KayleData { #[serde(rename = "Kayle")] 
kayle: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kayn { #[serde(rename = "type")] kayn_type: GroupEnum, format: String, version: String, data: KaynData, } #[derive(Serialize, Deserialize)] pub struct KaynData { #[serde(rename = "Kayn")] kayn: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kennen { #[serde(rename = "type")] kennen_type: GroupEnum, format: String, version: String, data: KennenData, } #[derive(Serialize, Deserialize)] pub struct KennenData { #[serde(rename = "Kennen")] kennen: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Khazix { #[serde(rename = "type")] khazix_type: GroupEnum, format: String, version: String, data: KhazixData, } #[derive(Serialize, Deserialize)] pub struct KhazixData { #[serde(rename = "Khazix")] khazix: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kindred { #[serde(rename = "type")] kindred_type: GroupEnum, format: String, version: String, data: KindredData, } #[derive(Serialize, Deserialize)] pub struct KindredData { #[serde(rename = "Kindred")] kindred: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Kled { #[serde(rename = "type")] kled_type: GroupEnum, format: String, version: String, data: KledData, } #[derive(Serialize, Deserialize)] pub struct KledData { #[serde(rename = "Kled")] kled: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct KogMaw { #[serde(rename = "type")] kog_maw_type: GroupEnum, format: String, version: String, data: KogMawData, } #[derive(Serialize, Deserialize)] pub struct KogMawData { #[serde(rename = "KogMaw")] kog_maw: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Leblanc { #[serde(rename = "type")] leblanc_type: GroupEnum, format: String, version: String, data: LeblancData, } #[derive(Serialize, Deserialize)] pub struct LeblancData { #[serde(rename = "Leblanc")] leblanc: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct LeeSin { #[serde(rename = "type")] lee_sin_type: GroupEnum, format: 
String, version: String, data: LeeSinData, } #[derive(Serialize, Deserialize)] pub struct LeeSinData { #[serde(rename = "LeeSin")] lee_sin: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Leona { #[serde(rename = "type")] leona_type: GroupEnum, format: String, version: String, data: LeonaData, } #[derive(Serialize, Deserialize)] pub struct LeonaData { #[serde(rename = "Leona")] leona: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Lissandra { #[serde(rename = "type")] lissandra_type: GroupEnum, format: String, version: String, data: LissandraData, } #[derive(Serialize, Deserialize)] pub struct LissandraData { #[serde(rename = "Lissandra")] lissandra: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Lucian { #[serde(rename = "type")] lucian_type: GroupEnum, format: String, version: String, data: LucianData, } #[derive(Serialize, Deserialize)] pub struct LucianData { #[serde(rename = "Lucian")] lucian: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Lulu { #[serde(rename = "type")] lulu_type: GroupEnum, format: String, version: String, data: LuluData, } #[derive(Serialize, Deserialize)] pub struct LuluData { #[serde(rename = "Lulu")] lulu: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Lux { #[serde(rename = "type")] lux_type: GroupEnum, format: String, version: String, data: LuxData, } #[derive(Serialize, Deserialize)] pub struct LuxData { #[serde(rename = "Lux")] lux: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Malphite { #[serde(rename = "type")] malphite_type: GroupEnum, format: String, version: String, data: MalphiteData, } #[derive(Serialize, Deserialize)] pub struct MalphiteData { #[serde(rename = "Malphite")] malphite: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Malzahar { #[serde(rename = "type")] malzahar_type: GroupEnum, format: String, version: String, data: MalzaharData, } #[derive(Serialize, Deserialize)] pub struct MalzaharData { #[serde(rename = 
"Malzahar")] malzahar: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Maokai { #[serde(rename = "type")] maokai_type: GroupEnum, format: String, version: String, data: MaokaiData, } #[derive(Serialize, Deserialize)] pub struct MaokaiData { #[serde(rename = "Maokai")] maokai: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct MasterYi { #[serde(rename = "type")] master_yi_type: GroupEnum, format: String, version: String, data: MasterYiData, } #[derive(Serialize, Deserialize)] pub struct MasterYiData { #[serde(rename = "MasterYi")] master_yi: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct MissFortune { #[serde(rename = "type")] miss_fortune_type: GroupEnum, format: String, version: String, data: MissFortuneData, } #[derive(Serialize, Deserialize)] pub struct MissFortuneData { #[serde(rename = "MissFortune")] miss_fortune: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct MonkeyKing { #[serde(rename = "type")] monkey_king_type: GroupEnum, format: String, version: String, data: MonkeyKingData, } #[derive(Serialize, Deserialize)] pub struct MonkeyKingData { #[serde(rename = "MonkeyKing")] monkey_king: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Mordekaiser { #[serde(rename = "type")] mordekaiser_type: GroupEnum, format: String, version: String, data: MordekaiserData, } #[derive(Serialize, Deserialize)] pub struct MordekaiserData { #[serde(rename = "Mordekaiser")] mordekaiser: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Morgana { #[serde(rename = "type")] morgana_type: GroupEnum, format: String, version: String, data: MorganaData, } #[derive(Serialize, Deserialize)] pub struct MorganaData { #[serde(rename = "Morgana")] morgana: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Nami { #[serde(rename = "type")] nami_type: GroupEnum, format: String, version: String, data: NamiData, } #[derive(Serialize, Deserialize)] pub struct NamiData { #[serde(rename = "Nami")] nami: 
HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Nasus { #[serde(rename = "type")] nasus_type: GroupEnum, format: String, version: String, data: NasusData, } #[derive(Serialize, Deserialize)] pub struct NasusData { #[serde(rename = "Nasus")] nasus: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Nautilus { #[serde(rename = "type")] nautilus_type: GroupEnum, format: String, version: String, data: NautilusData, } #[derive(Serialize, Deserialize)] pub struct NautilusData { #[serde(rename = "Nautilus")] nautilus: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Neeko { #[serde(rename = "type")] neeko_type: GroupEnum, format: String, version: String, data: NeekoData, } #[derive(Serialize, Deserialize)] pub struct NeekoData { #[serde(rename = "Neeko")] neeko: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Nidalee { #[serde(rename = "type")] nidalee_type: GroupEnum, format: String, version: String, data: NidaleeData, } #[derive(Serialize, Deserialize)] pub struct NidaleeData { #[serde(rename = "Nidalee")] nidalee: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Nocturne { #[serde(rename = "type")] nocturne_type: GroupEnum, format: String, version: String, data: NocturneData, } #[derive(Serialize, Deserialize)] pub struct NocturneData { #[serde(rename = "Nocturne")] nocturne: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Nunu { #[serde(rename = "type")] nunu_type: GroupEnum, format: String, version: String, data: NunuData, } #[derive(Serialize, Deserialize)] pub struct NunuData { #[serde(rename = "Nunu")] nunu: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Olaf { #[serde(rename = "type")] olaf_type: GroupEnum, format: String, version: String, data: OlafData, } #[derive(Serialize, Deserialize)] pub struct OlafData { #[serde(rename = "Olaf")] olaf: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Orianna { #[serde(rename = "type")] orianna_type: GroupEnum, 
format: String, version: String, data: OriannaData, } #[derive(Serialize, Deserialize)] pub struct OriannaData { #[serde(rename = "Orianna")] orianna: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct Ornn { #[serde(rename = "type")] ornn_type: GroupEnum, format: String, version: String, data: OrnnData, } #[derive(Serialize, Deserialize)] pub struct OrnnData { #[serde(rename = "Ornn")] ornn: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Pantheon { #[serde(rename = "type")] pantheon_type: GroupEnum, format: String, version: String, data: PantheonData, } #[derive(Serialize, Deserialize)] pub struct PantheonData { #[serde(rename = "Pantheon")] pantheon: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Poppy { #[serde(rename = "type")] poppy_type: GroupEnum, format: String, version: String, data: PoppyData, } #[derive(Serialize, Deserialize)] pub struct PoppyData { #[serde(rename = "Poppy")] poppy: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Pyke { #[serde(rename = "type")] pyke_type: GroupEnum, format: String, version: String, data: PykeData, } #[derive(Serialize, Deserialize)] pub struct PykeData { #[serde(rename = "Pyke")] pyke: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Qiyana { #[serde(rename = "type")] qiyana_type: GroupEnum, format: String, version: String, data: QiyanaData, } #[derive(Serialize, Deserialize)] pub struct QiyanaData { #[serde(rename = "Qiyana")] qiyana: QiyanaClass, } #[derive(Serialize, Deserialize)] pub struct QiyanaClass { id: FluffyId, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<Option<serde_json::Value>>, enemytips: Vec<Option<serde_json::Value>>, tags: Vec<Tag>, partype: Partype, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<QiyanaRecommended>, } #[derive(Serialize, Deserialize)] pub struct QiyanaRecommended { champion: FluffyId, title: String, 
map: String, mode: Mode, #[serde(rename = "type")] recommended_type: RecommendedType, #[serde(rename = "customTag")] custom_tag: String, sortrank: i64, #[serde(rename = "extensionPage")] extension_page: bool, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, #[serde(rename = "customPanel")] custom_panel: Option<serde_json::Value>, blocks: Vec<Block>, } #[derive(Serialize, Deserialize)] pub struct Quinn { #[serde(rename = "type")] quinn_type: GroupEnum, format: String, version: String, data: QuinnData, } #[derive(Serialize, Deserialize)] pub struct QuinnData { #[serde(rename = "Quinn")] quinn: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Rakan { #[serde(rename = "type")] rakan_type: GroupEnum, format: String, version: String, data: RakanData, } #[derive(Serialize, Deserialize)] pub struct RakanData { #[serde(rename = "Rakan")] rakan: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Rammus { #[serde(rename = "type")] rammus_type: GroupEnum, format: String, version: String, data: RammusData, } #[derive(Serialize, Deserialize)] pub struct RammusData { #[serde(rename = "Rammus")] rammus: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct RekSai { #[serde(rename = "type")] rek_sai_type: GroupEnum, format: String, version: String, data: RekSaiData, } #[derive(Serialize, Deserialize)] pub struct RekSaiData { #[serde(rename = "RekSai")] rek_sai: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Renekton { #[serde(rename = "type")] renekton_type: GroupEnum, format: String, version: String, data: RenektonData, } #[derive(Serialize, Deserialize)] pub struct RenektonData { #[serde(rename = "Renekton")] renekton: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Rengar { #[serde(rename = "type")] rengar_type: GroupEnum, format: String, version: String, data: RengarData, } #[derive(Serialize, Deserialize)] pub struct RengarData { #[serde(rename = "Rengar")] rengar: PuneHedgehog, } 
// NOTE(review): auto-generated (quicktype-style) serde model code for per-champion
// JSON files. Each champion struct `X` is the top-level envelope
// ({ "type", "format", "version", "data" }) and the matching `XData` struct holds
// the single champion entry, keyed by its API name via #[serde(rename = "...")].
// The un-renamed field names (`format`, `version`, `data`) are themselves the wire
// keys — do not rename them without adding serde attributes. The payload type
// (PuneHedgehog / HammerfestPonies / QiyanaClass / ...) reflects which optional
// fields that champion's JSON happens to carry — presumably distinct shapes
// inferred by the generator; confirm against the generator's input corpus.
#[derive(Serialize, Deserialize)]
pub struct Riven {
    #[serde(rename = "type")]
    riven_type: GroupEnum,
    format: String,
    version: String,
    data: RivenData,
}

#[derive(Serialize, Deserialize)]
pub struct RivenData {
    #[serde(rename = "Riven")]
    riven: PuneHedgehog,
}

#[derive(Serialize, Deserialize)]
pub struct Rumble {
    #[serde(rename = "type")]
    rumble_type: GroupEnum,
    format: String,
    version: String,
    data: RumbleData,
}

#[derive(Serialize, Deserialize)]
pub struct RumbleData {
    #[serde(rename = "Rumble")]
    rumble: PuneHedgehog,
}

#[derive(Serialize, Deserialize)]
pub struct Ryze {
    #[serde(rename = "type")]
    ryze_type: GroupEnum,
    format: String,
    version: String,
    data: RyzeData,
}

#[derive(Serialize, Deserialize)]
pub struct RyzeData {
    #[serde(rename = "Ryze")]
    ryze: PuneHedgehog,
}

#[derive(Serialize, Deserialize)]
pub struct Sejuani {
    #[serde(rename = "type")]
    sejuani_type: GroupEnum,
    format: String,
    version: String,
    data: SejuaniData,
}

#[derive(Serialize, Deserialize)]
pub struct SejuaniData {
    #[serde(rename = "Sejuani")]
    sejuani: PuneHedgehog,
}

#[derive(Serialize, Deserialize)]
pub struct Senna {
    #[serde(rename = "type")]
    senna_type: GroupEnum,
    format: String,
    version: String,
    data: SennaData,
}

#[derive(Serialize, Deserialize)]
pub struct SennaData {
    // Senna uses the richer payload shape (QiyanaClass) rather than PuneHedgehog.
    #[serde(rename = "Senna")]
    senna: QiyanaClass,
}

#[derive(Serialize, Deserialize)]
pub struct Shaco {
    #[serde(rename = "type")]
    shaco_type: GroupEnum,
    format: String,
    version: String,
    data: ShacoData,
}

#[derive(Serialize, Deserialize)]
pub struct ShacoData {
    #[serde(rename = "Shaco")]
    shaco: PuneHedgehog,
}

#[derive(Serialize, Deserialize)]
pub struct Shen {
    #[serde(rename = "type")]
    shen_type: GroupEnum,
    format: String,
    version: String,
    data: ShenData,
}

#[derive(Serialize, Deserialize)]
pub struct ShenData {
    #[serde(rename = "Shen")]
    shen: HammerfestPonies,
}

#[derive(Serialize, Deserialize)]
pub struct Shyvana {
    #[serde(rename = "type")]
    shyvana_type: GroupEnum,
    format: String,
    version: String,
    data: ShyvanaData,
}
#[derive(Serialize, Deserialize)] pub struct ShyvanaData { #[serde(rename = "Shyvana")] shyvana: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Singed { #[serde(rename = "type")] singed_type: GroupEnum, format: String, version: String, data: SingedData, } #[derive(Serialize, Deserialize)] pub struct SingedData { #[serde(rename = "Singed")] singed: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Sion { #[serde(rename = "type")] sion_type: GroupEnum, format: String, version: String, data: SionData, } #[derive(Serialize, Deserialize)] pub struct SionData { #[serde(rename = "Sion")] sion: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Sivir { #[serde(rename = "type")] sivir_type: GroupEnum, format: String, version: String, data: SivirData, } #[derive(Serialize, Deserialize)] pub struct SivirData { #[serde(rename = "Sivir")] sivir: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Skarner { #[serde(rename = "type")] skarner_type: GroupEnum, format: String, version: String, data: SkarnerData, } #[derive(Serialize, Deserialize)] pub struct SkarnerData { #[serde(rename = "Skarner")] skarner: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Sona { #[serde(rename = "type")] sona_type: GroupEnum, format: String, version: String, data: SonaData, } #[derive(Serialize, Deserialize)] pub struct SonaData { #[serde(rename = "Sona")] sona: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Soraka { #[serde(rename = "type")] soraka_type: GroupEnum, format: String, version: String, data: SorakaData, } #[derive(Serialize, Deserialize)] pub struct SorakaData { #[serde(rename = "Soraka")] soraka: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Swain { #[serde(rename = "type")] swain_type: GroupEnum, format: String, version: String, data: SwainData, } #[derive(Serialize, Deserialize)] pub struct SwainData { #[serde(rename = "Swain")] swain: BrandClass, } #[derive(Serialize, Deserialize)] pub struct 
Sylas { #[serde(rename = "type")] sylas_type: GroupEnum, format: String, version: String, data: SylasData, } #[derive(Serialize, Deserialize)] pub struct SylasData { #[serde(rename = "Sylas")] sylas: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Syndra { #[serde(rename = "type")] syndra_type: GroupEnum, format: String, version: String, data: SyndraData, } #[derive(Serialize, Deserialize)] pub struct SyndraData { #[serde(rename = "Syndra")] syndra: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct TahmKench { #[serde(rename = "type")] tahm_kench_type: GroupEnum, format: String, version: String, data: TahmKenchData, } #[derive(Serialize, Deserialize)] pub struct TahmKenchData { #[serde(rename = "TahmKench")] tahm_kench: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Taliyah { #[serde(rename = "type")] taliyah_type: GroupEnum, format: String, version: String, data: TaliyahData, } #[derive(Serialize, Deserialize)] pub struct TaliyahData { #[serde(rename = "Taliyah")] taliyah: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Talon { #[serde(rename = "type")] talon_type: GroupEnum, format: String, version: String, data: TalonData, } #[derive(Serialize, Deserialize)] pub struct TalonData { #[serde(rename = "Talon")] talon: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Taric { #[serde(rename = "type")] taric_type: GroupEnum, format: String, version: String, data: TaricData, } #[derive(Serialize, Deserialize)] pub struct TaricData { #[serde(rename = "Taric")] taric: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Teemo { #[serde(rename = "type")] teemo_type: GroupEnum, format: String, version: String, data: TeemoData, } #[derive(Serialize, Deserialize)] pub struct TeemoData { #[serde(rename = "Teemo")] teemo: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Thresh { #[serde(rename = "type")] thresh_type: GroupEnum, format: String, version: String, data: ThreshData, } 
#[derive(Serialize, Deserialize)] pub struct ThreshData { #[serde(rename = "Thresh")] thresh: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Tristana { #[serde(rename = "type")] tristana_type: GroupEnum, format: String, version: String, data: TristanaData, } #[derive(Serialize, Deserialize)] pub struct TristanaData { #[serde(rename = "Tristana")] tristana: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Trundle { #[serde(rename = "type")] trundle_type: GroupEnum, format: String, version: String, data: TrundleData, } #[derive(Serialize, Deserialize)] pub struct TrundleData { #[serde(rename = "Trundle")] trundle: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Tryndamere { #[serde(rename = "type")] tryndamere_type: GroupEnum, format: String, version: String, data: TryndamereData, } #[derive(Serialize, Deserialize)] pub struct TryndamereData { #[serde(rename = "Tryndamere")] tryndamere: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct TwistedFate { #[serde(rename = "type")] twisted_fate_type: GroupEnum, format: String, version: String, data: TwistedFateData, } #[derive(Serialize, Deserialize)] pub struct TwistedFateData { #[serde(rename = "TwistedFate")] twisted_fate: BrandClass, } #[derive(Serialize, Deserialize)] pub struct Twitch { #[serde(rename = "type")] twitch_type: GroupEnum, format: String, version: String, data: TwitchData, } #[derive(Serialize, Deserialize)] pub struct TwitchData { #[serde(rename = "Twitch")] twitch: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Udyr { #[serde(rename = "type")] udyr_type: GroupEnum, format: String, version: String, data: UdyrData, } #[derive(Serialize, Deserialize)] pub struct UdyrData { #[serde(rename = "Udyr")] udyr: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Urgot { #[serde(rename = "type")] urgot_type: GroupEnum, format: String, version: String, data: UrgotData, } #[derive(Serialize, Deserialize)] pub struct UrgotData { 
#[serde(rename = "Urgot")] urgot: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Varus { #[serde(rename = "type")] varus_type: GroupEnum, format: String, version: String, data: VarusData, } #[derive(Serialize, Deserialize)] pub struct VarusData { #[serde(rename = "Varus")] varus: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Vayne { #[serde(rename = "type")] vayne_type: GroupEnum, format: String, version: String, data: VayneData, } #[derive(Serialize, Deserialize)] pub struct VayneData { #[serde(rename = "Vayne")] vayne: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Veigar { #[serde(rename = "type")] veigar_type: GroupEnum, format: String, version: String, data: VeigarData, } #[derive(Serialize, Deserialize)] pub struct VeigarData { #[serde(rename = "Veigar")] veigar: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Velkoz { #[serde(rename = "type")] velkoz_type: GroupEnum, format: String, version: String, data: VelkozData, } #[derive(Serialize, Deserialize)] pub struct VelkozData { #[serde(rename = "Velkoz")] velkoz: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Vi { #[serde(rename = "type")] vi_type: GroupEnum, format: String, version: String, data: ViData, } #[derive(Serialize, Deserialize)] pub struct ViData { #[serde(rename = "Vi")] vi: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Viktor { #[serde(rename = "type")] viktor_type: GroupEnum, format: String, version: String, data: ViktorData, } #[derive(Serialize, Deserialize)] pub struct ViktorData { #[serde(rename = "Viktor")] viktor: HammerfestPonies, } #[derive(Serialize, Deserialize)] pub struct Vladimir { #[serde(rename = "type")] vladimir_type: GroupEnum, format: String, version: String, data: VladimirData, } #[derive(Serialize, Deserialize)] pub struct VladimirData { #[serde(rename = "Vladimir")] vladimir: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Volibear { #[serde(rename = "type")] 
volibear_type: GroupEnum, format: String, version: String, data: VolibearData, } #[derive(Serialize, Deserialize)] pub struct VolibearData { #[serde(rename = "Volibear")] volibear: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Warwick { #[serde(rename = "type")] warwick_type: GroupEnum, format: String, version: String, data: WarwickData, } #[derive(Serialize, Deserialize)] pub struct WarwickData { #[serde(rename = "Warwick")] warwick: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Xayah { #[serde(rename = "type")] xayah_type: GroupEnum, format: String, version: String, data: XayahData, } #[derive(Serialize, Deserialize)] pub struct XayahData { #[serde(rename = "Xayah")] xayah: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Xerath { #[serde(rename = "type")] xerath_type: GroupEnum, format: String, version: String, data: XerathData, } #[derive(Serialize, Deserialize)] pub struct XerathData { #[serde(rename = "Xerath")] xerath: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct XinZhao { #[serde(rename = "type")] xin_zhao_type: GroupEnum, format: String, version: String, data: XinZhaoData, } #[derive(Serialize, Deserialize)] pub struct XinZhaoData { #[serde(rename = "XinZhao")] xin_zhao: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Yasuo { #[serde(rename = "type")] yasuo_type: GroupEnum, format: String, version: String, data: YasuoData, } #[derive(Serialize, Deserialize)] pub struct YasuoData { #[serde(rename = "Yasuo")] yasuo: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Yorick { #[serde(rename = "type")] yorick_type: GroupEnum, format: String, version: String, data: YorickData, } #[derive(Serialize, Deserialize)] pub struct YorickData { #[serde(rename = "Yorick")] yorick: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Yuumi { #[serde(rename = "type")] yuumi_type: GroupEnum, format: String, version: String, data: YuumiData, } #[derive(Serialize, Deserialize)] pub struct 
YuumiData { #[serde(rename = "Yuumi")] yuumi: QiyanaClass, } #[derive(Serialize, Deserialize)] pub struct Zac { #[serde(rename = "type")] zac_type: GroupEnum, format: String, version: String, data: ZacData, } #[derive(Serialize, Deserialize)] pub struct ZacData { #[serde(rename = "Zac")] zac: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Zed { #[serde(rename = "type")] zed_type: GroupEnum, format: String, version: String, data: ZedData, } #[derive(Serialize, Deserialize)] pub struct ZedData { #[serde(rename = "Zed")] zed: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Ziggs { #[serde(rename = "type")] ziggs_type: GroupEnum, format: String, version: String, data: ZiggsData, } #[derive(Serialize, Deserialize)] pub struct ZiggsData { #[serde(rename = "Ziggs")] ziggs: PuneHedgehog, } #[derive(Serialize, Deserialize)] pub struct Zilean { #[serde(rename = "type")] zilean_type: GroupEnum, format: String, version: String, data: ZileanData, } #[derive(Serialize, Deserialize)] pub struct ZileanData { #[serde(rename = "Zilean")] zilean: PurpleAnivia, } #[derive(Serialize, Deserialize)] pub struct Zoe { #[serde(rename = "type")] zoe_type: GroupEnum, format: String, version: String, data: ZoeData, } #[derive(Serialize, Deserialize)] pub struct ZoeData { #[serde(rename = "Zoe")] zoe: ZoeClass, } #[derive(Serialize, Deserialize)] pub struct ZoeClass { id: FluffyId, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: Partype, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<QiyanaRecommended>, } #[derive(Serialize, Deserialize)] pub struct Zyra { #[serde(rename = "type")] zyra_type: GroupEnum, format: String, version: String, data: ZyraData, } #[derive(Serialize, Deserialize)] pub struct ZyraData { #[serde(rename = "Zyra")] zyra: HammerfestPonies, } #[derive(Serialize, Deserialize)] 
#[serde(untagged)] pub enum Coeff { Double(f64), DoubleArray(Vec<f64>), } #[derive(Serialize, Deserialize)] pub enum GroupEnum { #[serde(rename = "champion")] Champion, #[serde(rename = "passive")] Passive, #[serde(rename = "spell")] Spell, } #[derive(Serialize, Deserialize)] pub enum Sprite { #[serde(rename = "champion0.png")] Champion0Png, #[serde(rename = "champion1.png")] Champion1Png, #[serde(rename = "champion2.png")] Champion2Png, #[serde(rename = "champion3.png")] Champion3Png, #[serde(rename = "champion4.png")] Champion4Png, #[serde(rename = "passive0.png")] Passive0Png, #[serde(rename = "passive1.png")] Passive1Png, #[serde(rename = "passive2.png")] Passive2Png, #[serde(rename = "passive3.png")] Passive3Png, #[serde(rename = "passive4.png")] Passive4Png, #[serde(rename = "spell0.png")] Spell0Png, #[serde(rename = "spell10.png")] Spell10Png, #[serde(rename = "spell11.png")] Spell11Png, #[serde(rename = "spell12.png")] Spell12Png, #[serde(rename = "spell13.png")] Spell13Png, #[serde(rename = "spell14.png")] Spell14Png, #[serde(rename = "spell1.png")] Spell1Png, #[serde(rename = "spell2.png")] Spell2Png, #[serde(rename = "spell3.png")] Spell3Png, #[serde(rename = "spell4.png")] Spell4Png, #[serde(rename = "spell5.png")] Spell5Png, #[serde(rename = "spell6.png")] Spell6Png, #[serde(rename = "spell7.png")] Spell7Png, #[serde(rename = "spell8.png")] Spell8Png, #[serde(rename = "spell9.png")] Spell9Png, } #[derive(Serialize, Deserialize)] pub enum BlockType { #[serde(rename = "ability_scaling")] AbilityScaling, #[serde(rename = "aggressive")] Aggressive, #[serde(rename = "agressive")] Agressive, #[serde(rename = "beginner_advanced")] BeginnerAdvanced, #[serde(rename = "beginner_legendary")] BeginnerLegendary, #[serde(rename = "beginner_LegendaryItem")] BeginnerLegendaryItem, #[serde(rename = "beginner_legendaryitem")] BeginnerLegendaryitem, #[serde(rename = "beginner_MoreLegendaryItems")] BeginnerMoreLegendaryItems, #[serde(rename = 
"beginner_morelegendaryitems")] BeginnerMorelegendaryitems, #[serde(rename = "beginner_MovementSpeed")] BeginnerMovementSpeed, #[serde(rename = "beginner_movementspeed")] BeginnerMovementspeed, #[serde(rename = "beginner_movespeed")] BeginnerMovespeed, #[serde(rename = "beginner_starter")] BeginnerStarter, #[serde(rename = "champspecific")] Champspecific, #[serde(rename = "consumable")] Consumable, #[serde(rename = "consumables")] Consumables, #[serde(rename = "consumablesjungle")] Consumablesjungle, #[serde(rename = "defensive")] Defensive, #[serde(rename = "defensivejungle")] Defensivejungle, #[serde(rename = "early")] Early, #[serde(rename = "earlyjungle")] Earlyjungle, #[serde(rename = "")] Empty, #[serde(rename = "essential")] Essential, #[serde(rename = "essentialjungle")] Essentialjungle, KingPoroSnax, #[serde(rename = "kingporosnax")] Kingporosnax, #[serde(rename = "npe1")] Npe1, #[serde(rename = "npe2")] Npe2, #[serde(rename = "npe3")] Npe3, #[serde(rename = "npe4")] Npe4, #[serde(rename = "odyjinx1")] Odyjinx1, #[serde(rename = "odyjinx2")] Odyjinx2, #[serde(rename = "odyjinx3")] Odyjinx3, #[serde(rename = "odymalphite1")] Odymalphite1, #[serde(rename = "odymalphite2")] Odymalphite2, #[serde(rename = "odymalphite3")] Odymalphite3, #[serde(rename = "odysona1")] Odysona1, #[serde(rename = "odysona2")] Odysona2, #[serde(rename = "odysona3")] Odysona3, #[serde(rename = "odyyasuo1")] Odyyasuo1, #[serde(rename = "odyyasuo2")] Odyyasuo2, #[serde(rename = "odyyasuo3")] Odyyasuo3, #[serde(rename = "odyziggs1")] Odyziggs1, #[serde(rename = "odyziggs2")] Odyziggs2, #[serde(rename = "odyziggs3")] Odyziggs3, #[serde(rename = "offensive")] Offensive, #[serde(rename = "offmeta")] Offmeta, #[serde(rename = "ornnupgrades")] Ornnupgrades, #[serde(rename = "protective")] Protective, #[serde(rename = "recommended")] Recommended, #[serde(rename = "selective")] Selective, #[serde(rename = "siegeDefense")] SiegeDefense, #[serde(rename = "siegeOffense")] SiegeOffense, 
#[serde(rename = "siegedefense")] Siegedefense, #[serde(rename = "siegeoffense")] Siegeoffense, #[serde(rename = "situational")] Situational, #[serde(rename = "situationaljungle")] Situationaljungle, #[serde(rename = "standard")] Standard, #[serde(rename = "standardjungle")] Standardjungle, #[serde(rename = "starting")] Starting, #[serde(rename = "startingjungle")] Startingjungle, #[serde(rename = "support")] Support, #[serde(rename = "1)buystarteritems")] The1Buystarteritems, #[serde(rename = "beginner_Advanced")] TypeBeginnerAdvanced, #[serde(rename = "beginner_Starter")] TypeBeginnerStarter, } #[derive(Serialize, Deserialize)] pub enum Of { #[serde(rename = "")] Empty, #[serde(rename = "kaynass")] Kaynass, #[serde(rename = "kaynslay")] Kaynslay, } #[derive(Serialize, Deserialize)] pub enum IfSummonerSpell { #[serde(rename = "")] Empty, ItemSmiteAoE, ItemTeleportCancel, OdinTrinketRevive, #[serde(rename = "S5_SummonerSmiteDuel")] S5SummonerSmiteDuel, #[serde(rename = "S5_SummonerSmitePlayerGanker")] S5SummonerSmitePlayerGanker, #[serde(rename = "S5_SummonerSmiteQuick")] S5SummonerSmiteQuick, SummonerOdinPromote, SummonerPoroRecall, SummonerPoroThrow, SummonerReturn, SummonerSiegeChampSelect2, SummonerSmite, #[serde(rename = "SummonerSnowURFSnowball_Mark")] SummonerSnowUrfSnowballMark, SummonerSnowball, SummonerTeleport, TeleportCancel, } #[derive(Serialize, Deserialize)] pub enum Map { #[serde(rename = "any")] Any, CityPark, CrystalScar, #[serde(rename = "HA")] Ha, Odyssey, ProjectSlums, #[serde(rename = "SL")] Sl, #[serde(rename = "SR")] Sr, #[serde(rename = "TT")] Tt, } #[derive(Serialize, Deserialize)] pub enum Mode { #[serde(rename = "any")] Any, #[serde(rename = "ARAM")] Aram, #[serde(rename = "ASCENSION")] Ascension, #[serde(rename = "CLASSIC")] Classic, #[serde(rename = "FIRSTBLOOD")] Firstblood, #[serde(rename = "GAMEMODEX")] Gamemodex, #[serde(rename = "INTRO")] Intro, #[serde(rename = "KINGPORO")] Kingporo, #[serde(rename = "ODIN")] Odin, #[serde(rename 
= "ODYSSEY")] Odyssey, #[serde(rename = "PROJECT")] Project, #[serde(rename = "SIEGE")] Siege, #[serde(rename = "STARGUARDIAN")] Starguardian, #[serde(rename = "TUTORIAL")] Tutorial, #[serde(rename = "TUTORIAL_MODULE_2")] TutorialModule2, #[serde(rename = "TUTORIAL_MODULE_3")] TutorialModule3, } #[derive(Serialize, Deserialize)] pub enum RecommendedType { #[serde(rename = "riot")] Riot, #[serde(rename = "riot-mid")] RiotMid, #[serde(rename = "riot-support")] RiotSupport, } #[derive(Serialize, Deserialize)] pub enum CostType { #[serde(rename = " {{ abilityresourcename }}")] Abilityresourcename, #[serde(rename = " 魔力、所有充能")] Ambitious, #[serde(rename = " 個炮臺儲存數 & {{ cost }} 魔力")] Cost, #[serde(rename = " 能量")] CostType, #[serde(rename = "1 顆種子")] CostType1, #[serde(rename = "% 最大生命、{{ cost }} 魔力")] CostTypeCost, #[serde(rename = "被動")] Cunning, #[serde(rename = "無消耗")] Empty, #[serde(rename = " 生命")] Fluffy, #[serde(rename = "% 最大生命")] Frisky, #[serde(rename = " 點憤怒值 / 每秒")] Hilarious, #[serde(rename = " 熱能")] Indecent, #[serde(rename = "消耗 生命")] Indigo, #[serde(rename = "% 目前生命")] Magenta, #[serde(rename = "&nbsp;")] Nbsp, #[serde(rename = " 魔力")] Purple, #[serde(rename = "")] Sticky, #[serde(rename = "% 當前生命")] Tentacled, #[serde(rename = "產生 1 點殘虐值")] The1, } #[derive(Serialize, Deserialize)] pub enum Key { #[serde(rename = "a1")] A1, #[serde(rename = "a2")] A2, #[serde(rename = "f1")] F1, #[serde(rename = "f2")] F2, #[serde(rename = "f3")] F3, #[serde(rename = "f4")] F4, } #[derive(Serialize, Deserialize)] pub enum Link { #[serde(rename = "armor")] Armor, #[serde(rename = "attackdamage")] Attackdamage, #[serde(rename = "bonusarmor")] Bonusarmor, #[serde(rename = "bonusattackdamage")] Bonusattackdamage, #[serde(rename = "bonushealth")] Bonushealth, #[serde(rename = "bonusspellblock")] Bonusspellblock, #[serde(rename = "health")] Health, #[serde(rename = "@special.BraumWArmor")] SpecialBraumWArmor, #[serde(rename = "@special.BraumWMR")] SpecialBraumWmr, 
#[serde(rename = "@special.jaxrarmor")] SpecialJaxrarmor, #[serde(rename = "@special.jaxrmr")] SpecialJaxrmr, #[serde(rename = "@special.nautilusq")] SpecialNautilusq, #[serde(rename = "@special.viw")] SpecialViw, #[serde(rename = "spelldamage")] Spelldamage, #[serde(rename = "@stacks")] Stacks, #[serde(rename = "@text")] Text, } #[derive(Serialize, Deserialize)] pub enum Tag { Assassin, Fighter, Mage, Marksman, Support, Tank, } #[derive(Serialize, Deserialize)] pub enum Partype { #[serde(rename = "魔力")] Empty, #[serde(rename = "能量")] Partype, #[serde(rename = "怒氣")] Purple, } #[derive(Serialize, Deserialize)] pub enum PurpleId { Anivia, Annie, AurelionSol, Cassiopeia, Kassadin, Maokai, Orianna, Xerath, Zilean, } #[derive(Serialize, Deserialize)] pub enum BrandId { Brand, Swain, TwistedFate, } #[derive(Serialize, Deserialize)] pub enum FluffyId { Qiyana, Senna, Yuumi, Zoe, }
20.853738
53
0.646481
e58b1e2936e08d955fb62ce1234bfae645af2528
27,258
use std::{cmp, f64::consts, mem}; use ieee754::Ieee754; use noisy_float::types::n64; use probability::distribution::{Inverse, Laplace}; #[cfg(not(feature="use-mpfr"))] use probability::prelude::Gaussian; #[cfg(feature="use-mpfr")] use rug::{Float, rand::{ThreadRandGen, ThreadRandState}}; use smartnoise_validator::components::snapping_mechanism::{compute_precision, get_smallest_greater_or_eq_power_of_two, redefine_epsilon}; use smartnoise_validator::errors::*; use smartnoise_validator::Integer; use crate::utilities; use crate::utilities::get_closest_multiple_of_lambda; // Give MPFR ability to draw randomness from OpenSSL #[cfg(feature="use-mpfr")] struct GeneratorOpenSSL; #[cfg(feature="use-mpfr")] impl ThreadRandGen for GeneratorOpenSSL { fn gen(&mut self) -> u32 { let mut buffer = [0u8; 4]; // impossible not to panic here // cannot ignore errors with .ok(), because the buffer will remain 0 utilities::fill_bytes(&mut buffer).unwrap(); u32::from_ne_bytes(buffer) } } /// Return sample from a censored Geometric distribution with parameter p=0.5 without calling to sample_bit_prob. /// /// The algorithm generates 1023 bits uniformly at random and returns the /// index of the first bit with value 1. If all 1023 bits are 0, then /// the algorithm acts as if the last bit was a 1 and returns 1022. /// /// This is a less general version of the sample_geometric_censored function, designed to be used /// only inside of the sample_bit_prob function. The major difference is that this function does not /// call sample_bit_prob itself (whereas sample_geometric_censored does), so having this more specialized /// version allows us to avoid an infinite dependence loop. 
pub fn censored_specific_geom(enforce_constant_time: bool) -> Result<i16> { Ok(if enforce_constant_time { let mut buffer = vec!(0_u8; 128); utilities::fill_bytes(&mut buffer)?; cmp::min(buffer.into_iter().enumerate() // ignore samples that contain no events .filter(|(_, sample)| sample > &0) // compute the index of the smallest event in the batch .map(|(i, sample)| 8 * i + sample.leading_zeros() as usize) // retrieve the smallest index .min() // return 1022 if no events occurred (slight dp violation w.p. ~2^-52) .unwrap_or(1022) as i16, 1022) } else { // retrieve up to 128 bytes, each containing 8 trials for i in 0..128 { let mut buffer = vec!(0_u8; 1); utilities::fill_bytes(&mut buffer)?; if buffer[0] > 0 { return Ok(cmp::min(i * 8 + buffer[0].leading_zeros() as i16, 1022)) } } 1022 }) } /// Sample a single bit with arbitrary probability of success /// /// Uses only an unbiased source of coin flips. /// The strategy for doing this with 2 flips in expectation is described [here](https://amakelov.wordpress.com/2013/10/10/arbitrarily-biasing-a-coin-in-2-expected-tosses/). /// /// # Arguments /// * `prob`- The desired probability of success (bit = 1). 
/// * `enforce_constant_time` - Whether or not to enforce the algorithm to run in constant time
///
/// # Return
/// A bit that is 1 with probability "prob"
///
/// # Examples
///
/// ```
/// // returns a bit with Pr(bit = 1) = 0.7
/// use smartnoise_runtime::utilities::noise::sample_bit_prob;
/// let n = sample_bit_prob(0.7, false);
/// # n.unwrap();
/// ```
/// ```should_panic
/// // fails because 1.3 not a valid probability
/// use smartnoise_runtime::utilities::noise::sample_bit_prob;
/// let n = sample_bit_prob(1.3, false);
/// # n.unwrap();
/// ```
/// ```should_panic
/// // fails because -0.3 is not a valid probability
/// use smartnoise_runtime::utilities::noise::sample_bit_prob;
/// let n = sample_bit_prob(-0.3, false);
/// # n.unwrap();
/// ```
pub fn sample_bit_prob(prob: f64, enforce_constant_time: bool) -> Result<bool> {

    // ensure that prob is a valid probability
    if prob < 0.0 || prob > 1.0 {return Err("probability is not within [0, 1]".into())}

    // decompose probability into mantissa and exponent integers to quickly identify the value in the first_heads_index
    let (_sign, exponent, mantissa) = prob.decompose_raw();

    // repeatedly flip fair coin (up to 1023 times) and identify index (0-based) of first heads
    let first_heads_index = censored_specific_geom(enforce_constant_time)?;

    // if prob == 1., return after retrieving censored_specific_geom, to protect constant time
    if exponent == 1023 { return Ok(true) }

    // number of leading zeros in binary representation of prob
    //    cast is non-saturating because exponent only uses first 11 bits
    //    exponent is bounded within [0, 1022] by check for valid probability
    let num_leading_zeros = 1022_i16 - exponent as i16;

    // 0 is the most significant/leftmost implicit bit in the mantissa/fraction/significand
    // 52 is the least significant/rightmost
    Ok(match first_heads_index - num_leading_zeros {
        // index into the leading zeros of the binary representation
        i if i < 0 => false,
        // bit index 0 is implicitly set in ieee-754 when the exponent is nonzero
        i if i == 0 => exponent != 0,
        // all other digits out-of-bounds are not float-approximated/are-implicitly-zero
        i if i > 52 => false,
        // retrieve the bit at `i` slots shifted from the left
        i => mantissa & (1_u64 << (52 - i as usize)) != 0
    })
}

/// Sample from the binomial distribution.
///
/// Runs `n` independent Bernoulli(prob) trials and counts the successes.
///
/// # Arguments
/// * `n` - Number of trials
/// * `prob`- The desired probability of success (bit = 1).
/// * `enforce_constant_time` - Whether or not to enforce the algorithm to run in constant time
///
/// # Return
/// Number of successful trials
pub fn sample_binomial(n: i64, prob: f64, enforce_constant_time: bool) -> Result<i64> {
    // try_fold short-circuits on the first sampling error
    (0..n).try_fold(0, |sum, _| sample_bit_prob(prob, enforce_constant_time)
        .map(|v| sum + if v {1} else {0}))
}

#[cfg(test)]
mod test_sample_bit_prob {
    use ieee754::Ieee754;
    use itertools::Itertools;

    use crate::utilities::noise::{sample_bit_prob, sample_uniform};

    // Cross-checks two ways of reading the mantissa bits of a float:
    // via a formatted binary string and via direct bit masking.
    fn check_bit_vs_string_equal(value: f64) {
        let (_sign, _exponent, mut mantissa) = value.decompose_raw();
        let mantissa_string = format!("1{:052b}", mantissa); // add implicit 1 to mantissa
        let mantissa_vec: Vec<i64> = mantissa_string.chars()
            .map(|x| x.to_digit(2).unwrap() as i64).collect();
        let to_str = |v| if v {"1"} else {"0"};

        let vec_bits = (0..mantissa_string.len())
            .map(|idx| mantissa_vec[idx] != 0)
            .map(to_str).join("");

        // set the implicit 1
        mantissa |= 1u64 << 52;
        let log_bits = (0..mantissa_string.len())
            .map(|idx| mantissa & (1u64 << (52 - idx)) != 0u64)
            .map(to_str).join("");

        // println!("vec_bits: {:?}", vec_bits);
        // println!("log_bits: {:?}", log_bits);
        assert_eq!(vec_bits, log_bits);
    }

    #[test]
    fn random_bit_vs_string() {
        for _ in 0..1000 {
            let prob = sample_uniform(0., 1., false).unwrap();
            check_bit_vs_string_equal(prob)
        }
    }

    #[test]
    fn sample_bit_prob_random() {
        let trials = 10_000;

        (0..=100)
            .map(|i| 0.01 * i as f64)
            .map(|prob| (prob, (0..trials)
                // NOTE(review): the fold seed of 1 inflates every empirical
                // frequency by 1/trials — presumably the seed was meant to be
                // 0; harmless here since the test only prints, never asserts.
                .fold(1, |sum, _| sum + sample_bit_prob(prob, false).unwrap() as i32) as f64 / trials as f64))
            .map(|(prob, actual)| (prob, actual - prob))
            .filter(|(_, bias)| bias.abs() > 0.01)
            .for_each(|(prob, bias)| println!("expected: {:?}, bias: {:?}", prob, bias));
    }

    #[test]
    fn sample_bit_prob_edge() {
        for _ in 0..10_000 {
            assert!(!sample_bit_prob(0., false).unwrap());
            assert!(sample_bit_prob(1., false).unwrap());
        }
    }

    #[test]
    fn edge_cases_bit_vs_string() {
        check_bit_vs_string_equal(0.);
        check_bit_vs_string_equal(1.);
        check_bit_vs_string_equal(f64::MAX);
        check_bit_vs_string_equal(f64::MIN)
    }
}

/// Sample a single fair coin flip from the crate's byte source.
///
/// Returns the lowest-order bit of one random byte; the remaining seven
/// random bits of the byte are discarded.
pub fn sample_bit() -> Result<bool> {
    let mut buffer = [0u8; 1];
    utilities::fill_bytes(&mut buffer)?;
    Ok(buffer[0] & 1 == 1)
}

#[cfg(test)]
mod test_sample_bit {
    use crate::utilities::noise::sample_bit;

    #[test]
    fn test_sample_bit() {
        (0..100).for_each(|_| {
            dbg!(sample_bit().unwrap());
        });
    }
}

/// Sample from uniform integers between min and max (inclusive).
///
/// # Arguments
///
/// * `min` - &i64, minimum value of distribution to sample from
/// * `max` - &i64, maximum value of distribution to sample from
///
/// # Return
/// Random uniform variable between min and max (inclusive).
/// /// # Example /// /// ``` /// // returns a uniform draw from the set {0,1,2} /// use smartnoise_runtime::utilities::noise::sample_uniform_int; /// let n = sample_uniform_int(0, 2).unwrap(); /// assert!(n == 0 || n == 1 || n == 2); /// ``` /// /// ```should_panic /// // fails because min > max /// use smartnoise_runtime::utilities::noise::sample_uniform_int; /// let n = sample_uniform_int(2, 0); /// # n.unwrap(); /// ``` pub fn sample_uniform_int(min: Integer, max: Integer) -> Result<Integer> { if min > max {return Err("min may not be greater than max".into());} // define number of possible integers we could sample and the maximum // number of bits it would take to represent them let n_ints: Integer = max - min + 1; let n_bytes = ((n_ints as f64).log2()).ceil() as usize / 8 + 1; // uniformly sample integers from the set {0, 1, ..., n_ints-1} // by filling the first n_bytes of a buffer with noise, // interpreting the buffer as an i64, // and rejecting integers that are too large let mut buffer = [0u8; mem::size_of::<Integer>()]; loop { utilities::fill_bytes(&mut buffer[..n_bytes])?; let uniform_int = i64::from_le_bytes(buffer); if uniform_int < n_ints { return Ok(uniform_int + min) } } } #[cfg(test)] mod test_sample_uniform_int { use crate::utilities::noise::sample_uniform_int; #[test] fn test_sample_bit() { (0..1_000).for_each(|_| { println!("{:?}", sample_uniform_int(0, 100).unwrap()); }); } } /// Returns random sample from Uniform[min,max). /// /// All notes below refer to the version that samples from [0,1), before the final scaling takes place. /// /// This algorithm is taken from [Mironov (2012)](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.366.5957&rep=rep1&type=pdf) /// and is important for making some of the guarantees in the paper. /// /// The idea behind the uniform sampling is to first sample a "precision band". 
/// Each band is a range of floating point numbers with the same level of arithmetic precision
/// and is situated between powers of two.
/// A band is sampled with probability relative to the unit of least precision using the Geometric distribution.
/// That is, the uniform sampler will generate the band [1/2,1) with probability 1/2, [1/4,1/2) with probability 1/4,
/// and so on.
///
/// Once the precision band has been selected, floating numbers numbers are generated uniformly within the band
/// by generating a 52-bit mantissa uniformly at random.
///
/// # Arguments
///
/// `min`: f64 minimum of uniform distribution (inclusive)
/// `max`: f64 maximum of uniform distribution (non-inclusive)
///
/// # Return
/// Random draw from Unif[min, max).
///
/// # Example
/// ```
/// // valid draw from Unif[0,2)
/// use smartnoise_runtime::utilities::noise::sample_uniform;
/// let unif = sample_uniform(0.0, 2.0, false);
/// # unif.unwrap();
/// ```
/// ``` should_panic
/// // fails because min > max
/// use smartnoise_runtime::utilities::noise::sample_uniform;
/// let unif = sample_uniform(2.0, 0.0, false);
/// # unif.unwrap();
/// ```
pub fn sample_uniform(min: f64, max: f64, enforce_constant_time: bool) -> Result<f64> {

    if min > max {return Err("min may not be greater than max".into());}

    // Generate mantissa
    let mut mantissa_buffer = [0u8; 8];
    // mantissa bit index zero is implicit
    utilities::fill_bytes(&mut mantissa_buffer[1..])?;
    // limit the buffer to 52 bits
    mantissa_buffer[1] %= 16;

    // convert mantissa to integer
    let mantissa_int = u64::from_be_bytes(mantissa_buffer);

    // Generate exponent. A saturated mantissa with implicit bit is ~2
    let exponent: i16 = -(1 + censored_specific_geom(enforce_constant_time)?);

    // Generate uniform random number from [0,1)
    let uniform_rand = f64::recompose(false, exponent, mantissa_int);

    Ok(uniform_rand * (max - min) + min)
}

#[cfg(test)]
mod test_uniform {
    use crate::utilities::noise::sample_uniform;

    #[test]
    fn test_uniform() {
        // (1..=100).for_each(|idx| println!("{:?}", (1. / 100. * idx as f64).decompose()));
        // println!("{:?}", 1.0f64.decompose());

        let min = 0.;
        let max = 1.;
        if !(0..1000).all(|_| {
            let sample = sample_uniform(min, max, false).unwrap();
            let within = min <= sample && max >= sample;
            if !within {
                println!("value outside of range: {:?}", sample);
            }
            within
        }) {
            panic!("not all numbers are within the range")
        }
    }

    #[test]
    fn test_endian() {

        use ieee754::Ieee754;
        let old_mantissa = 0.192f64.decompose().2;
        let mut buffer = old_mantissa.to_be_bytes();
        // from str_radix ignores these extra bits, but reconstruction from_be_bytes uses them
        buffer[1] = buffer[1] + 32;
        println!("{:?}", buffer);

        let new_buffer = buffer.iter()
            .map(|v| format!("{:08b}", v))
            .collect::<Vec<String>>();
        println!("{:?}", new_buffer);
        let new_mantissa = u64::from_str_radix(&new_buffer.concat(), 2).unwrap();
        println!("{:?} {:?}", old_mantissa, new_mantissa);

        let int_bytes = 12i64.to_le_bytes();
        println!("{:?}", int_bytes);
    }
}

/// Returns random sample from Uniform[min,max) using the MPFR library.
///
/// If [min, max) == [0, 1),then this is done in a way that respects exact rounding.
/// Otherwise, the return will be the result of a composition of two operations that
/// respect exact rounding (though the result will not necessarily).
///
/// # Arguments
/// * `min` - Lower bound of uniform distribution.
/// * `max` - Upper bound of uniform distribution.
///
/// # Return
/// Draw from Unif[min, max].
/// /// # Example /// ``` /// use smartnoise_runtime::utilities::noise::sample_uniform_mpfr; /// let unif = sample_uniform_mpfr(0.0, 1.0); /// # unif.unwrap(); /// ``` #[cfg(feature = "use-mpfr")] pub fn sample_uniform_mpfr(min: f64, max: f64) -> Result<rug::Float> { // initialize 64-bit floats within mpfr/rug let mpfr_min = Float::with_val(53, min); let mpfr_max = Float::with_val(53, max); let mpfr_diff = Float::with_val(53, &mpfr_max - &mpfr_min); // initialize randomness let mut rng = GeneratorOpenSSL {}; let mut state = ThreadRandState::new_custom(&mut rng); // generate Unif[0,1] according to mpfr standard, then convert to correct scale let mut unif = Float::with_val(53, Float::random_cont(&mut state)); unif = unif.mul_add(&mpfr_diff, &mpfr_min); // return uniform Ok(unif) } /// Sample from Laplace distribution centered at shift and scaled by scale. /// /// # Arguments /// /// * `shift` - The expectation of the Laplace distribution. /// * `scale` - The scaling parameter of the Laplace distribution. /// /// # Return /// Draw from Laplace(shift, scale). /// /// # Example /// ``` /// use smartnoise_runtime::utilities::noise::sample_laplace; /// let n = sample_laplace(0.0, 2.0, false); /// # n.unwrap(); /// ``` pub fn sample_laplace(shift: f64, scale: f64, enforce_constant_time: bool) -> Result<f64> { let probability: f64 = sample_uniform(0., 1., enforce_constant_time)?; Ok(Laplace::new(shift, scale).inverse(probability)) } /// Sample from Gaussian distribution centered at shift and scaled by scale. /// /// # Arguments /// /// * `shift` - The expectation of the Gaussian distribution. /// * `scale` - The scaling parameter (standard deviation) of the Gaussian distribution. /// /// # Return /// A draw from Gaussian(shift, scale). 
/// /// # Example /// ``` /// use smartnoise_runtime::utilities::noise::sample_gaussian; /// let n = sample_gaussian(0.0, 2.0, false); /// # n.unwrap(); /// ``` #[cfg(not(feature = "use-mpfr"))] pub fn sample_gaussian(shift: f64, scale: f64, enforce_constant_time: bool) -> Result<f64> { let probability: f64 = sample_uniform(0., 1., enforce_constant_time)?; Ok(Gaussian::new(shift, scale).inverse(probability)) } /// Generates a draw from a Gaussian(loc, scale) distribution using the MPFR library. /// /// If shift = 0 and scale = 1, sampling is done in a way that respects exact rounding. /// Otherwise, the return will be the result of a composition of two operations that /// respect exact rounding (though the result will not necessarily). /// /// # Arguments /// * `shift` - The expectation of the Gaussian distribution. /// * `scale` - The scaling parameter (standard deviation) of the Gaussian distribution. /// /// # Return /// Draw from Gaussian(loc, scale) /// /// # Example /// ``` /// use smartnoise_runtime::utilities::noise::sample_gaussian; /// let gaussian = sample_gaussian(0.0, 1.0, false); /// ``` #[cfg(feature = "use-mpfr")] pub fn sample_gaussian(shift: f64, scale: f64, _enforce_constant_time: bool) -> Result<f64> { // initialize 64-bit floats within mpfr/rug // NOTE: We square the scale here because we ask for the standard deviation as the function input, but // the mpfr library wants the variance. We ask for std. dev. to be consistent with the rest of the library. let mpfr_shift = Float::with_val(53, shift); let mpfr_scale = Float::with_val(53, Float::with_val(53, scale).square()); // initialize randomness let mut rng = GeneratorOpenSSL {}; let mut state = ThreadRandState::new_custom(&mut rng); // generate Gaussian(0,1) according to mpfr standard, then convert to correct scale let gauss = Float::with_val(64, Float::random_normal(&mut state)); Ok(gauss.mul_add(&mpfr_scale, &mpfr_shift).to_f64()) } /// Sample from truncated Gaussian distribution. 
/// /// This function uses a rejection sampling approach. /// This means that values outside of the truncation bounds are ignored, rather /// than pushed to the bounds (as they would be for a censored distribution). /// /// # Arguments /// /// * `shift` - The expectation of the untruncated Gaussian distribution. /// * `scale` - The scaling parameter (standard deviation) of the untruncated Gaussian distribution. /// * `min` - The minimum value you want to allow to be sampled. /// * `max` - The maximum value you want to allow to be sampled. /// /// # Return /// A draw from a Gaussian(shift, scale) truncated to [min, max]. /// /// # Example /// ``` /// use smartnoise_runtime::utilities::noise::sample_gaussian_truncated; /// let n= sample_gaussian_truncated(0.0, 1.0, 0.0, 2.0, false); /// # n.unwrap(); /// ``` pub fn sample_gaussian_truncated( min: f64, max: f64, shift: f64, scale: f64, enforce_constant_time: bool ) -> Result<f64> { if min > max {return Err("lower may not be greater than upper".into());} if scale <= 0.0 {return Err("scale must be greater than zero".into());} // return draw from distribution only if it is in correct range loop { let trunc_gauss = sample_gaussian(shift, scale, enforce_constant_time)?; if trunc_gauss >= min && trunc_gauss <= max { return Ok(trunc_gauss) } } } /// Sample from the censored geometric distribution with parameter "prob" and maximum /// number of trials "max_trials". /// /// # Arguments /// * `prob` - Parameter for the geometric distribution, the probability of success on any given trials. /// * `max_trials` - The maximum number of trials allowed. /// * `enforce_constant_time` - Whether or not to enforce the algorithm to run in constant time; if true, /// it will always run for "max_trials" trials. /// /// # Return /// A draw from the censored geometric distribution. 
/// /// # Example /// ``` /// use smartnoise_runtime::utilities::noise::sample_geometric_censored; /// let geom = sample_geometric_censored(0.1, 20, false); /// # geom.unwrap(); /// ``` pub fn sample_geometric_censored(prob: f64, max_trials: i64, enforce_constant_time: bool) -> Result<i64> { // ensure that prob is a valid probability if prob < 0.0 || prob > 1.0 {return Err("probability is not within [0, 1]".into())} let mut bit: bool; let mut n_trials: i64 = 0; let mut geom_return: i64 = 0; // generate bits until we find a 1 // if enforcing the runtime of the algorithm to be constant, the while loop // continues after the 1 is found and just stores the first location of a 1 bit. while n_trials < max_trials { bit = sample_bit_prob(prob, enforce_constant_time)?; n_trials += 1; // If we haven't seen a 1 yet, set the return to the current number of trials if bit && geom_return == 0 { geom_return = n_trials; if !enforce_constant_time { return Ok(geom_return); } } } // set geom_return to max if we never saw a bit equaling 1 if geom_return == 0 { geom_return = max_trials; // could also set this equal to n_trials - 1. } Ok(geom_return) } /// Sample noise according to geometric mechanism /// /// This function uses coin flips to sample from the geometric distribution, /// rather than using the inverse probability transform. This is done /// to avoid finite precision attacks. /// /// For this algorithm, the number of steps it takes to sample from the geometric /// is bounded above by (max - min). 
/// /// # Arguments /// * `scale` - scale parameter /// * `min` - minimum value of function to which you want to add noise /// * `max` - maximum value of function to which you want to add noise /// * `enforce_constant_time` - boolean for whether or not to require the geometric to run for the maximum number of trials /// /// # Return /// noise according to the geometric mechanism /// /// # Example /// ``` /// use ndarray::prelude::*; /// use smartnoise_runtime::utilities::noise::sample_simple_geometric_mechanism; /// let geom_noise = sample_simple_geometric_mechanism(1., 0, 100, false); /// ``` pub fn sample_simple_geometric_mechanism( scale: f64, min: i64, max: i64, enforce_constant_time: bool ) -> Result<i64> { let alpha: f64 = consts::E.powf(-1. / scale); let max_trials: i64 = max - min; // return 0 noise with probability (1-alpha) / (1+alpha), otherwise sample from geometric let unif: f64 = sample_uniform(0., 1., enforce_constant_time)?; Ok(if unif < (1. - alpha) / (1. + alpha) { 0 } else { // get random sign let sign: i64 = 2 * sample_bit()? as i64 - 1; // sample from censored geometric let geom: i64 = sample_geometric_censored(1. - alpha, max_trials, enforce_constant_time)?; sign * geom }) } /// Apply noise to value according to the Snapping mechanism. /// Sensitivity is assumed to be 1 in L1 space. /// /// # Arguments /// * `value` - Non-private value of the statistic to be privatized. /// * `epsilon` - Desired privacy guarantee. /// * `b` - Upper bound on function value being privatized. /// * `enforce_constant_time` - Whether or not to enforce the algorithm to run in constant time; /// /// # Returns /// Value of statistic with noise applied according to the Snapping mechanism. 
///
/// # Example
/// ```
/// use smartnoise_runtime::utilities::noise::apply_snapping_noise;
/// let value: f64 = 50.0;
/// let epsilon: f64 = 1.0;
/// let b: f64 = 100.0;
/// let value = apply_snapping_noise(value, epsilon, b, false);
/// println!("snapped value: {:?}", value.unwrap());
/// ```
#[cfg(feature = "use-mpfr")]
pub fn apply_snapping_noise(
    mut value: f64, mut epsilon: f64, b: f64,
    enforce_constant_time: bool
) -> Result<(f64, f64)> {
    // must be computed before redefining epsilon
    let precision = compute_precision(epsilon)?;

    // ensure that precision is supported by the OS
    if precision > rug::float::prec_max() {
        return Err("Operating system does not support sufficient precision to use the Snapping Mechanism".into());
    }
    // shorthand for constructing a rug::Float at the computed precision
    macro_rules! to_rug {($v:expr) => {rug::Float::with_val(precision, $v)}};

    // effective epsilon is reduced due to snapping mechanism
    epsilon = redefine_epsilon(epsilon, b, precision);
    if epsilon == 0.0 {
        return Err("epsilon is zero due to floating-point round-off".into())
    }

    let sign = if sample_bit()? {-1.} else {1.};

    // 1.0 because sensitivity has been scaled to one
    let lambda = 1.0 / epsilon;

    // draw from {d: d in Doubles && d in (0, 1)} with probability based on unit of least precision
    let u_star_sample = to_rug!(sample_uniform(0., 1., enforce_constant_time)?);

    // add noise
    //     rug is mandatory for ln
    //     rug is optional for sign * lambda
    value += (to_rug!(sign * lambda) * u_star_sample.ln()).to_f64();

    // snap to lambda
    let m = get_smallest_greater_or_eq_power_of_two(lambda)?;
    value = get_closest_multiple_of_lambda(value, m)?;

    // returns the noised value together with the effective (reduced) epsilon
    Ok((value, epsilon))
}

// NOTE(review): this fallback has a different name and signature than the
// mpfr-gated `apply_snapping_noise` above — presumably callers are also
// feature-gated; confirm before relying on it.
#[cfg(not(feature = "use-mpfr"))]
pub fn snapping_mechanism(
    mechanism_input: &f64, epsilon: &f64, b: &f64, sensitivity: &f64
) -> Result<f64> {
    Err(Error::from("Crate must be compiled with gmp-mpfr to use the snapping mechanism."))
}

/// Sample noise from the Gumbel Distribution
///
/// Based on C implementation from https://github.com/numpy/numpy/blob/d329a66dbb9710aefd03cce6a8b0f46da51490ca/numpy/random/src/distributions/distributions.c
///
/// # Arguments
/// * `loc` - location parameter
/// * `scale` - scale parameter
///
/// # Return
/// Noise according to the Gumbel Distribution
pub fn sample_gumbel(loc: f64, scale: f64) -> f64 {
    let rug_loc = Float::with_val(120, loc);
    let rug_scale = Float::with_val(120, scale);
    // 120-bit working precision for the double log-transform below
    let u = Float::with_val(120, sample_uniform_mpfr(0.0, 1.0).unwrap());
    // Accept if u > 0, otherwise reject and call function again
    if u.gt(&Float::with_val(120, 0.0)) {
        // Gumbel via inverse transform: loc - scale * ln(-ln(u))
        let negative_log = -(u.ln());
        let log_term = negative_log.ln();
        (-rug_scale.mul_add(&log_term, &rug_loc)).to_f64()
    } else {
        sample_gumbel(loc, scale)
    }
}

/// Shuffle a vector
///
/// Orders elements by independently sampled uniform keys; `n64` wraps the key
/// as a totally-ordered non-NaN float so `sort_unstable_by_key` is well-defined.
pub fn shuffle<T>(vector: Vec<T>, enforce_constant_time: bool) -> Result<Vec<T>> {
    let mut vector = vector
        .into_iter()
        .map(|v| Ok((v, n64(sample_uniform(0., 1., enforce_constant_time)?))))
        .collect::<Result<Vec<_>>>()?;
    vector.sort_unstable_by_key(|v| v.1);
    Ok(vector.into_iter().map(|(v, _)| v).collect())
}
35.865789
172
0.643994
9062a415cdbbe68e47307a207bdf843e8ab66041
13,780
use super::{config::Config, Events, Shard};
use crate::EventTypeFlags;
use std::{
    error::Error,
    fmt::{Display, Formatter, Result as FmtResult},
    sync::Arc,
};
use twilight_gateway_queue::{LocalQueue, Queue};
use twilight_http::Client as HttpClient;
use twilight_model::gateway::{
    payload::outgoing::{identify::IdentifyProperties, update_presence::UpdatePresencePayload},
    Intents,
};

/// Large threshold configuration is invalid.
///
/// Returned by [`ShardBuilder::large_threshold`].
#[derive(Debug)]
pub struct LargeThresholdError {
    kind: LargeThresholdErrorType,
}

impl LargeThresholdError {
    /// Immutable reference to the type of error that occurred.
    #[must_use = "retrieving the type has no effect if left unused"]
    pub const fn kind(&self) -> &LargeThresholdErrorType {
        &self.kind
    }

    /// Consume the error, returning the source error if there is any.
    // Always `None`: this error type carries no underlying source.
    #[allow(clippy::unused_self)]
    #[must_use = "consuming the error and retrieving the source has no effect if left unused"]
    pub fn into_source(self) -> Option<Box<dyn Error + Send + Sync>> {
        None
    }

    /// Consume the error, returning the owned error type and the source error.
    #[must_use = "consuming the error into its parts has no effect if left unused"]
    pub fn into_parts(
        self,
    ) -> (
        LargeThresholdErrorType,
        Option<Box<dyn Error + Send + Sync>>,
    ) {
        (self.kind, None)
    }
}

impl Display for LargeThresholdError {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match &self.kind {
            LargeThresholdErrorType::TooFew { .. } => {
                f.write_str("provided large threshold value is fewer than 50")
            }
            LargeThresholdErrorType::TooMany { .. } => {
                f.write_str("provided large threshold value is more than 250")
            }
        }
    }
}

impl Error for LargeThresholdError {}

/// Type of [`LargeThresholdError`] that occurred.
#[derive(Debug)]
#[non_exhaustive]
pub enum LargeThresholdErrorType {
    /// Provided large threshold value is too few in number.
    TooFew {
        /// Provided value.
        value: u64,
    },
    /// Provided large threshold value is too many in number.
    TooMany {
        /// Provided value.
        value: u64,
    },
}

/// Shard ID configuration is invalid.
///
/// Returned by [`ShardBuilder::shard`].
#[derive(Debug)]
pub struct ShardIdError {
    kind: ShardIdErrorType,
}

impl ShardIdError {
    /// Immutable reference to the type of error that occurred.
    #[must_use = "retrieving the type has no effect if left unused"]
    pub const fn kind(&self) -> &ShardIdErrorType {
        &self.kind
    }

    /// Consume the error, returning the source error if there is any.
    // Always `None`: this error type carries no underlying source.
    #[allow(clippy::unused_self)]
    #[must_use = "consuming the error and retrieving the source has no effect if left unused"]
    pub fn into_source(self) -> Option<Box<dyn Error + Send + Sync>> {
        None
    }

    /// Consume the error, returning the owned error type and the source error.
    #[must_use = "consuming the error into its parts has no effect if left unused"]
    pub fn into_parts(self) -> (ShardIdErrorType, Option<Box<dyn Error + Send + Sync>>) {
        (self.kind, None)
    }
}

impl Display for ShardIdError {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match &self.kind {
            ShardIdErrorType::IdTooLarge { id, total } => {
                f.write_str("provided shard ID ")?;
                Display::fmt(id, f)?;
                f.write_str(" is larger than the total ")?;

                Display::fmt(total, f)
            }
        }
    }
}

impl Error for ShardIdError {}

/// Type of [`ShardIdError`] that occurred.
#[derive(Debug)]
pub enum ShardIdErrorType {
    /// Provided shard ID is higher than provided total shard count.
    IdTooLarge {
        /// Shard ID.
        id: u64,
        /// Total shard count.
        total: u64,
    },
}

/// Builder to configure and construct a shard.
///
/// Use [`ShardBuilder::new`] to start configuring a new [`Shard`].
/// /// # Examples /// /// Create a new shard, setting the [`large_threshold`] to 100 and the /// [`shard`] ID to 5 out of 10: /// /// ```rust,no_run /// use std::env; /// use twilight_gateway::{Intents, Shard}; /// /// # fn main() -> Result<(), Box<dyn std::error::Error>> { /// let token = env::var("DISCORD_TOKEN")?; /// /// let shard = Shard::builder(token, Intents::GUILD_MESSAGE_REACTIONS) /// .large_threshold(100)? /// .shard(5, 10)? /// .build(); /// # Ok(()) } /// ``` /// /// [`ShardBuilder::new`]: Self::new /// [`large_threshold`]: Self::large_threshold /// [`shard`]: Self::shard #[derive(Debug)] pub struct ShardBuilder(pub(crate) Config); impl ShardBuilder { /// Create a new builder to configure and construct a shard. /// /// Refer to each method to learn their default values. pub fn new(token: impl Into<String>, intents: Intents) -> Self { Self::_new(token.into(), intents) } fn _new(mut token: String, intents: Intents) -> Self { if !token.starts_with("Bot ") { token.insert_str(0, "Bot "); } Self(Config { event_types: EventTypeFlags::default(), gateway_url: None, http_client: Arc::new(HttpClient::new(token.clone())), identify_properties: None, intents, large_threshold: 250, presence: None, queue: Arc::new(LocalQueue::new()), shard: [0, 1], token: token.into_boxed_str(), session_id: None, sequence: None, }) } /// Consume the builder, constructing a shard. pub fn build(self) -> (Shard, Events) { Shard::new_with_config(self.0) } /// Set the event types to process. /// /// This is an optimization technique; all events not included in the /// provided event type flags will not be deserialized by the gateway and /// will be discarded. All events will still be sent if /// [`EventTypeFlags::SHARD_PAYLOAD`] is enabled. 
/// /// [`EventTypeFlags::SHARD_PAYLOAD`]: crate::EventTypeFlags::SHARD_PAYLOAD pub const fn event_types(mut self, event_types: EventTypeFlags) -> Self { self.0.event_types = event_types; self } /// Set the URL used for connecting to Discord's gateway pub fn gateway_url(mut self, gateway_url: Option<String>) -> Self { self.0.gateway_url = gateway_url.map(String::into_boxed_str); self } /// Set the HTTP client to be used by the shard for getting gateway /// information. /// /// Default is a new, unconfigured instance of an HTTP client. #[allow(clippy::missing_const_for_fn)] pub fn http_client(mut self, http_client: Arc<HttpClient>) -> Self { self.0.http_client = http_client; self } /// Set the properties to identify with. /// /// This may be used if you want to set a different operating system, for /// example. /// /// # Examples /// /// Set the identify properties for a shard: /// /// ```no_run /// # fn main() -> Result<(), Box<dyn std::error::Error>> { /// use std::env::{self, consts::OS}; /// use twilight_gateway::{Intents, Shard}; /// use twilight_model::gateway::payload::outgoing::identify::IdentifyProperties; /// /// let token = env::var("DISCORD_TOKEN")?; /// let properties = IdentifyProperties::new( /// "twilight.rs", /// "twilight.rs", /// OS, /// "", /// "", /// ); /// /// let builder = Shard::builder(token, Intents::empty()) /// .identify_properties(properties); /// # Ok(()) } /// ``` #[allow(clippy::missing_const_for_fn)] pub fn identify_properties(mut self, identify_properties: IdentifyProperties) -> Self { self.0.identify_properties = Some(identify_properties); self } /// Set the maximum number of members in a guild to load the member list. /// /// Default value is `250`. The minimum value is `50` and the maximum is /// `250`. /// /// # Examples /// /// If you pass `200`, then if there are 250 members in a guild the member /// list won't be sent. If there are 150 members, then the list *will* be /// sent. 
/// /// # Errors /// /// Returns a [`LargeThresholdErrorType::TooFew`] error type if the provided /// value is below 50. /// /// Returns a [`LargeThresholdErrorType::TooMany`] error type if the /// provided value is above 250. #[allow(clippy::missing_const_for_fn)] pub fn large_threshold(mut self, large_threshold: u64) -> Result<Self, LargeThresholdError> { match large_threshold { 0..=49 => { return Err(LargeThresholdError { kind: LargeThresholdErrorType::TooFew { value: large_threshold, }, }) } 50..=250 => {} 251..=u64::MAX => { return Err(LargeThresholdError { kind: LargeThresholdErrorType::TooMany { value: large_threshold, }, }) } } self.0.large_threshold = large_threshold; Ok(self) } /// Set the presence to use automatically when starting a new session. /// /// Default is no presence, which defaults to strictly being "online" /// with no special qualities. /// /// # Examples /// /// Set the bot user's presence to idle with the status "Not accepting /// commands": /// /// ```no_run /// use twilight_gateway::{Intents, Shard}; /// use twilight_model::gateway::{ /// payload::outgoing::update_presence::UpdatePresencePayload, /// presence::{ActivityType, MinimalActivity, Status}, /// }; /// /// # fn main() -> Result<(), Box<dyn std::error::Error>> { /// let shard = Shard::builder("token", Intents::empty()) /// .presence(UpdatePresencePayload::new( /// vec![MinimalActivity { /// kind: ActivityType::Playing, /// name: "Not accepting commands".into(), /// url: None, /// } /// .into()], /// false, /// None, /// Status::Idle, /// )?); /// # Ok(()) } /// /// ``` pub fn presence(mut self, presence: UpdatePresencePayload) -> Self { self.0.presence.replace(presence); self } /// Set the queue to use for queueing shard connections. /// /// You probably don't need to set this yourself, because the [`Cluster`] /// manages that for you. Refer to the [`queue`] module for more /// information. 
/// /// The default value is a queue used only by this shard, or a queue used by /// all shards when ran by a [`Cluster`]. /// /// [`Cluster`]: crate::cluster::Cluster /// [`queue`]: crate::queue pub fn queue(mut self, queue: Arc<dyn Queue>) -> Self { self.0.queue = queue; self } /// Set the shard ID to connect as, and the total number of shards used by /// the bot. /// /// The shard ID is 0-indexed, while the total is 1-indexed. /// /// The default value is a shard ID of 0 and a shard total of 1, which is /// good for smaller bots. /// /// **Note**: If your bot is in over 250'000 guilds then `shard_total` /// *should probably* be a multiple of 16 if you're in the "Large Bot /// Sharding" program. /// /// # Examples /// /// If you have 19 shards, then your last shard will have an ID of 18 out of /// a total of 19 shards: /// /// ```no_run /// use twilight_gateway::{Intents, Shard}; /// use std::env; /// /// # fn main() -> Result<(), Box<dyn std::error::Error>> { /// let token = env::var("DISCORD_TOKEN")?; /// /// let shard = Shard::builder(token, Intents::empty()).shard(18, 19)?.build(); /// # Ok(()) } /// ``` /// /// # Errors /// /// Returns a [`ShardIdErrorType::IdTooLarge`] error type if the shard ID to /// connect as is larger than the total. 
#[allow(clippy::missing_const_for_fn)]
    pub fn shard(mut self, shard_id: u64, shard_total: u64) -> Result<Self, ShardIdError> {
        // The shard ID is 0-indexed against a 1-indexed total, so a valid ID
        // is strictly smaller than the total.
        if shard_id < shard_total {
            self.0.shard = [shard_id, shard_total];

            Ok(self)
        } else {
            Err(ShardIdError {
                kind: ShardIdErrorType::IdTooLarge {
                    id: shard_id,
                    total: shard_total,
                },
            })
        }
    }
}

impl<T: Into<String>> From<(T, Intents)> for ShardBuilder {
    fn from((token, intents): (T, Intents)) -> Self {
        Self::new(token, intents)
    }
}

#[cfg(test)]
mod tests {
    use super::{
        LargeThresholdError, LargeThresholdErrorType, ShardBuilder, ShardIdError,
        ShardIdErrorType,
    };
    use crate::Intents;
    use static_assertions::{assert_fields, assert_impl_all};
    use std::{error::Error, fmt::Debug};

    assert_impl_all!(LargeThresholdError: Error, Send, Sync);
    assert_impl_all!(LargeThresholdErrorType: Debug, Send, Sync);
    assert_fields!(LargeThresholdErrorType::TooFew: value);
    assert_fields!(LargeThresholdErrorType::TooMany: value);
    assert_impl_all!(ShardBuilder: Debug, From<(String, Intents)>, Send, Sync);
    assert_impl_all!(ShardIdError: Error, Send, Sync);
    assert_impl_all!(ShardIdErrorType: Debug, Send, Sync);
    assert_fields!(ShardIdErrorType::IdTooLarge: id, total);
}
31.389522
99
0.585414
79299b9d5a00610b8ec9c40685056ce5d0acc380
837
use winres; fn main() -> std::io::Result<()> { if cfg!(target_os = "windows") { // We need to set the 'longPathAware' manifest key, so that file paths with length >260 chars will work. // This happens sometimes since we encode IDs for duplicate files. let mut res = winres::WindowsResource::new(); res.set_manifest( r#"<?xml version="1.0" encoding="utf-8" standalone="yes"?> <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0" xmlns:asmv3="urn:schemas-microsoft-com:asm.v3"> <application xmlns="urn:schemas-microsoft-com:asm.v3"> <windowsSettings xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings"> <ws2:longPathAware>true</ws2:longPathAware> </windowsSettings> </application> </assembly>"#); res.compile()?; } Ok(()) }
39.857143
120
0.659498
8778ef0f1849da2abc75921669bbce527a922e76
9,176
#[doc = "Reader of register DEVCTL"] pub type R = crate::R<u8, super::DEVCTL>; #[doc = "Writer for register DEVCTL"] pub type W = crate::W<u8, super::DEVCTL>; #[doc = "Register DEVCTL `reset()`'s with value 0"] impl crate::ResetValue for super::DEVCTL { type Type = u8; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `SESSION`"] pub type SESSION_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SESSION`"] pub struct SESSION_W<'a> { w: &'a mut W, } impl<'a> SESSION_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u8) & 0x01); self.w } } #[doc = "Reader of field `HOSTREQ`"] pub type HOSTREQ_R = crate::R<bool, bool>; #[doc = "Write proxy for field `HOSTREQ`"] pub struct HOSTREQ_W<'a> { w: &'a mut W, } impl<'a> HOSTREQ_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u8) & 0x01) << 1); self.w } } #[doc = "Reader of field `HOST`"] pub type HOST_R = crate::R<bool, bool>; #[doc = "Write proxy for field `HOST`"] pub struct HOST_W<'a> { w: &'a mut W, } impl<'a> HOST_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = 
(self.w.bits & !(0x01 << 2)) | (((value as u8) & 0x01) << 2); self.w } } #[doc = "VBUS Level (OTG only)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum VBUS_A { #[doc = "0: Below SessionEnd"] NONE, #[doc = "1: Above SessionEnd, below AValid"] SEND, #[doc = "2: Above AValid, below VBUSValid"] AVALID, #[doc = "3: Above VBUSValid"] VALID, } impl From<VBUS_A> for u8 { #[inline(always)] fn from(variant: VBUS_A) -> Self { match variant { VBUS_A::NONE => 0, VBUS_A::SEND => 1, VBUS_A::AVALID => 2, VBUS_A::VALID => 3, } } } #[doc = "Reader of field `VBUS`"] pub type VBUS_R = crate::R<u8, VBUS_A>; impl VBUS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> VBUS_A { match self.bits { 0 => VBUS_A::NONE, 1 => VBUS_A::SEND, 2 => VBUS_A::AVALID, 3 => VBUS_A::VALID, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == VBUS_A::NONE } #[doc = "Checks if the value of the field is `SEND`"] #[inline(always)] pub fn is_send(&self) -> bool { *self == VBUS_A::SEND } #[doc = "Checks if the value of the field is `AVALID`"] #[inline(always)] pub fn is_avalid(&self) -> bool { *self == VBUS_A::AVALID } #[doc = "Checks if the value of the field is `VALID`"] #[inline(always)] pub fn is_valid(&self) -> bool { *self == VBUS_A::VALID } } #[doc = "Write proxy for field `VBUS`"] pub struct VBUS_W<'a> { w: &'a mut W, } impl<'a> VBUS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: VBUS_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "Below SessionEnd"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(VBUS_A::NONE) } #[doc = "Above SessionEnd, below AValid"] #[inline(always)] pub fn send(self) -> &'a mut W { self.variant(VBUS_A::SEND) } #[doc = "Above AValid, below VBUSValid"] #[inline(always)] pub fn avalid(self) -> &'a mut W { self.variant(VBUS_A::AVALID) } #[doc = "Above 
VBUSValid"] #[inline(always)] pub fn valid(self) -> &'a mut W { self.variant(VBUS_A::VALID) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 3)) | (((value as u8) & 0x03) << 3); self.w } } #[doc = "Reader of field `LSDEV`"] pub type LSDEV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `LSDEV`"] pub struct LSDEV_W<'a> { w: &'a mut W, } impl<'a> LSDEV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u8) & 0x01) << 5); self.w } } #[doc = "Reader of field `FSDEV`"] pub type FSDEV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FSDEV`"] pub struct FSDEV_W<'a> { w: &'a mut W, } impl<'a> FSDEV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u8) & 0x01) << 6); self.w } } #[doc = "Reader of field `DEV`"] pub type DEV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `DEV`"] pub struct DEV_W<'a> { w: &'a mut W, } impl<'a> DEV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u8) & 0x01) 
<< 7); self.w } } impl R { #[doc = "Bit 0 - Session Start/End (OTG only)"] #[inline(always)] pub fn session(&self) -> SESSION_R { SESSION_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Host Request (OTG only)"] #[inline(always)] pub fn hostreq(&self) -> HOSTREQ_R { HOSTREQ_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Host Mode"] #[inline(always)] pub fn host(&self) -> HOST_R { HOST_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bits 3:4 - VBUS Level (OTG only)"] #[inline(always)] pub fn vbus(&self) -> VBUS_R { VBUS_R::new(((self.bits >> 3) & 0x03) as u8) } #[doc = "Bit 5 - Low-Speed Device Detected"] #[inline(always)] pub fn lsdev(&self) -> LSDEV_R { LSDEV_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Full-Speed Device Detected"] #[inline(always)] pub fn fsdev(&self) -> FSDEV_R { FSDEV_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Device Mode (OTG only)"] #[inline(always)] pub fn dev(&self) -> DEV_R { DEV_R::new(((self.bits >> 7) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Session Start/End (OTG only)"] #[inline(always)] pub fn session(&mut self) -> SESSION_W { SESSION_W { w: self } } #[doc = "Bit 1 - Host Request (OTG only)"] #[inline(always)] pub fn hostreq(&mut self) -> HOSTREQ_W { HOSTREQ_W { w: self } } #[doc = "Bit 2 - Host Mode"] #[inline(always)] pub fn host(&mut self) -> HOST_W { HOST_W { w: self } } #[doc = "Bits 3:4 - VBUS Level (OTG only)"] #[inline(always)] pub fn vbus(&mut self) -> VBUS_W { VBUS_W { w: self } } #[doc = "Bit 5 - Low-Speed Device Detected"] #[inline(always)] pub fn lsdev(&mut self) -> LSDEV_W { LSDEV_W { w: self } } #[doc = "Bit 6 - Full-Speed Device Detected"] #[inline(always)] pub fn fsdev(&mut self) -> FSDEV_W { FSDEV_W { w: self } } #[doc = "Bit 7 - Device Mode (OTG only)"] #[inline(always)] pub fn dev(&mut self) -> DEV_W { DEV_W { w: self } } }
27.97561
83
0.523322
febffa951ce26ac6f6276c5fb5ea2826677f0bea
2,393
use xmpp_xml::Element;

use crate::{ns, FromXmlElement, NonStanza, Packet, ToXmlElement};

/// `<proceed/>` nonza: the server's acknowledgement that TLS negotiation
/// may begin.
#[derive(Default, Debug, Clone)]
pub struct ProceedTls {
    mechanism: Option<String>,
    challenge: Option<String>,
}

impl From<ProceedTls> for Packet {
    fn from(value: ProceedTls) -> Self {
        NonStanza::ProceedTls(value).into()
    }
}

impl ToXmlElement for ProceedTls {
    type Error = std::io::Error;

    /// Serialize as an empty `proceed` element in the TLS namespace.
    fn to_element(&self) -> Result<Element, std::io::Error> {
        Ok(Element::new((ns::TLS, "proceed")))
    }
}

impl FromXmlElement for ProceedTls {
    type Error = std::io::Error;

    /// Build a `ProceedTls` from a parsed element, capturing the optional
    /// `mechanism` attribute and the element's text content as the challenge.
    fn from_element(e: &Element) -> Result<Self, Self::Error> {
        let mechanism = e.get_attr("mechanism").map(|mechanism| mechanism.to_string());
        let challenge = Some(e.text().to_string());

        Ok(Self {
            mechanism,
            challenge,
        })
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use circular::Buffer;
    use xmpp_xml::{
        xml::{reader::XmlEvent, ParserConfig},
        WriteOptions,
    };

    use super::*;

    const EXPECTED_PROCEEDTLS: &'static str =
        r#"<proceed xmlns="urn:ietf:params:xml:ns:xmpp-tls" />"#;

    #[test]
    fn to_element() {
        let mut output: Vec<u8> = Vec::new();
        let element = ProceedTls::default().to_element().unwrap();
        let _ = element.to_writer_with_options(
            &mut output,
            WriteOptions::new().set_xml_prolog(None),
        );

        let generated = String::from_utf8(output).unwrap();
        assert!(EXPECTED_PROCEEDTLS == generated);
    }

    #[test]
    fn from() {
        let mut cfg = ParserConfig::new().whitespace_to_characters(true);
        cfg.ignore_end_of_stream = true;
        let mut reader = cfg.create_reader(Buffer::with_capacity(4096));
        reader
            .source_mut()
            .write(EXPECTED_PROCEEDTLS.as_bytes())
            .unwrap();

        // Skip the StartDocument event, then expect the element itself.
        let _ = reader.next().unwrap();
        let event = reader.next().unwrap();
        assert!(matches!(event, XmlEvent::StartElement { .. }));

        if let XmlEvent::StartElement {
            name,
            attributes,
            namespace,
        } = event
        {
            let packet = Packet::parse(&mut reader, name, namespace, attributes);
            assert!(
                matches!(packet, Ok(Packet::NonStanza(ref stanza)) if matches!(**stanza, NonStanza::ProceedTls(_))),
                "Packet wasn't an ProceedTls, it was: {:?}",
                packet
            );
        }
    }
}
27.825581
124
0.594233
e2292a1100347fc79953f16fee4cce7270d7682d
93
//! Flattens the `client`, `common`, and `server` submodules into this
//! module's public namespace.

mod client;
mod common;
mod server;

pub use self::client::*;
pub use self::common::*;
pub use self::server::*;
13.285714
18
0.677419
5d506205250c8add7775f53f9c0042cf33fb57fe
12,257
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. extern crate log; extern crate env_logger; extern crate protobuf; extern crate raft; extern crate regex; use std::collections::{HashMap, VecDeque}; use std::sync::mpsc::{self, Receiver, Sender, SyncSender, TryRecvError}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{str, thread}; use protobuf::Message as PbMessage; use raft::storage::MemStorage; use raft::{prelude::*, StateRole}; use regex::Regex; pub fn raft_cluster_main() { env_logger::init(); // Create 5 mailboxes to send/receive messages. Every node holds a `Receiver` to receive // messages from others, and uses the respective `Sender` to send messages to others. let (mut tx_vec, mut rx_vec) = (Vec::new(), Vec::new()); for _ in 0..5 { let (tx, rx) = mpsc::channel(); tx_vec.push(tx); rx_vec.push(rx); } // A global pending proposals queue. New proposals will be pushed back into the queue, and // after it's committed by the raft cluster, it will be poped from the queue. let proposals = Arc::new(Mutex::new(VecDeque::<Proposal>::new())); let mut handles = Vec::new(); for (i, rx) in rx_vec.into_iter().enumerate() { // A map[peer_id -> sender]. In the example we create 5 nodes, with ids in [1, 5]. let mailboxes = (1..6u64).zip(tx_vec.iter().cloned()).collect(); let mut node = match i { // Peer 1 is the leader. 0 => Node::create_raft_leader(1, rx, mailboxes), // Other peers are followers. 
_ => Node::create_raft_follower(rx, mailboxes), }; let proposals = Arc::clone(&proposals); // Tick the raft node per 100ms. So use an `Instant` to trace it. let mut t = Instant::now(); // Here we spawn the node on a new thread and keep a handle so we can join on them later. let handle = thread::spawn(move || loop { thread::sleep(Duration::from_millis(10)); loop { // Step raft messages. match node.my_mailbox.try_recv() { Ok(msg) => node.step(msg), Err(TryRecvError::Empty) => break, Err(TryRecvError::Disconnected) => return, } } let raft_group = match node.raft_group { Some(ref mut r) => r, // When Node::raft_group is `None` it means the node is not initialized. _ => continue, }; if t.elapsed() >= Duration::from_millis(100) { // Tick the raft. raft_group.tick(); t = Instant::now(); } // Let the leader pick pending proposals from the global queue. if raft_group.raft.state == StateRole::Leader { // Handle new proposals. let mut proposals = proposals.lock().unwrap(); for p in proposals.iter_mut().skip_while(|p| p.proposed > 0) { propose(raft_group, p); } } // Handle readies from the raft. on_ready(raft_group, &mut node.kv_pairs, &node.mailboxes, &proposals); }); handles.push(handle); } // Propose some conf changes so that followers can be initialized. add_all_followers(proposals.as_ref()); // Put 100 key-value pairs. (0..100u16) .filter(|i| { let (proposal, rx) = Proposal::normal(*i, "hello, world".to_owned()); proposals.lock().unwrap().push_back(proposal); // After we got a response from `rx`, we can assume the put succeeded and following // `get` operations can find the key-value pair. rx.recv().unwrap() }) .count(); for th in handles { th.join().unwrap(); } } struct Node { // None if the raft is not initialized. raft_group: Option<RawNode<MemStorage>>, my_mailbox: Receiver<Message>, mailboxes: HashMap<u64, Sender<Message>>, // Key-value pairs after applied. `MemStorage` only contains raft logs, // so we need an additional storage engine. 
kv_pairs: HashMap<u16, String>, } impl Node { // Create a raft leader only with itself in its configuration. fn create_raft_leader( id: u64, my_mailbox: Receiver<Message>, mailboxes: HashMap<u64, Sender<Message>>, ) -> Self { let mut cfg = example_config(); cfg.id = id; cfg.peers = vec![id]; cfg.tag = format!("peer_{}", id); let storage = MemStorage::new(); let raft_group = Some(RawNode::new(&cfg, storage, vec![]).unwrap()); Node { raft_group, my_mailbox, mailboxes, kv_pairs: Default::default(), } } // Create a raft follower. fn create_raft_follower( my_mailbox: Receiver<Message>, mailboxes: HashMap<u64, Sender<Message>>, ) -> Self { Node { raft_group: None, my_mailbox, mailboxes, kv_pairs: Default::default(), } } // Initialize raft for followers. fn initialize_raft_from_message(&mut self, msg: &Message) { if !is_initial_msg(msg) { return; } let mut cfg = example_config(); cfg.id = msg.get_to(); let storage = MemStorage::new(); self.raft_group = Some(RawNode::new(&cfg, storage, vec![]).unwrap()); } // Step a raft message, initialize the raft if need. fn step(&mut self, msg: Message) { if self.raft_group.is_none() { if is_initial_msg(&msg) { self.initialize_raft_from_message(&msg); } else { return; } } let raft_group = self.raft_group.as_mut().unwrap(); let _ = raft_group.step(msg); } } fn on_ready( raft_group: &mut RawNode<MemStorage>, kv_pairs: &mut HashMap<u16, String>, mailboxes: &HashMap<u64, Sender<Message>>, proposals: &Mutex<VecDeque<Proposal>>, ) { if !raft_group.has_ready() { return; } // Get the `Ready` with `RawNode::ready` interface. let mut ready = raft_group.ready(); // Persistent raft logs. It's necessary because in `RawNode::advance` we stabilize // raft logs to the latest position. if let Err(e) = raft_group.raft.raft_log.store.wl().append(ready.entries()) { error!("persist raft log fail: {:?}, need to retry or panic", e); return; } // Send out the messages come from the node. for msg in ready.messages.drain(..) 
{ let to = msg.get_to(); if mailboxes[&to].send(msg).is_err() { warn!("send raft message to {} fail, let Raft retry it", to); } } // Apply all committed proposals. if let Some(committed_entries) = ready.committed_entries.take() { for entry in committed_entries { if entry.get_data().is_empty() { // From new elected leaders. continue; } if let EntryType::EntryConfChange = entry.get_entry_type() { // For conf change messages, make them effective. let mut cc = ConfChange::new(); cc.merge_from_bytes(entry.get_data()).unwrap(); let node_id = cc.get_node_id(); match cc.get_change_type() { ConfChangeType::AddNode => raft_group.raft.add_node(node_id).unwrap(), ConfChangeType::RemoveNode => raft_group.raft.remove_node(node_id).unwrap(), ConfChangeType::AddLearnerNode => raft_group.raft.add_learner(node_id).unwrap(), ConfChangeType::BeginMembershipChange | ConfChangeType::FinalizeMembershipChange => unimplemented!(), } } else { // For normal proposals, extract the key-value pair and then // insert them into the kv engine. let data = str::from_utf8(entry.get_data()).unwrap(); let reg = Regex::new("put ([0-9]+) (.+)").unwrap(); if let Some(caps) = reg.captures(&data) { kv_pairs.insert(caps[1].parse().unwrap(), caps[2].to_string()); } } if raft_group.raft.state == StateRole::Leader { // The leader should response to the clients, tell them if their proposals // succeeded or not. let proposal = proposals.lock().unwrap().pop_front().unwrap(); proposal.propose_success.send(true).unwrap(); } } } // Call `RawNode::advance` interface to update position flags in the raft. raft_group.advance(ready); } fn example_config() -> Config { Config { election_tick: 10, heartbeat_tick: 3, ..Default::default() } } // The message can be used to initialize a raft node or not. 
fn is_initial_msg(msg: &Message) -> bool { let msg_type = msg.get_msg_type(); msg_type == MessageType::MsgRequestVote || msg_type == MessageType::MsgRequestPreVote || (msg_type == MessageType::MsgHeartbeat && msg.get_commit() == 0) } struct Proposal { normal: Option<(u16, String)>, // key is an u16 integer, and value is a string. conf_change: Option<ConfChange>, // conf change. transfer_leader: Option<u64>, // If it's proposed, it will be set to the index of the entry. proposed: u64, propose_success: SyncSender<bool>, } impl Proposal { fn conf_change(cc: &ConfChange) -> (Self, Receiver<bool>) { let (tx, rx) = mpsc::sync_channel(1); let proposal = Proposal { normal: None, conf_change: Some(cc.clone()), transfer_leader: None, proposed: 0, propose_success: tx, }; (proposal, rx) } fn normal(key: u16, value: String) -> (Self, Receiver<bool>) { let (tx, rx) = mpsc::sync_channel(1); let proposal = Proposal { normal: Some((key, value)), conf_change: None, transfer_leader: None, proposed: 0, propose_success: tx, }; (proposal, rx) } } fn propose(raft_group: &mut RawNode<MemStorage>, proposal: &mut Proposal) { let last_index1 = raft_group.raft.raft_log.last_index() + 1; if let Some((ref key, ref value)) = proposal.normal { let data = format!("put {} {}", key, value).into_bytes(); let _ = raft_group.propose(vec![], data); } else if let Some(ref cc) = proposal.conf_change { let _ = raft_group.propose_conf_change(vec![], cc.clone()); } else if let Some(_tranferee) = proposal.transfer_leader { // TODO: implement tranfer leader. unimplemented!(); } let last_index2 = raft_group.raft.raft_log.last_index() + 1; if last_index2 == last_index1 { // Propose failed, don't forget to respond to the client. proposal.propose_success.send(false).unwrap(); } else { proposal.proposed = last_index1; } } // Proposes some conf change for peers [2, 5]. 
fn add_all_followers(proposals: &Mutex<VecDeque<Proposal>>) { for i in 2..6u64 { let mut conf_change = ConfChange::default(); conf_change.set_node_id(i); conf_change.set_change_type(ConfChangeType::AddNode); loop { let (proposal, rx) = Proposal::conf_change(&conf_change); proposals.lock().unwrap().push_back(proposal); if rx.recv().unwrap() { break; } thread::sleep(Duration::from_millis(100)); } } }
35.630814
100
0.577874
72b86eca01d69a36caceeaebcc29c1cffe6862b5
3,134
use proc_macro2::TokenStream as TokenStream2; use quote::quote; use syn::{FieldsNamed, FieldsUnnamed, Ident, ItemEnum, ItemStruct}; use crate::shared::{ generate_arms, generate_named_fields, generate_unnamed_fields, process_impl_generics, process_trait_bounds, FieldInfo, StructInfo, }; pub fn impl_tobytes_struct(is: ItemStruct) -> TokenStream2 { let info = StructInfo::from_item_struct(is, Some("ToBytes"), "to_bytes_bound", true); let (struct_name, generics, generics_without_bounds, field_names, field_types, _, padded) = info.into_tuple(); if field_names.is_empty() { return quote! { impl neli::ToBytes for #struct_name { fn to_bytes(&self, _: &mut std::io::Cursor<Vec<u8>>) -> Result<(), neli::err::SerError> { Ok(()) } } }; } let padding = if padded { quote! { <#struct_name#generics_without_bounds as neli::ToBytes>::pad(&self, buffer)?; } } else { TokenStream2::new() }; quote! { impl#generics neli::ToBytes for #struct_name#generics_without_bounds { fn to_bytes(&self, buffer: &mut std::io::Cursor<Vec<u8>>) -> Result<(), neli::err::SerError> { #( <#field_types as neli::ToBytes>::to_bytes(&self.#field_names, buffer)?; )* #padding Ok(()) } } } } fn generate_named_pat_and_expr( enum_name: Ident, var_name: Ident, fields: FieldsNamed, ) -> TokenStream2 { let (field_names, types, _) = FieldInfo::to_vecs(generate_named_fields(fields).into_iter()); quote! { #enum_name::#var_name { #(#field_names),* } => { #(<#types as neli::ToBytes>::to_bytes(&#field_names, buffer)?; )* Ok(()) }, } } fn generate_unnamed_pat_and_expr( enum_name: Ident, var_name: Ident, fields: FieldsUnnamed, ) -> TokenStream2 { let (field_names, types, _) = FieldInfo::to_vecs(generate_unnamed_fields(fields, false).into_iter()); quote! 
{ #enum_name::#var_name( #( #field_names ),* ) => { #( <#types as neli::ToBytes>::to_bytes(#field_names, buffer)?; )* Ok(()) } } } pub fn impl_tobytes_enum(ie: ItemEnum) -> TokenStream2 { let (generics, generics_without_bounds) = process_impl_generics(ie.generics, Some("ToBytes")); let trait_bounds = process_trait_bounds(&ie.attrs, "to_bytes_bound"); let enum_name = ie.ident; let arms = generate_arms( enum_name.clone(), ie.variants.into_iter().collect::<Vec<_>>(), generate_named_pat_and_expr, generate_unnamed_pat_and_expr, quote! { Ok(()) }, ); quote! { impl#generics neli::ToBytes for #enum_name#generics_without_bounds where #( #trait_bounds ),* { fn to_bytes(&self, buffer: &mut std::io::Cursor<Vec<u8>>) -> Result<(), neli::err::SerError> { match self { #(#arms)* } } } } }
31.029703
106
0.574027
e8bd1379869522787bcbbd56a8b8ab88f52d41b9
1,896
use serde_yaml; use error::*; use metadata::ClusterId; use std::collections::HashMap; use std::fs::File; use std::io::prelude::*; fn default_true() -> bool { true } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ClusterConfig { pub cluster_id: Option<ClusterId>, // This will always be available after load pub broker_list: Vec<String>, pub zookeeper: String, pub jolokia_port: Option<i32>, pub graph_url: Option<String>, #[serde(default = "default_true")] pub enable_tailing: bool, #[serde(default = "default_true")] pub show_zk_reassignments: bool, } impl ClusterConfig { pub fn bootstrap_servers(&self) -> String { self.broker_list.join(",") } } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CachingConfig { pub cluster: ClusterId, pub topic: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Config { pub listen_port: u16, pub listen_host: String, pub metadata_refresh: u64, pub metrics_refresh: u64, pub offsets_store_duration: u64, pub consumer_offsets_group_id: String, pub clusters: HashMap<ClusterId, ClusterConfig>, pub caching: CachingConfig, } impl Config { pub fn cluster(&self, cluster_id: &ClusterId) -> Option<&ClusterConfig> { self.clusters.get(cluster_id) } } pub fn read_config(path: &str) -> Result<Config> { let mut f = File::open(path).chain_err(|| "Unable to open configuration file")?;; let mut s = String::new(); f.read_to_string(&mut s) .chain_err(|| "Unable to read configuration file")?; let mut config: Config = serde_yaml::from_str(&s).chain_err(|| "Unable to parse configuration file")?; for (cluster_id, cluster) in &mut config.clusters { cluster.cluster_id = Some(cluster_id.clone()); } info!("Configuration: {:?}", config); Ok(config) }
25.621622
85
0.670359
0a7d7c19342e5fcc700607490cf2f37c65568f03
768
#![allow(non_snake_case)] #![allow(non_camel_case_types)] #![allow(non_upper_case_globals)] include!("bindgen.rs"); include!("lua_macros.rs"); include!("lauxlib_macros.rs"); use std::borrow::Cow; use std::ffi::{CStr, CString}; /// Trait to convert types into nul-terminated strings, for when they need to be passed to API /// functions such as `lua_setglobal` or `lua_getglobal`. pub trait AsCStr { fn as_cstr(&self) -> Cow<CStr>; } impl<T> AsCStr for T where T: AsRef<[u8]>, { fn as_cstr(&self) -> Cow<CStr> { let string = self.as_ref(); if let Some(0) = string.last() { unsafe { Cow::Borrowed(CStr::from_ptr(string.as_ptr() as _)) } } else { Cow::Owned(CString::new(string).unwrap()) } } }
24.774194
94
0.623698
f55775f0ce833ca25e69fdd456e0d61f2d87f72a
3,891
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ chained_bft::QuorumCert, counters::OP_COUNTERS, state_synchronizer::{coordinator::CoordinatorMsg, PeerId}, }; use failure::prelude::*; use futures::{channel::mpsc, SinkExt, StreamExt}; use logger::prelude::*; use network::{proto::RequestChunk, validator_network::ConsensusNetworkSender}; use proto_conv::IntoProto; use rand::{thread_rng, Rng}; use std::time::Duration; use types::proto::transaction::TransactionListWithProof; /// Used for communication between coordinator and downloader /// and represents a single fetch request #[derive(Clone)] pub struct FetchChunkMsg { // target version that we want to fetch pub target: QuorumCert, // version from which to start fetching (the offset version) pub start_version: u64, } /// Used to download chunks of transactions from peers pub struct Downloader { receiver_from_coordinator: mpsc::Receiver<FetchChunkMsg>, sender_to_coordinator: mpsc::UnboundedSender<CoordinatorMsg>, network: ConsensusNetworkSender, batch_size: u64, retries: usize, } impl Downloader { pub fn new( receiver_from_coordinator: mpsc::Receiver<FetchChunkMsg>, sender_to_coordinator: mpsc::UnboundedSender<CoordinatorMsg>, network: ConsensusNetworkSender, batch_size: u64, retries: usize, ) -> Self { Self { receiver_from_coordinator, sender_to_coordinator, network, batch_size, retries, } } /// Starts chunk downloader that listens to FetchChunkMsgs pub async fn start(mut self) { while let Some(msg) = self.receiver_from_coordinator.next().await { for attempt in 0..self.retries { let peer_id = self.pick_peer_id(&msg); let download_result = self.download_chunk(peer_id, msg.clone()).await; if download_result.is_ok() || attempt == self.retries - 1 { let send_result = self .sender_to_coordinator .send(CoordinatorMsg::Fetched(download_result, msg.target)) .await; if send_result.is_err() { log_collector_error!("[state synchronizer] failed to send chunk from downloader to 
coordinator"); } break; } } } } /// Downloads a chunk from another validator or from a cloud provider. /// It then verifies that the data in the chunk is valid and returns the validated data. async fn download_chunk( &mut self, peer_id: PeerId, msg: FetchChunkMsg, ) -> Result<TransactionListWithProof> { // Construct the message and use rpc call via network stack let mut req = RequestChunk::new(); req.set_start_version(msg.start_version); req.set_target(msg.target.clone().into_proto()); req.set_batch_size(self.batch_size); // Longer-term, we will read from a cloud provider. But for testnet, just read // from the node which is proposing this block let mut resp = self .network .request_chunk(peer_id, req, Duration::from_millis(1000)) .await?; OP_COUNTERS.inc_by( "download", resp.get_txn_list_with_proof().get_transactions().len(), ); Ok(resp.take_txn_list_with_proof()) } fn pick_peer_id(&self, msg: &FetchChunkMsg) -> PeerId { let signatures = msg.target.ledger_info().signatures(); let idx = thread_rng().gen_range(0, signatures.len()); signatures .keys() .nth(idx) .cloned() .expect("[state synchronizer] failed to pick peer from qc") } }
35.372727
121
0.624261
5dba4fa3027fcaa428bf167db675ca92fe177e95
12,334
use crate::{ errors::WorkflowMissingError, protosext::ValidPollWFTQResponse, telemetry::metrics::{workflow_type, MetricsContext}, workflow::{ workflow_tasks::{OutstandingActivation, OutstandingTask}, HistoryUpdate, Result, WFMachinesError, WorkflowManager, }, }; use futures::future::{BoxFuture, FutureExt}; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ collections::HashMap, fmt::Debug, ops::{Deref, DerefMut}, }; use temporal_sdk_core_protos::coresdk::workflow_activation::WfActivation; /// Provides a thread-safe way to access workflow machines for specific workflow runs pub(crate) struct WorkflowConcurrencyManager { /// Maps run id -> data about and machines for that run runs: RwLock<HashMap<String, ManagedRun>>, } struct ManagedRun { wfm: Mutex<WorkflowManager>, wft: Option<OutstandingTask>, activation: Option<OutstandingActivation>, metrics: MetricsContext, /// If set, it indicates there is a buffered poll response from the server that applies to this /// run. This can happen when lang takes too long to complete a task and the task times out, for /// example. Upon next completion, the buffered response will be removed and can be made ready /// to be returned from polling buffered_resp: Option<ValidPollWFTQResponse>, } impl ManagedRun { fn new(wfm: WorkflowManager, metrics: MetricsContext) -> Self { Self { wfm: Mutex::new(wfm), wft: None, activation: None, metrics, buffered_resp: None, } } } impl WorkflowConcurrencyManager { pub fn new() -> Self { Self { runs: Default::default(), } } /// Allows access to outstanding task for a run. Returns `None` if there is no knowledge of /// the run at all, or if the run exists but there is no outstanding workflow task. 
pub(crate) fn get_task( &self, run_id: &str, ) -> Option<impl Deref<Target = OutstandingTask> + '_> { let readlock = self.runs.read(); if let Some(run) = readlock.get(run_id) { if run.wft.is_some() { Some(RwLockReadGuard::map(readlock, |hm| { // Unwraps are safe because we hold the lock and just ensured run is in the map hm.get(run_id).unwrap().wft.as_ref().unwrap() })) } else { None } } else { None } } /// Allows access to outstanding activation slot for a run. Returns `None` if there is no /// knowledge of the run at all, or if the run exists but there is no outstanding activation. pub(crate) fn get_activation(&self, run_id: &str) -> Option<OutstandingActivation> { let readlock = self.runs.read(); if readlock.contains_key(run_id) { readlock.get(run_id).unwrap().activation } else { None } } /// Allows mutable access to outstanding workflow task slot for a run pub(crate) fn get_task_mut( &self, run_id: &str, ) -> Result<impl DerefMut<Target = Option<OutstandingTask>> + '_, WorkflowMissingError> { let writelock = self.runs.write(); if writelock.contains_key(run_id) { Ok(RwLockWriteGuard::map(writelock, |hm| { // Unwrap is safe because we hold the lock and just ensured run is in the map &mut hm.get_mut(run_id).unwrap().wft })) } else { Err(WorkflowMissingError { run_id: run_id.to_owned(), }) } } /// Fetch metrics context for a run pub(crate) fn run_metrics( &self, run_id: &str, ) -> Option<impl Deref<Target = MetricsContext> + '_> { let readlock = self.runs.read(); if readlock.get(run_id).is_some() { Some(RwLockReadGuard::map(readlock, |hm| { // Unwraps are safe because we hold the lock and just ensured run is in the map &hm.get(run_id).unwrap().metrics })) } else { None } } /// Stores some work if there is any outstanding WFT or activation for the run. If there was /// not, returns the work back out inside the option. 
pub fn buffer_resp_if_outstanding_work( &self, work: ValidPollWFTQResponse, ) -> Option<ValidPollWFTQResponse> { let mut writelock = self.runs.write(); let run_id = &work.workflow_execution.run_id; if let Some(mut run) = writelock.get_mut(run_id) { if run.wft.is_some() || run.activation.is_some() { debug!(run_id = %run_id, "Got new WFT for a run with outstanding work"); run.buffered_resp = Some(work); None } else { Some(work) } } else { Some(work) } } pub fn insert_wft( &self, run_id: &str, task: OutstandingTask, ) -> Result<(), WorkflowMissingError> { let mut dereffer = self.get_task_mut(run_id)?; *dereffer = Some(task); Ok(()) } /// Indicate it's finished and remove any outstanding workflow task associated with the run pub fn complete_wft( &self, run_id: &str, send_wft_complete_to_srv: bool, ) -> Option<OutstandingTask> { // If the WFT completion wasn't sent to the server, but we did see the final event, we still // want to clear the workflow task. This can really only happen in replay testing, where we // will generate poll responses with complete history but no attached query, and such a WFT // would never really exist. The server wouldn't send a workflow task with nothing to do, // but they are very useful for testing complete replay. 
let saw_final = self .access_sync(run_id, |wfm| wfm.machines.have_seen_terminal_event) .unwrap_or_default(); if !saw_final && !send_wft_complete_to_srv { return None; } let retme = if let Ok(ot) = self.get_task_mut(run_id).as_deref_mut() { (*ot).take() } else { None }; if let Some(ot) = &retme { if let Some(m) = self.run_metrics(run_id) { m.wf_task_latency(ot.start_time.elapsed()); } } retme } pub fn insert_activation( &self, run_id: &str, activation: OutstandingActivation, ) -> Result<Option<OutstandingActivation>, WorkflowMissingError> { let mut writelock = self.runs.write(); let machine_ref = writelock.get_mut(run_id); if let Some(run) = machine_ref { Ok(run.activation.replace(activation)) } else { Err(WorkflowMissingError { run_id: run_id.to_owned(), }) } } pub fn delete_activation(&self, run_id: &str) -> Option<OutstandingActivation> { let mut writelock = self.runs.write(); let machine_ref = writelock.get_mut(run_id); machine_ref.and_then(|run| run.activation.take()) } pub fn exists(&self, run_id: &str) -> bool { self.runs.read().get(run_id).is_some() } /// Create or update some workflow's machines. Borrowed arguments are cloned in the case of a /// new workflow instance. pub async fn create_or_update( &self, run_id: &str, history: HistoryUpdate, workflow_id: &str, namespace: &str, wf_type: &str, parent_metrics: &MetricsContext, ) -> Result<WfActivation> { let span = debug_span!("create_or_update machines", %run_id); if self.runs.read().contains_key(run_id) { let activation = self .access(run_id, move |wfm: &mut WorkflowManager| { async move { let _enter = span.enter(); wfm.machines.metrics.sticky_cache_hit(); wfm.feed_history_from_server(history).await } .boxed() }) .await?; Ok(activation) } else { // Create a new workflow machines instance for this workflow, initialize it, and // track it. 
let metrics = parent_metrics.with_new_attrs([workflow_type(wf_type.to_string())]); let mut wfm = WorkflowManager::new( history, namespace.to_owned(), workflow_id.to_owned(), wf_type.to_owned(), run_id.to_owned(), metrics.clone(), ); match wfm.get_next_activation().await { Ok(activation) => { if activation.jobs.is_empty() { Err(WFMachinesError::Fatal( "Machines created with no jobs".to_string(), )) } else { self.runs .write() .insert(run_id.to_string(), ManagedRun::new(wfm, metrics)); Ok(activation) } } Err(e) => Err(e), } } } pub async fn access<F, Fout>(&self, run_id: &str, mutator: F) -> Result<Fout> where F: for<'a> FnOnce(&'a mut WorkflowManager) -> BoxFuture<Result<Fout>>, Fout: Send + Debug, { let readlock = self.runs.read(); let m = readlock .get(run_id) .ok_or_else(|| WFMachinesError::Fatal("Missing workflow machines".to_string()))?; // This holds a non-async mutex across an await point which is technically a no-no, but // we never access the machines for the same run simultaneously anyway. This should all // get fixed with a generally different approach which moves the runs inside workers. 
let mut wfm_mutex = m.wfm.lock(); let res = mutator(&mut wfm_mutex).await; res } pub fn access_sync<F, Fout>( &self, run_id: &str, mutator: F, ) -> Result<Fout, WorkflowMissingError> where F: for<'a> FnOnce(&'a mut WorkflowManager) -> Fout, Fout: Send + Debug, { let readlock = self.runs.read(); let m = readlock.get(run_id).ok_or_else(|| WorkflowMissingError { run_id: run_id.to_string(), })?; let mut wfm_mutex = m.wfm.lock(); Ok(mutator(&mut wfm_mutex)) } /// Remove the workflow with the provided run id from management pub fn evict(&self, run_id: &str) -> Option<ValidPollWFTQResponse> { let val = self.runs.write().remove(run_id); val.and_then(|v| v.buffered_resp) } /// Clear and return any buffered polling response for this run ID pub fn take_buffered_poll(&self, run_id: &str) -> Option<ValidPollWFTQResponse> { let mut writelock = self.runs.write(); let val = writelock.get_mut(run_id); val.and_then(|v| v.buffered_resp.take()) } pub fn outstanding_wft(&self) -> usize { self.runs .read() .iter() .filter(|(_, run)| run.wft.is_some()) .count() } } #[cfg(test)] mod tests { use super::*; // We test mostly error paths here since the happy paths are well covered by the tests of the // core sdk itself, and setting up the fake data is onerous here. If we make the concurrency // manager generic, testing the happy path is simpler. #[tokio::test] async fn returns_errors_on_creation() { let mgr = WorkflowConcurrencyManager::new(); let res = mgr .create_or_update( "some_run_id", HistoryUpdate::new_from_events(vec![], 0), "fake_wf_id", "fake_namespace", "fake_wf_type", &Default::default(), ) .await; // Should whine that the machines have nothing to do (history empty) assert_matches!(res.unwrap_err(), WFMachinesError::Fatal { .. }); } }
35.544669
100
0.568834
0ef545873deca7cf41680daa27e149dd691bed16
5,843
use pest::iterators::{Pair, Pairs}; use pest::Error; use literal::Literal; use parser::symbolish::parse_symbolish; use parser::Rule; use util::convert_hex_digit; pub fn convert_program(mut pairs: Pairs<Rule>) -> Result<Vec<Literal>, Error<Rule>> { let pair = pairs.next().unwrap(); assert_eq!(pair.as_rule(), Rule::program); pair.into_inner() .map(|pair| { assert_eq!(pair.as_rule(), Rule::value); convert_value(pair.into_inner()) }) .collect() } fn convert_value(mut pairs: Pairs<Rule>) -> Result<Literal, Error<Rule>> { let pair = pairs.next().unwrap(); match pair.as_rule() { Rule::bytes => convert_bytes(pair.into_inner()), Rule::list => convert_list(pair.into_inner()), Rule::rmacro => { let mut pairs = pair.into_inner(); let rmacro = pairs.next().unwrap(); let value = convert_value(pairs.next().unwrap().into_inner())?; assert!(pairs.next().is_none()); convert_rmacro(rmacro, value) } Rule::string => convert_string(pair.into_inner()), Rule::symbolish => parse_symbolish(pair.as_str()).map_err(|err| Error::CustomErrorSpan { message: err, span: pair.into_span(), }), Rule::vector => pair.into_inner() .map(|pair| { assert_eq!(pair.as_rule(), Rule::value); convert_value(pair.into_inner()) }) .collect::<Result<_, _>>() .map(Literal::Vector), r => panic!("Invalid rule: {:?}", r), } } fn convert_list(pairs: Pairs<Rule>) -> Result<Literal, Error<Rule>> { let mut head = Literal::Nil; // TODO: https://github.com/pest-parser/pest/issues/205 let pairs = pairs.collect::<Vec<_>>().into_iter().rev(); for pair in pairs { match pair.as_rule() { Rule::value => { let val = Box::new(convert_value(pair.into_inner())?); head = Literal::Cons(val, Box::new(head)); } Rule::cons_split => { if let Literal::Cons(h, _) = head { head = *h; } else { panic!("Invalid CST") } } r => panic!("Invalid rule: {:?}", r), } } Ok(head) } fn convert_bytes(pairs: Pairs<Rule>) -> Result<Literal, Error<Rule>> { let mut bs = Vec::new(); for pair in pairs { match pair.as_rule() { Rule::bytes_raw_ch => { let s = 
pair.as_str(); assert_eq!(s.len(), 1); let n = s.chars().next().unwrap() as u32; assert!(n <= 0xff); bs.push(n as u8); } Rule::bytes_esc_ch => { let pair = pair.into_inner().next().unwrap(); bs.push(convert_byte_escape(pair)); } r => panic!("Invalid rule: {:?}", r), } } Ok(Literal::Bytes(bs)) } fn convert_rmacro(pair: Pair<Rule>, value: Literal) -> Result<Literal, Error<Rule>> { fn simple_macro(name: &'static str, value: Literal) -> Literal { Literal::Cons( Box::new(Literal::Symbol(name.into())), Box::new(Literal::Cons(Box::new(value), Box::new(Literal::Nil))), ) } Ok(match pair.as_str() { "'" => simple_macro("quote", value), "`" => simple_macro("quasiquote", value), ",@" => simple_macro("unquote-splicing", value), "," => simple_macro("unquote", value), "\\" => Literal::Cons( Box::new(Literal::Symbol("intrinsics:fn".into())), Box::new(Literal::Cons( Box::new(Literal::Cons( Box::new(Literal::Symbol("$".into())), Box::new(Literal::Nil), )), Box::new(Literal::Cons(Box::new(value), Box::new(Literal::Nil))), )), ), "%" => simple_macro("debug-trace", value), rm => panic!("Invalid reader macro: {:?}", rm), }) } fn convert_string(pairs: Pairs<Rule>) -> Result<Literal, Error<Rule>> { let mut s = String::new(); for pair in pairs { match pair.as_rule() { Rule::string_raw_ch => { s += pair.as_str(); } Rule::string_esc_ch => { s.push(convert_string_escape(pair)?); } r => panic!("Invalid rule: {:?}", r), } } Ok(Literal::String(s)) } fn convert_string_escape(old_pair: Pair<Rule>) -> Result<char, Error<Rule>> { let pair = old_pair.clone().into_inner().next().unwrap(); match pair.as_rule() { Rule::string_4_esc | Rule::string_8_esc => { let n = convert_hex_escape(pair.into_inner()); ::std::char::from_u32(n).ok_or_else(move || Error::CustomErrorSpan { message: format!("Invalid Unicode Escape: {}", old_pair.as_str()), span: old_pair.into_span(), }) } _ => Ok(convert_byte_escape(pair) as char), } } fn convert_byte_escape(pair: Pair<Rule>) -> u8 { match pair.as_rule() { Rule::hex_esc => 
convert_hex_escape(pair.into_inner()) as u8, Rule::predef_esc => match pair.as_str() { "0" => b'\0', "n" => b'\n', "r" => b'\r', "t" => b'\t', "\\" => b'\\', "\"" => b'"', "'" => b'\'', e => panic!("Invalid escape: '\\{}'", e), }, r => panic!("Invalid rule: {:?}", r), } } fn convert_hex_escape(pairs: Pairs<Rule>) -> u32 { let mut n = 0; for pair in pairs { assert_eq!(pair.as_rule(), Rule::hex_digit); n = (n << 4) + (convert_hex_digit(pair.as_str()) as u32); } n }
33.388571
96
0.498888
bfa91f3bf89916ad729b55b65f6d456ae553a0fc
47,235
#[cfg(test)] mod session_description_test; use super::common_description::*; use super::error::Error; use super::media_description::*; use super::util::*; use anyhow::Result; use std::collections::HashMap; use std::time::{SystemTime, UNIX_EPOCH}; use std::{fmt, io}; use url::Url; /// Constants for SDP attributes used in JSEP pub const ATTR_KEY_CANDIDATE: &str = "candidate"; pub const ATTR_KEY_END_OF_CANDIDATES: &str = "end-of-candidates"; pub const ATTR_KEY_IDENTITY: &str = "identity"; pub const ATTR_KEY_GROUP: &str = "group"; pub const ATTR_KEY_SSRC: &str = "ssrc"; pub const ATTR_KEY_SSRCGROUP: &str = "ssrc-group"; pub const ATTR_KEY_MSID: &str = "msid"; pub const ATTR_KEY_MSID_SEMANTIC: &str = "msid-semantic"; pub const ATTR_KEY_CONNECTION_SETUP: &str = "setup"; pub const ATTR_KEY_MID: &str = "mid"; pub const ATTR_KEY_ICELITE: &str = "ice-lite"; pub const ATTR_KEY_RTCPMUX: &str = "rtcp-mux"; pub const ATTR_KEY_RTCPRSIZE: &str = "rtcp-rsize"; pub const ATTR_KEY_INACTIVE: &str = "inactive"; pub const ATTR_KEY_RECV_ONLY: &str = "recvonly"; pub const ATTR_KEY_SEND_ONLY: &str = "sendonly"; pub const ATTR_KEY_SEND_RECV: &str = "sendrecv"; pub const ATTR_KEY_EXT_MAP: &str = "extmap"; /// Constants for semantic tokens used in JSEP pub const SEMANTIC_TOKEN_LIP_SYNCHRONIZATION: &str = "LS"; pub const SEMANTIC_TOKEN_FLOW_IDENTIFICATION: &str = "FID"; pub const SEMANTIC_TOKEN_FORWARD_ERROR_CORRECTION: &str = "FEC"; pub const SEMANTIC_TOKEN_WEB_RTCMEDIA_STREAMS: &str = "WMS"; /// Version describes the value provided by the "v=" field which gives /// the version of the Session Description Protocol. pub type Version = isize; /// Origin defines the structure for the "o=" field which provides the /// originator of the session plus a session identifier and version number. 
#[derive(Debug, Default, Clone)] pub struct Origin { pub username: String, pub session_id: u64, pub session_version: u64, pub network_type: String, pub address_type: String, pub unicast_address: String, } impl fmt::Display for Origin { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{} {} {} {} {} {}", self.username, self.session_id, self.session_version, self.network_type, self.address_type, self.unicast_address, ) } } impl Origin { pub fn new() -> Self { Origin { username: "".to_owned(), session_id: 0, session_version: 0, network_type: "".to_owned(), address_type: "".to_owned(), unicast_address: "".to_owned(), } } } /// SessionName describes a structured representations for the "s=" field /// and is the textual session name. pub type SessionName = String; /// EmailAddress describes a structured representations for the "e=" line /// which specifies email contact information for the person responsible for /// the conference. pub type EmailAddress = String; /// PhoneNumber describes a structured representations for the "p=" line /// specify phone contact information for the person responsible for the /// conference. pub type PhoneNumber = String; /// TimeZone defines the structured object for "z=" line which describes /// repeated sessions scheduling. #[derive(Debug, Default, Clone)] pub struct TimeZone { pub adjustment_time: u64, pub offset: i64, } impl fmt::Display for TimeZone { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} {}", self.adjustment_time, self.offset) } } /// TimeDescription describes "t=", "r=" fields of the session description /// which are used to specify the start and stop times for a session as well as /// repeat intervals and durations for the scheduled session. 
#[derive(Debug, Default, Clone)] pub struct TimeDescription { /// t=<start-time> <stop-time> /// https://tools.ietf.org/html/rfc4566#section-5.9 pub timing: Timing, /// r=<repeat interval> <active duration> <offsets from start-time> /// https://tools.ietf.org/html/rfc4566#section-5.10 pub repeat_times: Vec<RepeatTime>, } /// Timing defines the "t=" field's structured representation for the start and /// stop times. #[derive(Debug, Default, Clone)] pub struct Timing { pub start_time: u64, pub stop_time: u64, } impl fmt::Display for Timing { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} {}", self.start_time, self.stop_time) } } /// RepeatTime describes the "r=" fields of the session description which /// represents the intervals and durations for repeated scheduled sessions. #[derive(Debug, Default, Clone)] pub struct RepeatTime { pub interval: i64, pub duration: i64, pub offsets: Vec<i64>, } impl fmt::Display for RepeatTime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut fields = vec![format!("{}", self.interval), format!("{}", self.duration)]; for value in &self.offsets { fields.push(format!("{}", value)); } write!(f, "{}", fields.join(" ")) } } /// SessionDescription is a a well-defined format for conveying sufficient /// information to discover and participate in a multimedia session. 
#[derive(Debug, Default, Clone)] pub struct SessionDescription { /// v=0 /// https://tools.ietf.org/html/rfc4566#section-5.1 pub version: Version, /// o=<username> <sess-id> <sess-version> <nettype> <addrtype> <unicast-address> /// https://tools.ietf.org/html/rfc4566#section-5.2 pub origin: Origin, /// s=<session name> /// https://tools.ietf.org/html/rfc4566#section-5.3 pub session_name: SessionName, /// i=<session description> /// https://tools.ietf.org/html/rfc4566#section-5.4 pub session_information: Option<Information>, /// u=<uri> /// https://tools.ietf.org/html/rfc4566#section-5.5 pub uri: Option<Url>, /// e=<email-address> /// https://tools.ietf.org/html/rfc4566#section-5.6 pub email_address: Option<EmailAddress>, /// p=<phone-number> /// https://tools.ietf.org/html/rfc4566#section-5.6 pub phone_number: Option<PhoneNumber>, /// c=<nettype> <addrtype> <connection-address> /// https://tools.ietf.org/html/rfc4566#section-5.7 pub connection_information: Option<ConnectionInformation>, /// b=<bwtype>:<bandwidth> /// https://tools.ietf.org/html/rfc4566#section-5.8 pub bandwidth: Vec<Bandwidth>, /// https://tools.ietf.org/html/rfc4566#section-5.9 /// https://tools.ietf.org/html/rfc4566#section-5.10 pub time_descriptions: Vec<TimeDescription>, /// z=<adjustment time> <offset> <adjustment time> <offset> ... /// https://tools.ietf.org/html/rfc4566#section-5.11 pub time_zones: Vec<TimeZone>, /// k=<method> /// k=<method>:<encryption key> /// https://tools.ietf.org/html/rfc4566#section-5.12 pub encryption_key: Option<EncryptionKey>, /// a=<attribute> /// a=<attribute>:<value> /// https://tools.ietf.org/html/rfc4566#section-5.13 pub attributes: Vec<Attribute>, /// https://tools.ietf.org/html/rfc4566#section-5.14 pub media_descriptions: Vec<MediaDescription>, } /// Reset cleans the SessionDescription, and sets all fields back to their default values impl SessionDescription { /// API to match draft-ietf-rtcweb-jsep /// Move to webrtc or its own package? 
/// NewJSEPSessionDescription creates a new SessionDescription with /// some settings that are required by the JSEP spec. pub fn new_jsep_session_description(identity: bool) -> Self { let d = SessionDescription { version: 0, origin: Origin { username: "-".to_string(), session_id: new_session_id(), session_version: SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards") .subsec_nanos() as u64, network_type: "IN".to_string(), address_type: "IP4".to_string(), unicast_address: "0.0.0.0".to_string(), }, session_name: "-".to_string(), session_information: None, uri: None, email_address: None, phone_number: None, connection_information: None, bandwidth: vec![], time_descriptions: vec![TimeDescription { timing: Timing { start_time: 0, stop_time: 0, }, repeat_times: vec![], }], time_zones: vec![], encryption_key: None, attributes: vec![], // TODO: implement trickle ICE media_descriptions: vec![], }; if identity { d.with_property_attribute(ATTR_KEY_IDENTITY.to_string()) } else { d } } /// WithPropertyAttribute adds a property attribute 'a=key' to the session description pub fn with_property_attribute(mut self, key: String) -> Self { self.attributes.push(Attribute::new(key, None)); self } /// WithValueAttribute adds a value attribute 'a=key:value' to the session description pub fn with_value_attribute(mut self, key: String, value: String) -> Self { self.attributes.push(Attribute::new(key, Some(value))); self } /// WithFingerprint adds a fingerprint to the session description pub fn with_fingerprint(self, algorithm: String, value: String) -> Self { self.with_value_attribute("fingerprint".to_string(), algorithm + " " + value.as_str()) } /// WithMedia adds a media description to the session description pub fn with_media(mut self, md: MediaDescription) -> Self { self.media_descriptions.push(md); self } fn build_codec_map(&self) -> HashMap<u8, Codec> { let mut codecs: HashMap<u8, Codec> = HashMap::new(); for m in &self.media_descriptions { for a in 
&m.attributes { let attr = a.to_string(); if attr.starts_with("rtpmap:") { if let Ok(codec) = parse_rtpmap(&attr) { merge_codecs(codec, &mut codecs); } } else if attr.starts_with("fmtp:") { if let Ok(codec) = parse_fmtp(&attr) { merge_codecs(codec, &mut codecs); } } else if attr.starts_with("rtcp-fb:") { if let Ok(codec) = parse_rtcp_fb(&attr) { merge_codecs(codec, &mut codecs); } } } } codecs } /// get_codec_for_payload_type scans the SessionDescription for the given payload type and returns the codec pub fn get_codec_for_payload_type(&self, payload_type: u8) -> Result<Codec> { let codecs = self.build_codec_map(); if let Some(codec) = codecs.get(&payload_type) { Ok(codec.clone()) } else { Err(Error::PayloadTypeNotFound.into()) } } /// get_payload_type_for_codec scans the SessionDescription for a codec that matches the provided codec /// as closely as possible and returns its payload type pub fn get_payload_type_for_codec(&self, wanted: &Codec) -> Result<u8> { let codecs = self.build_codec_map(); for (payload_type, codec) in codecs.iter() { if codecs_match(wanted, codec) { return Ok(*payload_type); } } Err(Error::CodecNotFound.into()) } /// Attribute returns the value of an attribute and if it exists pub fn attribute(&self, key: &str) -> Option<&String> { for a in &self.attributes { if a.key == key { return a.value.as_ref(); } } None } /// Marshal takes a SDP struct to text /// https://tools.ietf.org/html/rfc4566#section-5 /// Session description /// v= (protocol version) /// o= (originator and session identifier) /// s= (session name) /// i=* (session information) /// u=* (URI of description) /// e=* (email address) /// p=* (phone number) /// c=* (connection information -- not required if included in /// all media) /// b=* (zero or more bandwidth information lines) /// One or more time descriptions ("t=" and "r=" lines; see below) /// z=* (time zone adjustments) /// k=* (encryption key) /// a=* (zero or more session attribute lines) /// Zero or more media 
descriptions /// /// Time description /// t= (time the session is active) /// r=* (zero or more repeat times) /// /// Media description, if present /// m= (media name and transport address) /// i=* (media title) /// c=* (connection information -- optional if included at /// session level) /// b=* (zero or more bandwidth information lines) /// k=* (encryption key) /// a=* (zero or more media attribute lines) pub fn marshal(&self) -> String { let mut result = String::new(); result += key_value_build("v=", Some(&self.version.to_string())).as_str(); result += key_value_build("o=", Some(&self.origin.to_string())).as_str(); result += key_value_build("s=", Some(&self.session_name)).as_str(); result += key_value_build("i=", self.session_information.as_ref()).as_str(); if let Some(uri) = &self.uri { result += key_value_build("u=", Some(&format!("{}", uri))).as_str(); } result += key_value_build("e=", self.email_address.as_ref()).as_str(); result += key_value_build("p=", self.phone_number.as_ref()).as_str(); if let Some(connection_information) = &self.connection_information { result += key_value_build("c=", Some(&connection_information.to_string())).as_str(); } for bandwidth in &self.bandwidth { result += key_value_build("b=", Some(&bandwidth.to_string())).as_str(); } for time_description in &self.time_descriptions { result += key_value_build("t=", Some(&time_description.timing.to_string())).as_str(); for repeat_time in &time_description.repeat_times { result += key_value_build("r=", Some(&repeat_time.to_string())).as_str(); } } if !self.time_zones.is_empty() { let mut time_zones = vec![]; for time_zone in &self.time_zones { time_zones.push(time_zone.to_string()); } result += key_value_build("z=", Some(&time_zones.join(" "))).as_str(); } result += key_value_build("k=", self.encryption_key.as_ref()).as_str(); for attribute in &self.attributes { result += key_value_build("a=", Some(&attribute.to_string())).as_str(); } for media_description in &self.media_descriptions { result 
+= key_value_build("m=", Some(&media_description.media_name.to_string())).as_str(); result += key_value_build("i=", media_description.media_title.as_ref()).as_str(); if let Some(connection_information) = &media_description.connection_information { result += key_value_build("c=", Some(&connection_information.to_string())).as_str(); } for bandwidth in &media_description.bandwidth { result += key_value_build("b=", Some(&bandwidth.to_string())).as_str(); } result += key_value_build("k=", media_description.encryption_key.as_ref()).as_str(); for attribute in &media_description.attributes { result += key_value_build("a=", Some(&attribute.to_string())).as_str(); } } result } /// Unmarshal is the primary function that deserializes the session description /// message and stores it inside of a structured SessionDescription object. /// /// The States Transition Table describes the computation flow between functions /// (namely s1, s2, s3, ...) for a parsing procedure that complies with the /// specifications laid out by the rfc4566#section-5 as well as by JavaScript /// Session Establishment Protocol draft. 
Links: /// https://tools.ietf.org/html/rfc4566#section-5 /// https://tools.ietf.org/html/draft-ietf-rtcweb-jsep-24 /// /// https://tools.ietf.org/html/rfc4566#section-5 /// Session description /// v= (protocol version) /// o= (originator and session identifier) /// s= (session name) /// i=* (session information) /// u=* (URI of description) /// e=* (email address) /// p=* (phone number) /// c=* (connection information -- not required if included in /// all media) /// b=* (zero or more bandwidth information lines) /// One or more time descriptions ("t=" and "r=" lines; see below) /// z=* (time zone adjustments) /// k=* (encryption key) /// a=* (zero or more session attribute lines) /// Zero or more media descriptions /// /// Time description /// t= (time the session is active) /// r=* (zero or more repeat times) /// /// Media description, if present /// m= (media name and transport address) /// i=* (media title) /// c=* (connection information -- optional if included at /// session level) /// b=* (zero or more bandwidth information lines) /// k=* (encryption key) /// a=* (zero or more media attribute lines) /// /// In order to generate the following state table and draw subsequent /// deterministic finite-state automota ("DFA") the following regex was used to /// derive the DFA: /// vosi?u?e?p?c?b*(tr*)+z?k?a*(mi?c?b*k?a*)* /// possible place and state to exit: /// ** * * * ** * * * * /// 99 1 1 1 11 1 1 1 1 /// 3 1 1 26 5 5 4 4 /// /// Please pay close attention to the `k`, and `a` parsing states. In the table /// below in order to distinguish between the states belonging to the media /// description as opposed to the session description, the states are marked /// with an asterisk ("a*", "k*"). 
/// +--------+----+-------+----+-----+----+-----+---+----+----+---+---+-----+---+---+----+---+----+
/// | STATES | a* | a*,k* | a  | a,k | b  | b,c | e | i  | m  | o | p | r,t | s | t | u  | v | z  |
/// +--------+----+-------+----+-----+----+-----+---+----+----+---+---+-----+---+---+----+---+----+
/// | s1     |    |       |    |     |    |     |   |    |    |   |   |     |   |   |    | 2 |    |
/// | s2     |    |       |    |     |    |     |   |    |    | 3 |   |     |   |   |    |   |    |
/// | s3     |    |       |    |     |    |     |   |    |    |   |   |     | 4 |   |    |   |    |
/// | s4     |    |       |    |     |    | 5   | 6 | 7  |    |   | 8 |     |   | 9 | 10 |   |    |
/// | s5     |    |       |    |     | 5  |     |   |    |    |   |   |     |   | 9 |    |   |    |
/// | s6     |    |       |    |     |    | 5   |   |    |    |   | 8 |     |   | 9 |    |   |    |
/// | s7     |    |       |    |     |    | 5   | 6 |    |    |   | 8 |     |   | 9 | 10 |   |    |
/// | s8     |    |       |    |     |    | 5   |   |    |    |   |   |     |   | 9 |    |   |    |
/// | s9     |    |       |    | 11  |    |     |   |    | 12 |   |   | 9   |   |   |    |   | 13 |
/// | s10    |    |       |    |     |    | 5   | 6 |    |    |   | 8 |     |   | 9 |    |   |    |
/// | s11    |    |       | 11 |     |    |     |   |    | 12 |   |   |     |   |   |    |   |    |
/// | s12    |    | 14    |    |     |    | 15  |   | 16 | 12 |   |   |     |   |   |    |   |    |
/// | s13    |    |       |    | 11  |    |     |   |    | 12 |   |   |     |   |   |    |   |    |
/// | s14    | 14 |       |    |     |    |     |   |    | 12 |   |   |     |   |   |    |   |    |
/// | s15    |    | 14    |    |     | 15 |     |   |    | 12 |   |   |     |   |   |    |   |    |
/// | s16    |    | 14    |    |     |    | 15  |   |    | 12 |   |   |     |   |   |    |   |    |
/// +--------+----+-------+----+-----+----+-----+---+----+----+---+---+-----+---+---+----+---+----+
pub fn unmarshal<R: io::BufRead + io::Seek>(reader: &mut R) -> Result<Self> {
    // Start from an empty session description; the state functions below
    // fill it in field by field as lines are consumed from the reader.
    let mut lexer = Lexer {
        desc: SessionDescription {
            version: 0,
            origin: Origin::new(),
            session_name: "".to_owned(),
            session_information: None,
            uri: None,
            email_address: None,
            phone_number: None,
            connection_information: None,
            bandwidth: vec![],
            time_descriptions: vec![],
            time_zones: vec![],
            encryption_key: None,
            attributes: vec![],
            media_descriptions: vec![],
        },
        reader,
    };

    // Drive the DFA: each state function consumes one "<key>=" prefix (and
    // possibly its value) and returns the next state, or None at end of input.
    let mut state = Some(StateFn { f: s1 });
    while let Some(s) = state {
        state = (s.f)(&mut lexer)?;
    }

    Ok(lexer.desc)
}
}

/// State 1: the description MUST begin with a "v=" (protocol version) line.
fn s1<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    if &key == "v=" {
        return Ok(Some(StateFn {
            f: unmarshal_protocol_version,
        }));
    }

    Err(Error::SdpInvalidSyntax(key).into())
}

/// State 2: after "v=", an "o=" (origin) line is required.
fn s2<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    if &key == "o=" {
        return Ok(Some(StateFn {
            f: unmarshal_origin,
        }));
    }

    Err(Error::SdpInvalidSyntax(key).into())
}

/// State 3: after "o=", an "s=" (session name) line is required.
fn s3<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    if &key == "s=" {
        return Ok(Some(StateFn {
            f: unmarshal_session_name,
        }));
    }

    Err(Error::SdpInvalidSyntax(key).into())
}

/// State 4: optional session-level fields (i/u/e/p/c/b) or the mandatory "t=".
fn s4<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    match key.as_str() {
        "i=" => Ok(Some(StateFn {
            f: unmarshal_session_information,
        })),
        "u=" => Ok(Some(StateFn { f: unmarshal_uri })),
        "e=" => Ok(Some(StateFn { f: unmarshal_email })),
        "p=" => Ok(Some(StateFn { f: unmarshal_phone })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_session_connection_information,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_session_bandwidth,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 5: after "c=" or "b=", only more "b=" lines or "t=" may follow.
fn s5<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    match key.as_str() {
        "b=" => Ok(Some(StateFn {
            f: unmarshal_session_bandwidth,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 6: after "e=", allow p/c/b or "t=".
fn s6<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    match key.as_str() {
        "p=" => Ok(Some(StateFn { f: unmarshal_phone })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_session_connection_information,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_session_bandwidth,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 7: after "i=", allow u/e/p/c/b or "t=".
fn s7<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    match key.as_str() {
        "u=" => Ok(Some(StateFn { f: unmarshal_uri })),
        "e=" => Ok(Some(StateFn { f: unmarshal_email })),
        "p=" => Ok(Some(StateFn { f: unmarshal_phone })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_session_connection_information,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_session_bandwidth,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 8: after "p=", allow c/b or "t=".
fn s8<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    match key.as_str() {
        "c=" => Ok(Some(StateFn {
            f: unmarshal_session_connection_information,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_session_bandwidth,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 9: after "t=" — repeats, more timings, zones, key, attributes, media,
/// or clean end of input. This is one of the valid exit states.
fn s9<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    // An empty read means EOF: parsing may legally stop here.
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "z=" => Ok(Some(StateFn {
            f: unmarshal_time_zones,
        })),
        "k=" => Ok(Some(StateFn {
            f: unmarshal_session_encryption_key,
        })),
        "a=" => Ok(Some(StateFn {
            f: unmarshal_session_attribute,
        })),
        "r=" => Ok(Some(StateFn {
            f: unmarshal_repeat_times,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 10: after "u=", allow e/p/c/b or "t=".
fn s10<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, _) = read_type(lexer.reader)?;
    match key.as_str() {
        "e=" => Ok(Some(StateFn { f: unmarshal_email })),
        "p=" => Ok(Some(StateFn { f: unmarshal_phone })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_session_connection_information,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_session_bandwidth,
        })),
        "t=" => Ok(Some(StateFn {
            f: unmarshal_timing,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 11: session-level attributes ("a=") or the first media section; may
/// also terminate cleanly at EOF.
fn s11<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "a=" => Ok(Some(StateFn {
            f: unmarshal_session_attribute,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 12: immediately after an "m=" line — any optional media-level field,
/// the next "m=", or EOF.
fn s12<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "a=" => Ok(Some(StateFn {
            f: unmarshal_media_attribute,
        })),
        "k=" => Ok(Some(StateFn {
            f: unmarshal_media_encryption_key,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_media_bandwidth,
        })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_media_connection_information,
        })),
        "i=" => Ok(Some(StateFn {
            f: unmarshal_media_title,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 13: after "z=" — session attributes/key, a media section, or EOF.
fn s13<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "a=" => Ok(Some(StateFn {
            f: unmarshal_session_attribute,
        })),
        "k=" => Ok(Some(StateFn {
            f: unmarshal_session_encryption_key,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 14: after a media "a=" or "k=". Several transitions are accepted
/// out of RFC 4566 order for interoperability with lax producers.
fn s14<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "a=" => Ok(Some(StateFn {
            f: unmarshal_media_attribute,
        })),
        // Non-spec ordering
        "k=" => Ok(Some(StateFn {
            f: unmarshal_media_encryption_key,
        })),
        // Non-spec ordering
        "b=" => Ok(Some(StateFn {
            f: unmarshal_media_bandwidth,
        })),
        // Non-spec ordering
        "c=" => Ok(Some(StateFn {
            f: unmarshal_media_connection_information,
        })),
        // Non-spec ordering
        "i=" => Ok(Some(StateFn {
            f: unmarshal_media_title,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 15: after a media "c=" or "b=" line.
fn s15<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "a=" => Ok(Some(StateFn {
            f: unmarshal_media_attribute,
        })),
        "k=" => Ok(Some(StateFn {
            f: unmarshal_media_encryption_key,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_media_bandwidth,
        })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_media_connection_information,
        })),
        // Non-spec ordering
        "i=" => Ok(Some(StateFn {
            f: unmarshal_media_title,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// State 16: after a media "i=" (title) line.
fn s16<'a, R: io::BufRead + io::Seek>(lexer: &mut Lexer<'a, R>) -> Result<Option<StateFn<'a, R>>> {
    let (key, num_bytes) = read_type(lexer.reader)?;
    if key.is_empty() && num_bytes == 0 {
        return Ok(None);
    }

    match key.as_str() {
        "a=" => Ok(Some(StateFn {
            f: unmarshal_media_attribute,
        })),
        "k=" => Ok(Some(StateFn {
            f: unmarshal_media_encryption_key,
        })),
        "c=" => Ok(Some(StateFn {
            f: unmarshal_media_connection_information,
        })),
        "b=" => Ok(Some(StateFn {
            f: unmarshal_media_bandwidth,
        })),
        // Non-spec ordering
        "i=" => Ok(Some(StateFn {
            f: unmarshal_media_title,
        })),
        "m=" => Ok(Some(StateFn {
            f: unmarshal_media_description,
        })),
        _ => Err(Error::SdpInvalidSyntax(key).into()),
    }
}

/// Parses the "v=" value. The version must be exactly 0.
fn unmarshal_protocol_version<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    let version = value.parse::<u32>()?;

    // As of the latest draft of the rfc this value is required to be 0.
    // https://tools.ietf.org/html/draft-ietf-rtcweb-jsep-24#section-5.8.1
    if version != 0 {
        return Err(Error::SdpInvalidSyntax(value).into());
    }

    Ok(Some(StateFn { f: s2 }))
}

/// Parses "o=<username> <sess-id> <sess-version> <nettype> <addrtype> <unicast-address>".
fn unmarshal_origin<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    let fields: Vec<&str> = value.split_whitespace().collect();
    if fields.len() != 6 {
        return Err(Error::SdpInvalidSyntax(format!("`o={}`", value)).into());
    }

    let session_id = fields[1].parse::<u64>()?;
    let session_version = fields[2].parse::<u64>()?;

    // Set according to currently registered with IANA
    // https://tools.ietf.org/html/rfc4566#section-8.2.6
    let i = index_of(fields[3], &["IN"]);
    if i == -1 {
        return Err(Error::SdpInvalidValue(fields[3].to_owned()).into());
    }

    // Set according to currently registered with IANA
    // https://tools.ietf.org/html/rfc4566#section-8.2.7
    let i = index_of(fields[4], &["IP4", "IP6"]);
    if i == -1 {
        return Err(Error::SdpInvalidValue(fields[4].to_owned()).into());
    }

    // TODO: validate the unicast address (fields[5]) — currently stored as-is.
    lexer.desc.origin = Origin {
        username: fields[0].to_owned(),
        session_id,
        session_version,
        network_type: fields[3].to_owned(),
        address_type: fields[4].to_owned(),
        unicast_address: fields[5].to_owned(),
    };

    Ok(Some(StateFn { f: s3 }))
}

/// Parses "s=<session name>".
fn unmarshal_session_name<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.session_name = value;
    Ok(Some(StateFn { f: s4 }))
}

/// Parses the session-level "i=<session information>" line.
fn unmarshal_session_information<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.session_information = Some(value);
    Ok(Some(StateFn { f: s7 }))
}

/// Parses "u=<uri>"; the value must be a well-formed URL.
fn unmarshal_uri<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.uri = Some(Url::parse(&value)?);
    Ok(Some(StateFn { f: s10 }))
}

/// Parses "e=<email address>". Stored verbatim, no format validation.
fn unmarshal_email<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.email_address = Some(value);
    Ok(Some(StateFn { f: s6 }))
}

/// Parses "p=<phone number>". Stored verbatim, no format validation.
fn unmarshal_phone<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.phone_number = Some(value);
    Ok(Some(StateFn { f: s8 }))
}

/// Parses the session-level "c=" line into `connection_information`.
fn unmarshal_session_connection_information<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.connection_information = unmarshal_connection_information(&value)?;
    Ok(Some(StateFn { f: s5 }))
}

/// Parses "c=<nettype> <addrtype> [<connection-address>]" into a
/// `ConnectionInformation`, shared by the session- and media-level handlers.
fn unmarshal_connection_information(value: &str) -> Result<Option<ConnectionInformation>> {
    let fields: Vec<&str> = value.split_whitespace().collect();
    if fields.len() < 2 {
        return Err(Error::SdpInvalidSyntax(format!("`c={}`", value)).into());
    }

    // Set according to currently registered with IANA
    // https://tools.ietf.org/html/rfc4566#section-8.2.6
    let i = index_of(fields[0], &["IN"]);
    if i == -1 {
        return Err(Error::SdpInvalidValue(fields[0].to_owned()).into());
    }

    // Set according to currently registered with IANA
    // https://tools.ietf.org/html/rfc4566#section-8.2.7
    let i = index_of(fields[1], &["IP4", "IP6"]);
    if i == -1 {
        return Err(Error::SdpInvalidValue(fields[1].to_owned()).into());
    }

    // The address itself is optional; TTL/range sub-fields are not parsed here.
    let address = if fields.len() > 2 {
        Some(Address {
            address: fields[2].to_owned(),
            ttl: None,
            range: None,
        })
    } else {
        None
    };

    Ok(Some(ConnectionInformation {
        network_type: fields[0].to_owned(),
        address_type: fields[1].to_owned(),
        address,
    }))
}

/// Parses a session-level "b=" line and appends it to the bandwidth list.
fn unmarshal_session_bandwidth<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.bandwidth.push(unmarshal_bandwidth(&value)?);
    Ok(Some(StateFn { f: s5 }))
}

/// Parses "b=<bwtype>:<bandwidth>". Types prefixed with "X-" are accepted as
/// experimental (with the prefix stripped); otherwise only CT/AS are allowed.
fn unmarshal_bandwidth(value: &str) -> Result<Bandwidth> {
    let mut parts: Vec<&str> = value.split(':').collect();
    if parts.len() != 2 {
        return Err(Error::SdpInvalidSyntax(format!("`b={}`", value)).into());
    }

    let experimental = parts[0].starts_with("X-");
    if experimental {
        parts[0] = parts[0].trim_start_matches("X-");
    } else {
        // Set according to currently registered with IANA
        // https://tools.ietf.org/html/rfc4566#section-5.8
        let i = index_of(parts[0], &["CT", "AS"]);
        if i == -1 {
            return Err(Error::SdpInvalidValue(parts[0].to_owned()).into());
        }
    }

    let bandwidth = parts[1].parse::<u64>()?;

    Ok(Bandwidth {
        experimental,
        bandwidth_type: parts[0].to_owned(),
        bandwidth,
    })
}

/// Parses "t=<start-time> <stop-time>" and starts a new time description.
fn unmarshal_timing<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    let fields: Vec<&str> = value.split_whitespace().collect();
    if fields.len() < 2 {
        return Err(Error::SdpInvalidSyntax(format!("`t={}`", value)).into());
    }

    let start_time = fields[0].parse::<u64>()?;
    let stop_time = fields[1].parse::<u64>()?;

    lexer.desc.time_descriptions.push(TimeDescription {
        timing: Timing {
            start_time,
            stop_time,
        },
        repeat_times: vec![],
    });

    Ok(Some(StateFn { f: s9 }))
}

/// Parses "r=<interval> <duration> <offset>..." and attaches the repeat to the
/// most recent time description. Fails if no "t=" line preceded it.
fn unmarshal_repeat_times<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    let fields: Vec<&str> = value.split_whitespace().collect();
    if fields.len() < 3 {
        return Err(Error::SdpInvalidSyntax(format!("`r={}`", value)).into());
    }

    if let Some(latest_time_desc) = lexer.desc.time_descriptions.last_mut() {
        let interval = parse_time_units(fields[0])?;
        let duration = parse_time_units(fields[1])?;

        let mut offsets = vec![];
        for field in fields.iter().skip(2) {
            let offset = parse_time_units(field)?;
            offsets.push(offset);
        }

        latest_time_desc.repeat_times.push(RepeatTime {
            interval,
            duration,
            offsets,
        });

        Ok(Some(StateFn { f: s9 }))
    } else {
        Err(Error::SdpEmptyTimeDescription.into())
    }
}

/// Parses "z=<adjustment time> <offset> ..." (time zone adjustments).
fn unmarshal_time_zones<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    // These fields are transmitted in pairs
    // z=<adjustment time> <offset> <adjustment time> <offset> ....
    // so we are making sure that there are actually multiple of 2 total.
    let fields: Vec<&str> = value.split_whitespace().collect();
    if fields.len() % 2 != 0 {
        // Fixed: this error previously reported the key as `t=` (copy-paste
        // from the timing parser); the failing line is a `z=` line.
        return Err(Error::SdpInvalidSyntax(format!("`z={}`", value)).into());
    }

    for i in (0..fields.len()).step_by(2) {
        let adjustment_time = fields[i].parse::<u64>()?;
        let offset = parse_time_units(fields[i + 1])?;

        lexer.desc.time_zones.push(TimeZone {
            adjustment_time,
            offset,
        });
    }

    Ok(Some(StateFn { f: s13 }))
}

/// Parses the session-level "k=<encryption key>" line.
fn unmarshal_session_encryption_key<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;
    lexer.desc.encryption_key = Some(value);
    Ok(Some(StateFn { f: s11 }))
}

/// Parses a session-level "a=<key>[:<value>]" attribute line.
fn unmarshal_session_attribute<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    // Split on the first ':' only — attribute values may themselves contain ':'.
    let fields: Vec<&str> = value.splitn(2, ':').collect();
    let attribute = if fields.len() == 2 {
        Attribute {
            key: fields[0].to_owned(),
            value: Some(fields[1].to_owned()),
        }
    } else {
        Attribute {
            key: fields[0].to_owned(),
            value: None,
        }
    };
    lexer.desc.attributes.push(attribute);

    Ok(Some(StateFn { f: s11 }))
}

/// Parses "m=<media> <port>[/<range>] <proto>[/<proto>...] <fmt>..." and
/// starts a new media description.
fn unmarshal_media_description<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    let fields: Vec<&str> = value.split_whitespace().collect();
    if fields.len() < 4 {
        return Err(Error::SdpInvalidSyntax(format!("`m={}`", value)).into());
    }

    // <media>
    // Set according to currently registered with IANA
    // https://tools.ietf.org/html/rfc4566#section-5.14
    let i = index_of(
        fields[0],
        &["audio", "video", "text", "application", "message"],
    );
    if i == -1 {
        return Err(Error::SdpInvalidValue(fields[0].to_owned()).into());
    }

    // <port>
    let parts: Vec<&str> = fields[1].split('/').collect();
    let port_value = parts[0].parse::<u16>()? as isize;
    let port_range = if parts.len() > 1 {
        Some(parts[1].parse::<i32>()? as isize)
    } else {
        None
    };

    // <proto>
    // Set according to currently registered with IANA
    // https://tools.ietf.org/html/rfc4566#section-5.14
    let mut protos = vec![];
    for proto in fields[2].split('/').collect::<Vec<&str>>() {
        let i = index_of(
            proto,
            &[
                "UDP", "RTP", "AVP", "SAVP", "SAVPF", "TLS", "DTLS", "SCTP", "AVPF",
            ],
        );
        if i == -1 {
            return Err(Error::SdpInvalidValue(fields[2].to_owned()).into());
        }
        protos.push(proto.to_owned());
    }

    // <fmt>...
    let mut formats = vec![];
    for field in fields.iter().skip(3) {
        formats.push(field.to_string());
    }

    lexer.desc.media_descriptions.push(MediaDescription {
        media_name: MediaName {
            media: fields[0].to_owned(),
            port: RangedPort {
                value: port_value,
                range: port_range,
            },
            protos,
            formats,
        },
        media_title: None,
        connection_information: None,
        bandwidth: vec![],
        encryption_key: None,
        attributes: vec![],
    });

    Ok(Some(StateFn { f: s12 }))
}

/// Parses a media-level "i=<media title>" line.
fn unmarshal_media_title<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    if let Some(latest_media_desc) = lexer.desc.media_descriptions.last_mut() {
        latest_media_desc.media_title = Some(value);
        Ok(Some(StateFn { f: s16 }))
    } else {
        // NOTE(review): the variant name refers to time descriptions, but the
        // missing element here is a media description — kept as-is to preserve
        // the error contract callers may match on; confirm before renaming.
        Err(Error::SdpEmptyTimeDescription.into())
    }
}

/// Parses a media-level "c=" line for the current media description.
fn unmarshal_media_connection_information<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    if let Some(latest_media_desc) = lexer.desc.media_descriptions.last_mut() {
        latest_media_desc.connection_information = unmarshal_connection_information(&value)?;
        Ok(Some(StateFn { f: s15 }))
    } else {
        Err(Error::SdpEmptyTimeDescription.into())
    }
}

/// Parses a media-level "b=" line for the current media description.
fn unmarshal_media_bandwidth<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    if let Some(latest_media_desc) = lexer.desc.media_descriptions.last_mut() {
        let bandwidth = unmarshal_bandwidth(&value)?;
        latest_media_desc.bandwidth.push(bandwidth);
        Ok(Some(StateFn { f: s15 }))
    } else {
        Err(Error::SdpEmptyTimeDescription.into())
    }
}

/// Parses a media-level "k=<encryption key>" line.
fn unmarshal_media_encryption_key<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    if let Some(latest_media_desc) = lexer.desc.media_descriptions.last_mut() {
        latest_media_desc.encryption_key = Some(value);
        Ok(Some(StateFn { f: s14 }))
    } else {
        Err(Error::SdpEmptyTimeDescription.into())
    }
}

/// Parses a media-level "a=<key>[:<value>]" attribute line.
fn unmarshal_media_attribute<'a, R: io::BufRead + io::Seek>(
    lexer: &mut Lexer<'a, R>,
) -> Result<Option<StateFn<'a, R>>> {
    let (value, _) = read_value(lexer.reader)?;

    let fields: Vec<&str> = value.splitn(2, ':').collect();
    let attribute = if fields.len() == 2 {
        Attribute {
            key: fields[0].to_owned(),
            value: Some(fields[1].to_owned()),
        }
    } else {
        Attribute {
            key: fields[0].to_owned(),
            value: None,
        }
    };

    if let Some(latest_media_desc) = lexer.desc.media_descriptions.last_mut() {
        latest_media_desc.attributes.push(attribute);
        Ok(Some(StateFn { f: s14 }))
    } else {
        Err(Error::SdpEmptyTimeDescription.into())
    }
}

/// Converts an SDP typed-time token into seconds.
///
/// Some time offsets in the protocol can be provided with a shorthand
/// notation ('d' days, 'h' hours, 'm' minutes, 's' seconds). This code
/// ensures to convert it to NTP timestamp format; a plain number is taken
/// as seconds. Overflow on the multiply is reported as an invalid value.
fn parse_time_units(value: &str) -> Result<i64> {
    let val = value.as_bytes();
    let len = val.len();
    let (num, factor) = match val.last() {
        Some(b'd') => (&value[..len - 1], 86400), // days
        Some(b'h') => (&value[..len - 1], 3600),  // hours
        Some(b'm') => (&value[..len - 1], 60),    // minutes
        Some(b's') => (&value[..len - 1], 1),     // seconds (allowed for completeness)
        _ => (value, 1),
    };

    num.parse::<i64>()?
        .checked_mul(factor)
        .ok_or_else(|| Error::SdpInvalidValue(value.to_owned()).into())
}
35.595328
113
0.532296
bf465f9573e79006f80c3e5190d5643db6123ae4
159
// Entry points for the pointer-dataset conversion tests. Each test simply
// delegates to the corresponding driver in the `pointer_data` module, which
// holds the fixture data and assertion logic.
mod pointer_data;

/// Runs the "from Lab" fixture suite via `pointer_data::run_from_lab_tests`.
#[test]
pub fn from_lab() {
    pointer_data::run_from_lab_tests();
}

/// Runs the "from LCh" fixture suite via `pointer_data::run_from_lch_tests`.
#[test]
pub fn from_lch() {
    pointer_data::run_from_lch_tests();
}
14.454545
39
0.685535