Columns:

  hexsha             stringlengths   40 .. 40
  size               int64           2 .. 1.05M
  content            stringlengths   2 .. 1.05M
  avg_line_length    float64         1.33 .. 100
  max_line_length    int64           1 .. 1k
  alphanum_fraction  float64         0.25 .. 1

hexsha: b9f403487c8ae5fe473fe5589c875cf81fe4894a
size: 2,038
content:

extern crate bip_handshake;
extern crate futures;
extern crate tokio_core;

use std::time::Duration;
use std::thread;
use std::io::{self, Write, BufRead};
use std::net::{SocketAddr, ToSocketAddrs};

use bip_handshake::{HandshakerBuilder, InitiateMessage, Protocol};
use bip_handshake::transports::TcpTransport;

use futures::{Future, Sink, Stream};

use tokio_core::reactor::Core;

fn main() {
    let mut stdout = io::stdout();
    let stdin = io::stdin();
    let mut lines = stdin.lock().lines();

    stdout.write(b"Enter An InfoHash In Hex Format: ").unwrap();
    stdout.flush().unwrap();

    let hex_hash = lines.next().unwrap().unwrap();
    let hash = hex_to_bytes(&hex_hash).into();

    stdout.write(b"Enter An Address And Port (eg: addr:port): ").unwrap();
    stdout.flush().unwrap();

    let str_addr = lines.next().unwrap().unwrap();
    let addr = str_to_addr(&str_addr);

    let mut core = Core::new().unwrap();

    // Show up as a uTorrent client...
    let peer_id = (*b"-UT2060-000000000000").into();

    let handshaker = HandshakerBuilder::new()
        .with_peer_id(peer_id)
        .build(TcpTransport, core.handle())
        .unwrap()
        .send(InitiateMessage::new(Protocol::BitTorrent, hash, addr))
        .wait()
        .unwrap();

    let _peer = core.run(
        handshaker.into_future().map(|(opt_peer, _)| opt_peer.unwrap())
    ).unwrap_or_else(|_| panic!(""));

    println!("\nConnection With Peer Established...Closing In 10 Seconds");

    thread::sleep(Duration::from_millis(10000));
}

fn hex_to_bytes(hex: &str) -> [u8; 20] {
    let mut exact_bytes = [0u8; 20];

    for byte_index in 0..20 {
        let high_index = byte_index * 2;
        let low_index = (byte_index * 2) + 1;

        let hex_chunk = &hex[high_index..low_index + 1];
        let byte_value = u8::from_str_radix(hex_chunk, 16).unwrap();

        exact_bytes[byte_index] = byte_value;
    }

    exact_bytes
}

fn str_to_addr(addr: &str) -> SocketAddr {
    addr.to_socket_addrs().unwrap().next().unwrap()
}

avg_line_length: 28.704225
max_line_length: 75
alphanum_fraction: 0.638371

hexsha: ccd152be66ad547eefb7bf8493b60f32b9d158f5
size: 1,964
content:

use crate::{
    client::Client,
    error::Error,
    request::{Pending, Request},
    routing::Route,
};
use serde::Serialize;
use twilight_model::id::{ChannelId, GuildId, UserId};

#[derive(Serialize)]
struct UpdateUserVoiceStateFields {
    channel_id: ChannelId,
    #[serde(skip_serializing_if = "Option::is_none")]
    suppress: Option<bool>,
}

/// Update another user's voice state.
pub struct UpdateUserVoiceState<'a> {
    fields: UpdateUserVoiceStateFields,
    fut: Option<Pending<'a, ()>>,
    guild_id: GuildId,
    http: &'a Client,
    user_id: UserId,
}

impl<'a> UpdateUserVoiceState<'a> {
    pub(crate) fn new(
        http: &'a Client,
        guild_id: GuildId,
        user_id: UserId,
        channel_id: ChannelId,
    ) -> Self {
        Self {
            fields: UpdateUserVoiceStateFields {
                channel_id,
                suppress: None,
            },
            fut: None,
            guild_id,
            http,
            user_id,
        }
    }

    /// Toggle the user's suppress state.
    ///
    /// # Caveats
    ///
    /// - You must have the [`MUTE_MEMBERS`] permission to use this method.
    /// - When unsuppressed, non-bot users will have their
    ///   `request_to_speak_timestamp` set to the current time. Bot users will
    ///   not.
    /// - When suppressed, the user will have their `request_to_speak_timestamp`
    ///   removed.
    ///
    /// [`MUTE_MEMBERS`]: twilight_model::guild::Permissions::MUTE_MEMBERS
    pub fn suppress(mut self) -> Self {
        self.fields.suppress.replace(true);

        self
    }

    fn start(&mut self) -> Result<(), Error> {
        let request = Request::builder(Route::UpdateUserVoiceState {
            guild_id: self.guild_id.0,
            user_id: self.user_id.0,
        })
        .json(&self.fields)?
        .build();

        self.fut.replace(Box::pin(self.http.verify(request)));

        Ok(())
    }
}

poll_req!(UpdateUserVoiceState<'_>, ());

avg_line_length: 25.179487
max_line_length: 80
alphanum_fraction: 0.580957

hexsha: 4b38e51839a959bcc62600b394bef95bd232f65f
size: 53
content:

pub mod error;
pub mod configure;
pub mod listener;

avg_line_length: 17.666667
max_line_length: 19
alphanum_fraction: 0.754717

hexsha: 1acb8e7fffc0ff88ecfa2591ab4e8cdf6db37a9a
size: 2,613
content:

// RGB standard library
// Written in 2020 by
//     Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.

use core::str::FromStr;
use regex::Regex;
use serde::{Deserialize, Serialize};

use bitcoin::hashes::hex::FromHex;
use bitcoin::Txid;
use lnpbp::bitcoin;
use lnpbp::bp;
use lnpbp::rgb::SealDefinition;

use crate::error::ParseError;

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Display)]
#[display_from(Debug)]
pub struct Outcoins {
    pub coins: f32,
    pub vout: u16,
    pub txid: Option<Txid>,
}

impl Outcoins {
    pub fn seal_definition(&self) -> SealDefinition {
        use lnpbp::rand::{self, RngCore};
        let mut rng = rand::thread_rng();
        // Not an amount blinding factor but outpoint blinding
        let entropy = rng.next_u32();
        match self.txid {
            Some(txid) => SealDefinition::TxOutpoint(bp::blind::OutpointReveal {
                blinding: entropy,
                txid,
                vout: self.vout,
            }),
            None => SealDefinition::WitnessVout {
                vout: self.vout,
                blinding: entropy,
            },
        }
    }
}

impl FromStr for Outcoins {
    type Err = ParseError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let re = Regex::new(
            r"(?x)
            ^(?P<coins>[\d.,_']+)    # float amount
            @
            ((?P<txid>[a-f\d]{64})   # Txid
            :)
            (?P<vout>\d+)$           # Vout
            ",
        )
        .expect("Regex parse failure");
        if let Some(m) = re.captures(&s.to_ascii_lowercase()) {
            match (m.name("coins"), m.name("txid"), m.name("vout")) {
                (Some(amount), Some(txid), Some(vout)) => Ok(Self {
                    coins: amount.as_str().parse()?,
                    vout: vout.as_str().parse()?,
                    txid: Some(Txid::from_hex(txid.as_str())?),
                }),
                (Some(amount), None, Some(vout)) => Ok(Self {
                    coins: amount.as_str().parse()?,
                    vout: vout.as_str().parse()?,
                    txid: None,
                }),
                _ => Err(ParseError),
            }
        } else {
            Err(ParseError)
        }
    }
}

avg_line_length: 30.383721
max_line_length: 92
alphanum_fraction: 0.530808

hexsha: 6a1c6a3f5b4bfd3971cf24b398a79d087d74d52e
size: 4,146
content:

//
//! Copyright © 2019 Benedict Gaster. All rights reserved.
//

//use net2::{unix::UnixUdpBuilderExt, UdpBuilder};
use std::net::{UdpSocket, SocketAddrV4};
use std::str::FromStr;
use std::sync::mpsc::{Sender};

use rosc::{OscPacket, OscMessage, OscType};
use rosc::encoder;

use std::time::Duration;

use std::sync::atomic::{AtomicBool, Ordering};
use crate::DISCONNECT;

use super::orac_serial_device;

//#[derive(Debug)]
pub struct Transport {
    socket: UdpSocket,
    from_addr: SocketAddrV4,
    osc_sender: Sender<(OscPacket, Option<SocketAddrV4>)>,
    serial_send: orac_serial_device::SerialSend,
}

impl Transport {
    pub fn new(
        from_addr: &str,
        osc_sender: Sender<(OscPacket, Option<SocketAddrV4>)>,
        serial_send: orac_serial_device::SerialSend) -> Result<Self, &'static str> {

        // let addr = match SocketAddrV4::from_str(from_addr) {
        //     Ok(addr) => addr,
        //     Err(_) => panic!("moo"),
        // };

        // // we need to be able to reuse the socket as it will be open
        // let sock = UdpBuilder::new_v4().unwrap()
        //     .reuse_address(true).unwrap()
        //     .reuse_port(true).unwrap()
        //     .bind(from_addr).unwrap();

        // //let sock = UdpSocket::bind(addr).unwrap();

        // Ok(Transport {
        //     socket: sock,
        //     from_addr: addr,
        //     osc_sender: osc_sender,
        // })

        Transport::get_addr_from_arg(from_addr)
            .and_then(|from_addr|
                UdpSocket::bind(from_addr)
                    .and_then(|sock| Ok(Transport {
                        socket: sock,
                        from_addr: from_addr,
                        osc_sender: osc_sender,
                        serial_send: serial_send,}))
                    .map_err(|_| "failed to open socket"))
    }

    fn get_addr_from_arg(arg: &str) -> Result<SocketAddrV4, &'static str> {
        match SocketAddrV4::from_str(arg) {
            Ok(addr) => Ok(addr),
            Err(_) => Err("failed to create socket addr")
        }
    }

    pub fn run(&mut self) {
        // timeout read every 3 secs to check quit
        //self.socket.set_read_timeout(Some(Duration::new(3, 0)));
        let mut buf = [0u8; rosc::decoder::MTU];
        while !DISCONNECT.load(Ordering::SeqCst) {
            match self.socket.recv_from(&mut buf) {
                Ok((size, addr)) => {
                    //info!("Received packet with size {} from: {}", size, addr);
                    let packet = rosc::decoder::decode(&buf[..size]).unwrap();
                    Transport::info_packet(&packet);
                    self.serial_send.send(&packet);
                    self.osc_sender.send((packet, None));
                }
                Err(e) => {
                    //error!("Error receiving from socket: {}", e);
                }
            }
        }
    }

    pub fn info_packet(packet: &OscPacket) {
        match packet {
            OscPacket::Message(msg) => {
                info!("OSC address: {}", msg.addr);
                match &msg.args {
                    Some(args) => {
                        info!("OSC arguments: {:?}", args);
                    }
                    None => info!("No arguments in message."),
                }
            }
            OscPacket::Bundle(bundle) => {
                println!("OSC Bundle: {:?}", bundle);
            }
        }
    }

    // fn enable_port_reuse(socket: &UdpSocket) -> std::io::Result<()> {
    //     use std::os::unix::prelude::*;
    //     use std::mem;
    //     use libc;
    //     unsafe {
    //         let optval: libc::c_int = 1;
    //         let ret = libc::setsockopt(
    //             socket.as_raw_fd(),
    //             libc::SOL_SOCKET,
    //             libc::SO_REUSEPORT,
    //             &optval as *const _ as *const libc::c_void,
    //             mem::size_of_val(&optval) as libc::socklen_t,
    //         );
    //         if ret != 0 {
    //             return Err(std::io::Error::last_os_error());
    //         }
    //     }
    //     Ok(())
    // }
}

avg_line_length: 31.648855
max_line_length: 84
alphanum_fraction: 0.486734

hexsha: 7abb76708df2f093b632d2da614c02fed5c210e1
size: 2,771
content:

// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
use crate::deno_dir::DenoDir;
use deno::ErrBox;
use rustyline;
use std::fs;
use std::path::PathBuf;

#[cfg(not(windows))]
use rustyline::Editor;

// Work around the issue that on Windows, `struct Editor` does not implement the
// `Send` trait, because it embeds a windows HANDLE which is a type alias for
// *mut c_void. This value isn't actually a pointer and there's nothing that
// can be mutated through it, so hack around it. TODO: a prettier solution.
#[cfg(windows)]
use std::ops::{Deref, DerefMut};

#[cfg(windows)]
struct Editor<T: rustyline::Helper> {
  inner: rustyline::Editor<T>,
}

#[cfg(windows)]
unsafe impl<T: rustyline::Helper> Send for Editor<T> {}

#[cfg(windows)]
impl<T: rustyline::Helper> Editor<T> {
  pub fn new() -> Editor<T> {
    Editor {
      inner: rustyline::Editor::<T>::new(),
    }
  }
}

#[cfg(windows)]
impl<T: rustyline::Helper> Deref for Editor<T> {
  type Target = rustyline::Editor<T>;

  fn deref(&self) -> &rustyline::Editor<T> {
    &self.inner
  }
}

#[cfg(windows)]
impl<T: rustyline::Helper> DerefMut for Editor<T> {
  fn deref_mut(&mut self) -> &mut rustyline::Editor<T> {
    &mut self.inner
  }
}

pub struct Repl {
  editor: Editor<()>,
  history_file: PathBuf,
}

impl Repl {
  pub fn new(history_file: PathBuf) -> Self {
    let mut repl = Self {
      editor: Editor::<()>::new(),
      history_file,
    };

    repl.load_history();
    repl
  }

  fn load_history(&mut self) {
    debug!("Loading REPL history: {:?}", self.history_file);
    self
      .editor
      .load_history(&self.history_file.to_str().unwrap())
      .map_err(|e| {
        debug!("Unable to load history file: {:?} {}", self.history_file, e)
      })
      // ignore this error (e.g. it occurs on first load)
      .unwrap_or(())
  }

  fn save_history(&mut self) -> Result<(), ErrBox> {
    fs::create_dir_all(self.history_file.parent().unwrap())?;
    self
      .editor
      .save_history(&self.history_file.to_str().unwrap())
      .map(|_| debug!("Saved REPL history to: {:?}", self.history_file))
      .map_err(|e| {
        eprintln!("Unable to save REPL history: {:?} {}", self.history_file, e);
        ErrBox::from(e)
      })
  }

  pub fn readline(&mut self, prompt: &str) -> Result<String, ErrBox> {
    self
      .editor
      .readline(&prompt)
      .map(|line| {
        self.editor.add_history_entry(line.clone());
        line
      })
      .map_err(ErrBox::from)
    // Forward error to TS side for processing
  }
}

impl Drop for Repl {
  fn drop(&mut self) {
    self.save_history().unwrap();
  }
}

pub fn history_path(dir: &DenoDir, history_file: &str) -> PathBuf {
  let mut p: PathBuf = dir.root.clone();
  p.push(history_file);
  p
}

avg_line_length: 24.095652
max_line_length: 80
alphanum_fraction: 0.621797

hexsha: f4a4fd9ab2e9b34072e814b66b16a4da15f6b3b0
size: 7,556
content:

// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*! Runtime services, including the task scheduler and I/O dispatcher

The `rt` module provides the private runtime infrastructure necessary
to support core language features like the exchange and local heap,
the garbage collector, logging, local data and unwinding. It also
implements the default task scheduler and task model. Initialization
routines are provided for setting up runtime resources in common
configurations, including that used by `rustc` when generating
executables.

It is intended that the features provided by `rt` can be factored in a
way such that the core library can be built with different 'profiles'
for different use cases, e.g. excluding the task scheduler. A number
of runtime features though are critical to the functioning of the
language and an implementation must be provided regardless of the
execution environment.

Of foremost importance is the global exchange heap, in the module
`global_heap`. Very little practical Rust code can be written without
access to the global heap. Unlike most of `rt` the global heap is
truly a global resource and generally operates independently of the
rest of the runtime.

All other runtime features are task-local, including the local heap,
the garbage collector, local storage, logging and the stack unwinder.

The relationship between `rt` and the rest of the core library is
not entirely clear yet and some modules will be moving into or
out of `rt` as development proceeds.

Several modules in `core` are clients of `rt`:

* `std::task` - The user-facing interface to the Rust task model.
* `std::local_data` - The interface to local data.
* `std::gc` - The garbage collector.
* `std::unstable::lang` - Miscellaneous lang items, some of which rely on `std::rt`.
* `std::cleanup` - Local heap destruction.
* `std::io` - In the future `std::io` will use an `rt` implementation.
* `std::logging`
* `std::comm`

*/

// FIXME: this should not be here.
#![allow(missing_doc)]

use any::Any;
use kinds::Send;
use option::Option;
use result::Result;
use task::TaskOpts;

use self::task::{Task, BlockedTask};

// this is somewhat useful when a program wants to spawn a "reasonable" number
// of workers based on the constraints of the system that it's running on.
// Perhaps this shouldn't be a `pub use` though and there should be another
// method...
pub use self::util::default_sched_threads;

// Export unwinding facilities used by the failure macros
pub use self::unwind::{begin_unwind, begin_unwind_raw, begin_unwind_fmt};

// FIXME: these probably shouldn't be public...
#[doc(hidden)]
pub mod shouldnt_be_public {
    #[cfg(not(test))]
    pub use super::local_ptr::native::maybe_tls_key;
    #[cfg(not(windows), not(target_os = "android"))]
    pub use super::local_ptr::compiled::RT_TLS_PTR;
}

// Internal macros used by the runtime.
mod macros;

/// The global (exchange) heap.
pub mod global_heap;

/// Implementations of language-critical runtime features like @.
pub mod task;

/// The EventLoop and internal synchronous I/O interface.
pub mod rtio;

/// The Local trait for types that are accessible via thread-local
/// or task-local storage.
pub mod local;

/// Bindings to system threading libraries.
pub mod thread;

/// The runtime configuration, read from environment variables.
pub mod env;

/// The local, managed heap
pub mod local_heap;

/// The runtime needs to be able to put a pointer into thread-local storage.
mod local_ptr;

/// Bindings to pthread/windows thread-local storage.
mod thread_local_storage;

/// Stack unwinding
pub mod unwind;

/// The interface to libunwind that rust is using.
mod libunwind;

/// Simple backtrace functionality (to print on failure)
pub mod backtrace;

/// Just stuff
mod util;

// Global command line argument storage
pub mod args;

// Support for running procedures when a program has exited.
mod at_exit_imp;

// Bookkeeping for task counts
pub mod bookkeeping;

// Stack overflow protection
pub mod stack;

/// The default error code of the rust runtime if the main task fails instead
/// of exiting cleanly.
pub static DEFAULT_ERROR_CODE: int = 101;

/// The interface to the current runtime.
///
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
    // Necessary scheduling functions, used for channels and blocking I/O
    // (sometimes).
    fn yield_now(~self, cur_task: ~Task);
    fn maybe_yield(~self, cur_task: ~Task);
    fn deschedule(~self, times: uint, cur_task: ~Task,
                  f: |BlockedTask| -> Result<(), BlockedTask>);
    fn reawaken(~self, to_wake: ~Task);

    // Miscellaneous calls which are very different depending on what context
    // you're in.
    fn spawn_sibling(~self, cur_task: ~Task, opts: TaskOpts, f: proc():Send);
    fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>>;
    /// The (low, high) edges of the current stack.
    fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
    fn can_block(&self) -> bool;

    // FIXME: This is a serious code smell and this should not exist at all.
    fn wrap(~self) -> ~Any;
}

/// One-time runtime initialization.
///
/// Initializes global state, including frobbing
/// the crate's logging flags, registering GC
/// metadata, and storing the process arguments.
pub fn init(argc: int, argv: **u8) {
    // FIXME: Derefing these pointers is not safe.
    // Need to propagate the unsafety to `start`.
    unsafe {
        args::init(argc, argv);
        env::init();
        local_ptr::init();
        at_exit_imp::init();
    }
}

/// Enqueues a procedure to run when the runtime is cleaned up
///
/// The procedure passed to this function will be executed as part of the
/// runtime cleanup phase. For normal rust programs, this means that it will run
/// after all other tasks have exited.
///
/// The procedure is *not* executed with a local `Task` available to it, so
/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
/// This is meant for "bare bones" usage to clean up runtime details, this is
/// not meant as a general-purpose "let's clean everything up" function.
///
/// It is forbidden for procedures to register more `at_exit` handlers when they
/// are running, and doing so will lead to a process abort.
pub fn at_exit(f: proc():Send) {
    at_exit_imp::push(f);
}

/// One-time runtime cleanup.
///
/// This function is unsafe because it performs no checks to ensure that the
/// runtime has completely ceased running. It is the responsibility of the
/// caller to ensure that the runtime is entirely shut down and nothing will be
/// poking around at the internal components.
///
/// Invoking cleanup while portions of the runtime are still in use may cause
/// undefined behavior.
pub unsafe fn cleanup() {
    bookkeeping::wait_for_other_tasks();
    at_exit_imp::run();
    args::cleanup();
    local_ptr::cleanup();
}

avg_line_length: 34.66055
max_line_length: 84
alphanum_fraction: 0.727766

hexsha: 22d1346018ea40daa597bb07fbbee93b276976d1
size: 333
content:

mod error;
#[macro_use]
mod executor;
mod runtime;
#[cfg(test)]
mod tests;

pub use self::error::Error;
pub use self::executor::{Executor, WasmiExecutor};
pub use self::runtime::{
    create_rng, extract_access_rights_from_keys, instance_and_memory, Runtime,
};

pub const MINT_NAME: &str = "mint";
pub const POS_NAME: &str = "pos";

avg_line_length: 20.8125
max_line_length: 78
alphanum_fraction: 0.726727

hexsha: e853e44d6f9b5776e245f557b20ce0133317e553
size: 7,162
content:

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [C++11 atomic orderings][1].
//!
//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
//! are often used for lazy global initialization.
//!
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUint, SeqCst};
//! use std::task::deschedule;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUint::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     spawn(proc() {
//!         spinlock_clone.store(0, SeqCst);
//!     });
//!
//!     // Wait for the other task to release the lock
//!     while spinlock.load(SeqCst) != 0 {
//!         // Since tasks may not be preemptive (if they are green threads)
//!         // yield to the scheduler to let the other task run. Low level
//!         // concurrent code needs to take into account Rust's two threading
//!         // models.
//!         deschedule();
//!     }
//! }
//! ```
//!
//! Transferring a heap object with `AtomicOption`:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicOption, SeqCst};
//!
//! fn main() {
//!     struct BigObject;
//!
//!     let shared_big_object = Arc::new(AtomicOption::empty());
//!
//!     let shared_big_object_clone = shared_big_object.clone();
//!     spawn(proc() {
//!         let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
//!         if unwrapped_big_object.is_some() {
//!             println!("got a big object from another task");
//!         } else {
//!             println!("other task hasn't sent big object yet");
//!         }
//!     });
//!
//!     shared_big_object.swap(box BigObject, SeqCst);
//! }
//! ```
//!
//! Keep a global count of live tasks:
//!
//! ```
//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
//!
//! static GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
//!
//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
//! println!("live tasks: {}", old_task_count + 1);
//! ```

#![allow(deprecated)]

use alloc::boxed::Box;
use core::mem;
use core::prelude::{Drop, None, Option, Some};

pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
pub use core::atomic::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
pub use core::atomic::fence;

/// An atomic, nullable unique pointer
///
/// This can be used as the concurrency primitive for operations that transfer
/// owned heap objects across tasks.
#[unsafe_no_drop_flag]
#[deprecated = "no longer used; will eventually be replaced by a higher-level\
                concept like MVar"]
pub struct AtomicOption<T> {
    p: AtomicUint,
}

impl<T> AtomicOption<T> {
    /// Create a new `AtomicOption`
    pub fn new(p: Box<T>) -> AtomicOption<T> {
        unsafe { AtomicOption { p: AtomicUint::new(mem::transmute(p)) } }
    }

    /// Create a new `AtomicOption` that doesn't contain a value
    pub fn empty() -> AtomicOption<T> {
        AtomicOption { p: AtomicUint::new(0) }
    }

    /// Store a value, returning the old value
    #[inline]
    pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
        let val = unsafe { mem::transmute(val) };

        match self.p.swap(val, order) {
            0 => None,
            n => Some(unsafe { mem::transmute(n) }),
        }
    }

    /// Remove the value, leaving the `AtomicOption` empty.
    #[inline]
    pub fn take(&self, order: Ordering) -> Option<Box<T>> {
        unsafe { self.swap(mem::transmute(0u), order) }
    }

    /// Replace an empty value with a non-empty value.
    ///
    /// Succeeds if the option is `None` and returns `None` if so. If
    /// the option was already `Some`, returns `Some` of the rejected
    /// value.
    #[inline]
    pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
        unsafe {
            let val = mem::transmute(val);
            let expected = mem::transmute(0u);
            let oldval = self.p.compare_and_swap(expected, val, order);
            if oldval == expected {
                None
            } else {
                Some(mem::transmute(val))
            }
        }
    }

    /// Returns `true` if the `AtomicOption` is empty.
    ///
    /// Be careful: The caller must have some external method of ensuring the
    /// result does not get invalidated by another task after this returns.
    #[inline]
    pub fn is_empty(&self, order: Ordering) -> bool {
        self.p.load(order) as uint == 0
    }
}

#[unsafe_destructor]
impl<T> Drop for AtomicOption<T> {
    fn drop(&mut self) {
        let _ = self.take(SeqCst);
    }
}

#[cfg(test)]
mod test {
    use std::prelude::*;
    use super::*;

    #[test]
    fn option_empty() {
        let option: AtomicOption<()> = AtomicOption::empty();
        assert!(option.is_empty(SeqCst));
    }

    #[test]
    fn option_swap() {
        let p = AtomicOption::new(box 1i);
        let a = box 2i;

        let b = p.swap(a, SeqCst);

        assert!(b == Some(box 1));
        assert!(p.take(SeqCst) == Some(box 2));
    }

    #[test]
    fn option_take() {
        let p = AtomicOption::new(box 1i);

        assert!(p.take(SeqCst) == Some(box 1));
        assert!(p.take(SeqCst) == None);

        let p2 = box 2i;
        p.swap(p2, SeqCst);

        assert!(p.take(SeqCst) == Some(box 2));
    }

    #[test]
    fn option_fill() {
        let p = AtomicOption::new(box 1i);

        assert!(p.fill(box 2i, SeqCst).is_some()); // should fail; shouldn't leak!
        assert!(p.take(SeqCst) == Some(box 1));

        assert!(p.fill(box 2i, SeqCst).is_none()); // shouldn't fail
        assert!(p.take(SeqCst) == Some(box 2));
    }
}

avg_line_length: 31.004329
max_line_length: 82
alphanum_fraction: 0.610863

hexsha: 0861d6a536de3a942d0dd26c28b366b03811d6b6
size: 15,045
content:

//! A lightweight actor model inspired framework to build
//! customizable components with message-based intercommunications.

#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]

mod error;

use std::{
    any::type_name,
    fmt::{self, Debug},
    os::raw::c_void,
    panic::catch_unwind,
    sync::{Arc, Weak},
    thread,
    time::Duration,
};

use {
    async_io::block_on,
    flume::{bounded, unbounded, Receiver, RecvTimeoutError, SendError, Sender, TrySendError},
    futures_lite::future::{or, pending},
    once_cell::sync::Lazy,
};

pub use crate::error::Error;

/// An async executor with a customized execution dedication per task.
pub type Executor<'a> = async_executor::Executor<'a>;

/// A default executor. It runs on per-core threads and is fair
/// in terms of task priorities.
pub static DEFAULT_EXECUTOR: Lazy<Executor<'static>> = Lazy::new(|| {
    let num_threads = num_cpus::get();
    for n in 1..=num_threads {
        thread::Builder::new()
            .name(format!("appliance-{}", n))
            .spawn(|| loop {
                catch_unwind(|| block_on(DEFAULT_EXECUTOR.run(pending::<()>()))).ok();
            })
            .expect("cannot spawn an appliance executor thread");
    }
    Executor::new()
});

/// `Message` must be implemented for any type which is intended for
/// sending to appliances.
///
/// # Example
/// ```
/// # use std::time::Duration;
/// # use appliance::{Appliance, Descriptor, Handler, Message};
/// type Counter = Appliance<'static, usize>;
///
/// struct Ping;
///
/// impl Message for Ping { type Result = usize; }
///
/// impl Handler<Ping> for Counter {
///     fn handle(&mut self, _msg: &Ping) -> usize {
///         *self.state() += 1;
///         *self.state()
///     }
/// }
///
/// fn do_ping(descriptor: Descriptor<Counter>) {
///     match descriptor.send_and_wait_with_timeout(Ping, Duration::from_secs(10)) {
///         Ok(cnt) => println!("Appliance was pinged successfully {} times", *cnt),
///         Err(err) => panic!("Ping to appliance has failed: {}", err),
///     }
/// }
/// ```
pub trait Message: Send {
    /// The type of replies generated by handling this message.
    type Result: Send;
}

/// A trait which must be implemented for all appliances which are intended to receive
/// messages of type `M`. One appliance can handle multiple message types.
///
/// Handler's logic is strongly encouraged to include only fast (non-blocking) and synchronous
/// mutations of the appliance state. Otherwise, the appliance's event loop may get slow, and
/// hence flood the internal buffer causing message sending denials.
///
/// # Example
/// ```
/// # use std::time::Duration;
/// # use appliance::{Appliance, Descriptor, DEFAULT_EXECUTOR, Handler, Message};
/// type Counter = Appliance<'static, usize>;
///
/// struct Ping;
///
/// impl Message for Ping { type Result = usize; }
///
/// impl Handler<Ping> for Counter {
///     fn handle(&mut self, _msg: &Ping) -> usize {
///         *self.state() += 1;
///         *self.state()
///     }
/// }
///
/// struct Reset;
///
/// impl Message for Reset { type Result = (); }
///
/// impl Handler<Reset> for Counter {
///     fn handle(&mut self, _msg: &Reset) {
///         *self.state() = 0;
///     }
/// }
///
/// const BUF_SIZE: usize = 10;
///
/// fn main() -> Result<(), appliance::Error> {
///     let descriptor = Appliance::new_bounded(&DEFAULT_EXECUTOR, 0, BUF_SIZE);
///     assert_eq!(*descriptor.send_and_wait_sync(Ping)?, 1);
///     assert_eq!(*descriptor.send_and_wait_sync(Ping)?, 2);
///     descriptor.send(Reset)?;
///     assert_eq!(*descriptor.send_and_wait_sync(Ping)?, 1);
///     Ok(())
/// }
/// ```
pub trait Handler<M: Message> {
    /// Handle the incoming message.
    fn handle(&mut self, msg: M) -> M::Result;
}

/// A dual trait for `Handler`. For any type of messages `M` and any type of handlers `H`,
/// if `impl Handler<M> for H`, then `impl HandledBy<H> for M`. I.e. we can either ask
/// "which messages can be handled by this appliance" or "which appliances can handle this message",
/// and the answers to these questions are dual. The trait `HandledBy` answers the second
/// question.
///
/// Normally one should always implement `Handler<M>`, unless for some reason it is impossible
/// to do. The dual `HandledBy` impl is then provided automatically.
///
/// Generic methods, on the other hand, should use the trait constraint `T: HandledBy<H>`, since
/// the set of types for which `T: HandledBy<H>` is strictly larger than those for which
/// `H: Handler<T>`. An example where the client would need to implement `HandledBy` is if they
/// want to add custom messages for a library-provided handler type.
pub trait HandledBy<H: ?Sized>: Message {
    /// Handle the given message with the provided handler.
    ///
    /// The return type is wrapped in order to remove generic parameters from this function.
    /// The actual result value can be recovered with `ResultWrapper::downcast` if the type
    /// of the result is known.
    fn handle_by(self, handler: &mut H) -> Self::Result;
}

impl<H, M: Message> HandledBy<H> for M
where
    H: Handler<M>,
{
    fn handle_by(self, handler: &mut H) -> Self::Result {
        handler.handle(self)
    }
}

struct InnerMessage<'a, H: ?Sized + 'a> {
    handle_message: Box<dyn FnOnce(Option<&mut H>) -> *mut c_void + Send + 'a>,
}

impl<'a, H: ?Sized + 'a> InnerMessage<'a, H> {
    fn new<M>(message: M, reply_channel: Option<Sender<M::Result>>) -> Self
    where
        M: HandledBy<H> + 'a,
    {
        InnerMessage {
            handle_message: Box::new(move |handler| {
                if let Some(h) = handler {
                    let result = message.handle_by(h);
                    if let Some(rc) = reply_channel {
                        rc.send(result).ok();
                    }
                    return std::ptr::null_mut();
                }
                let bm = Box::new(message);
                Box::into_raw(bm) as *mut c_void
            }),
        }
    }
}

type MessageSender<'a, H> = Sender<InnerMessage<'a, H>>;

/// A stateful entity that can only be interacted with by sending messages.
///
/// The appliance itself is not directly available. Messages must be sent to it
/// using its descriptor which is returned by the `Appliance::new_bounded` and
/// `Appliance::new_unbounded` methods, and can also be obtained from `&Appliance`
/// using `Appliance::descriptor` method. Note that the latter route is generally
/// available only for message handler `Handler` implementations.
pub struct Appliance<'s, S> {
    state: S,
    descriptor: Weak<MessageSender<'s, Self>>,
}

impl<'s, S: Debug + 's> Debug for Appliance<'s, S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Appliance({:?})", self.state)
    }
}

impl<'s, S> Appliance<'s, S>
where
    S: Send + 's,
{
    /// Creates a new appliance with a bounded message buffer,
    /// a state, and a handler.
    pub fn new_bounded(executor: &'s Executor<'s>, state: S, size: usize) -> Descriptor<'s, Self> {
        Self::run(executor, state, size)
    }

    /// Creates a new appliance with an unbounded message buffer,
    /// a state, and a handler. It's not recommended to use this
    /// version of the appliance in production, just like any other
    /// memory-unbounded construct.
    pub fn new_unbounded(executor: &'s Executor<'s>, state: S) -> Descriptor<'s, Self> {
        Self::run(executor, state, None)
    }

    /// Creates a new appliance with the given state, message handler,
    /// and buffer size, if any.
    fn run(
        executor: &'s Executor<'s>,
        state: S,
        size: impl Into<Option<usize>>,
    ) -> Descriptor<'s, Self> {
        let (in_, out_) = if let Some(mbs) = size.into() {
            bounded(mbs)
        } else {
            unbounded()
        };
        let descriptor = Descriptor {
            inner: Arc::new(in_),
        };
        let mut appliance = Appliance {
            state,
            descriptor: Arc::downgrade(&descriptor.inner),
        };
        executor
            .spawn(async move { appliance.handle_messages(out_).await })
            .detach();
        descriptor
    }

    /// Returns a descriptor object of the appliance.
    ///
    /// Any descriptor is a cloneable object which allows sending messages to the appliance.
    ///
    /// This function will return `None` if all appliance descriptors have already been dropped.
    /// In this case the appliance becomes unusable.
    pub fn descriptor(&'s self) -> Option<Descriptor<'s, Self>> {
        self.descriptor.upgrade().map(|inner| Descriptor { inner })
    }

    /// The mutable inner state of the appliance.
    ///
    /// Note that this function requires mutable access to the appliance itself (not its
    /// descriptor), but the appliance object is never returned by the API. The only place where
    /// the appliance can be accessed is the implementation of `Handler` and `HandledBy` traits
    /// for the message types, which is thus also the only place where one can (and should) mutate
    /// its state.
    pub fn state(&mut self) -> &mut S {
        &mut self.state
    }

    async fn handle_messages(&mut self, out_: Receiver<InnerMessage<'_, Self>>) {
        while let Ok(InnerMessage { handle_message }) = out_.recv_async().await {
            handle_message(Some(self));
        }
    }
}

/// Appliance descriptor is a cloneable object which allows sending messages to the appliance.
///
/// Once all descriptors to the appliance are dropped, the appliance will terminate its event
/// loop and be destroyed.
pub struct Descriptor<'a, A: ?Sized> {
    /// The incoming channel which is used to send messages to the appliance.
    ///
    /// We are forced to stupidly wrap `Sender` in an `Arc` even though it already is a
    /// wrapped `Arc`. We need the extra `Arc` so that we can pass a weak reference to it into
    /// the `Appliance` object, but unfortunately `flume::Sender` doesn't provide weak references
    /// in the API.
    ///
    /// Make sure that the inner `Sender` is never leaked outside of the containing `Arc`. If that
    /// happens, the appliance will stay alive after all descriptors are dropped, which violates
    /// the API contract.
    inner: Arc<MessageSender<'a, A>>,
}

impl<'a, A: ?Sized + 'a> Debug for Descriptor<'a, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Descriptor<{}>(..)", type_name::<A>())
    }
}

impl<'a, A: ?Sized + 'a> Clone for Descriptor<'a, A> {
    fn clone(&self) -> Self {
        Descriptor {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, A: ?Sized + 'a> Descriptor<'a, A> {
    /// Sends a message to the current appliance without
    /// waiting for the message to be handled.
    pub fn send_sync<M>(&self, message: M) -> Result<(), Error<M>>
    where
        M: HandledBy<A> + 'a,
    {
        match self.inner.try_send(InnerMessage::new(message, None)) {
            Err(TrySendError::Full(im)) => {
                let p = (im.handle_message)(None);
                let bm = unsafe { Box::from_raw(p as *mut M) };
                Err(Error::FullBuffer(*bm))
            }
            Err(TrySendError::Disconnected(im)) => {
                let p = (im.handle_message)(None);
                let bm = unsafe { Box::from_raw(p as *mut M) };
                Err(Error::UnexpectedFailure(Some(*bm)))
            }
            _ => Ok(()),
        }
    }

    /// Does conceptually the same thing as `send_sync` but is intended
    /// to be used in an async context.
    pub async fn send_async<M>(&self, message: M) -> Result<(), Error<M>>
    where
        M: HandledBy<A> + 'a,
    {
        self.inner
            .send_async(InnerMessage::new(message, None))
            .await
            .map_err(|SendError(im)| {
                let p = (im.handle_message)(None);
                let bm = unsafe { Box::from_raw(p as *mut M) };
                Error::UnexpectedFailure(Some(*bm))
            })
    }

    /// Sends a message to the current appliance and waits
    /// forever, if `timeout` is None, or only for the given time,
    /// for the message to be handled.
    /// This synchronous blocking method is a fit for callers
    /// who don't use async execution and must be assured that
    /// the message has been handled.
    /// Note, it is supposed to be used less often than `send`
    /// as it may suffer a significant performance hit due to
    /// synchronization with the handling loop.
    pub fn send_and_wait_sync<M>(
        &self,
        message: M,
        timeout: Option<Duration>,
    ) -> Result<M::Result, Error<M>>
    where
        M: HandledBy<A> + 'a,
    {
        let (s, r) = bounded(1);
        let im = InnerMessage::new(message, Some(s));
        match self.inner.try_send(im) {
            Err(TrySendError::Full(im)) => {
                let p = (im.handle_message)(None);
                let bm = unsafe { Box::from_raw(p as *mut M) };
                return Err(Error::FullBuffer(*bm));
            }
            Err(TrySendError::Disconnected(im)) => {
                let p = (im.handle_message)(None);
                let bm = unsafe { Box::from_raw(p as *mut M) };
                return Err(Error::UnexpectedFailure(Some(*bm)));
            }
            _ => {}
        }
        if let Some(timeout) = timeout {
            match r.recv_timeout(timeout) {
                Ok(r) => Ok(r),
                Err(RecvTimeoutError::Timeout) => Err(Error::Timeout),
                Err(RecvTimeoutError::Disconnected) => Err(Error::UnexpectedFailure(None)),
            }
        } else {
            r.recv().map_err(|_| Error::UnexpectedFailure(None))
        }
    }

    /// Does conceptually the same thing as `send_and_wait_sync`
    /// but is intended to be used in an async context. This method
    /// is well suited for waiting for a result.
    pub async fn send_and_wait_async<M>(
        &self,
        message: M,
        timeout: Option<Duration>,
    ) -> Result<M::Result, Error<M>>
    where
        M: HandledBy<A> + 'a,
    {
        let (send, recv) = bounded(1);
        let im = InnerMessage::new(message, Some(send));
        if let Err(SendError(im)) = self.inner.send_async(im).await {
            let p = (im.handle_message)(None);
            let bm = unsafe { Box::from_raw(p as *mut M) };
            return Err(Error::UnexpectedFailure(Some(*bm)));
        }
        if let Some(timeout) = timeout {
            let f1 = async {
                recv.recv_async()
                    .await
                    .map_err(|_| Error::UnexpectedFailure(None))
            };
            let f2 = async {
                async_io::Timer::after(timeout).await;
                Err(Error::Timeout)
            };
            or(f1, f2).await
        } else {
            recv.recv_async()
                .await
                .map_err(|_| Error::UnexpectedFailure(None))
        }
    }
}

avg_line_length: 35.736342
max_line_length: 100
alphanum_fraction: 0.591027

hexsha: ef4507df8d1c4f1a786fa1f087472a510121eb20
size: 25,169
content:

use tempfile;
use testutil;

use crate::{
  CommandRunner as CommandRunnerTrait, Context, FallibleProcessResultWithPlatform, Platform,
  PlatformConstraint, Process, RelativePath,
};
use hashing::EMPTY_DIGEST;
use spectral::{assert_that, string::StrAssertions};
use std;
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use std::time::Duration;
use store::Store;
use tempfile::TempDir;
use testutil::data::{TestData, TestDirectory};
use testutil::path::find_bash;
use testutil::{as_bytes, owned_string_vec};
use tokio::runtime::Handle;

#[tokio::test]
#[cfg(unix)]
async fn stdout() {
  let result = run_command_locally(Process {
    argv: owned_string_vec(&["/bin/echo", "-n", "foo"]),
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "echo foo".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes("foo"),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: EMPTY_DIGEST,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
#[cfg(unix)]
async fn stdout_and_stderr_and_exit_code() {
  let result = run_command_locally(Process {
    argv: owned_string_vec(&["/bin/bash", "-c", "echo -n foo ; echo >&2 -n bar ; exit 1"]),
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "echo foo and fail".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes("foo"),
      stderr: as_bytes("bar"),
      exit_code: 1,
      output_directory: EMPTY_DIGEST,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
#[cfg(unix)]
async fn capture_exit_code_signal() {
  // Launch a process that kills itself with a signal.
  let result = run_command_locally(Process {
    argv: owned_string_vec(&["/bin/bash", "-c", "kill $$"]),
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "kill self".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: -15,
      output_directory: EMPTY_DIGEST,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
#[cfg(unix)]
async fn env() {
  let mut env: BTreeMap<String, String> = BTreeMap::new();
  env.insert("FOO".to_string(), "foo".to_string());
  env.insert("BAR".to_string(), "not foo".to_string());

  let result = run_command_locally(Process {
    argv: owned_string_vec(&["/usr/bin/env"]),
    env: env.clone(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "run env".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  let stdout = String::from_utf8(result.unwrap().stdout.to_vec()).unwrap();
  let got_env: BTreeMap<String, String> = stdout
    .split("\n")
    .filter(|line| !line.is_empty())
    .map(|line| line.splitn(2, "="))
    .map(|mut parts| {
      (
        parts.next().unwrap().to_string(),
        parts.next().unwrap_or("").to_string(),
      )
    })
    .filter(|x| x.0 != "PATH")
    .collect();

  assert_eq!(env, got_env);
}

#[tokio::test]
#[cfg(unix)]
async fn env_is_deterministic() {
  fn make_request() -> Process {
    let mut env = BTreeMap::new();
    env.insert("FOO".to_string(), "foo".to_string());
    env.insert("BAR".to_string(), "not foo".to_string());

    Process {
      argv: owned_string_vec(&["/usr/bin/env"]),
      env: env,
      working_directory: None,
      input_files: EMPTY_DIGEST,
      output_files: BTreeSet::new(),
      output_directories: BTreeSet::new(),
      timeout: one_second(),
      description: "run env".to_string(),
      unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
      jdk_home: None,
      target_platform: PlatformConstraint::None,
      is_nailgunnable: false,
    }
  }

  let result1 = run_command_locally(make_request()).await;
  let result2 = run_command_locally(make_request()).await;

  assert_eq!(result1.unwrap(), result2.unwrap());
}

#[tokio::test]
async fn binary_not_found() {
  run_command_locally(Process {
    argv: owned_string_vec(&["echo", "-n", "foo"]),
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "echo foo".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await
  .expect_err("Want Err");
}

#[tokio::test]
async fn output_files_none() {
  let result = run_command_locally(Process {
    argv: owned_string_vec(&[&find_bash(), "-c", "exit 0"]),
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "bash".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: EMPTY_DIGEST,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_files_one() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!("echo -n {} > {}", TestData::roland().string(), "roland"),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("roland")].into_iter().collect(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "bash".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::containing_roland().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_dirs() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!(
        "/bin/mkdir cats && echo -n {} > {} ; echo -n {} > treats",
        TestData::roland().string(),
        "cats/roland",
        TestData::catnip().string()
      ),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("treats")].into_iter().collect(),
    output_directories: vec![PathBuf::from("cats")].into_iter().collect(),
    timeout: one_second(),
    description: "bash".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::recursive().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_files_many() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!(
        "echo -n {} > cats/roland ; echo -n {} > treats",
        TestData::roland().string(),
        TestData::catnip().string()
      ),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("cats/roland"), PathBuf::from("treats")]
      .into_iter()
      .collect(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "treats-roland".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::recursive().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_files_execution_failure() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!(
        "echo -n {} > {} ; exit 1",
        TestData::roland().string(),
        "roland"
      ),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("roland")].into_iter().collect(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "echo foo".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 1,
      output_directory: TestDirectory::containing_roland().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_files_partial_output() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!("echo -n {} > {}", TestData::roland().string(), "roland"),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("roland"), PathBuf::from("susannah")]
      .into_iter()
      .collect(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "echo-roland".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::containing_roland().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_overlapping_file_and_dir() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!("echo -n {} > cats/roland", TestData::roland().string()),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("cats/roland")].into_iter().collect(),
    output_directories: vec![PathBuf::from("cats")].into_iter().collect(),
    timeout: one_second(),
    description: "bash".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::nested().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn jdk_symlink() {
  let preserved_work_tmpdir = TempDir::new().unwrap();
  let roland = TestData::roland().bytes();
  std::fs::write(preserved_work_tmpdir.path().join("roland"), roland.clone())
    .expect("Writing temporary file");

  let result = run_command_locally(Process {
    argv: vec!["/bin/cat".to_owned(), ".jdk/roland".to_owned()],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: one_second(),
    description: "cat roland".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: Some(preserved_work_tmpdir.path().to_path_buf()),
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result,
    Ok(FallibleProcessResultWithPlatform {
      stdout: roland,
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: EMPTY_DIGEST,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    })
  )
}

#[tokio::test]
async fn test_directory_preservation() {
  let preserved_work_tmpdir = TempDir::new().unwrap();
  let preserved_work_root = preserved_work_tmpdir.path().to_owned();

  let result = run_command_locally_in_dir(
    Process {
      argv: vec![
        find_bash(),
        "-c".to_owned(),
        format!("echo -n {} > {}", TestData::roland().string(), "roland"),
      ],
      env: BTreeMap::new(),
      working_directory: None,
      input_files: EMPTY_DIGEST,
      output_files: vec![PathBuf::from("roland")].into_iter().collect(),
      output_directories: BTreeSet::new(),
      timeout: one_second(),
      description: "bash".to_string(),
      unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
      jdk_home: None,
      target_platform: PlatformConstraint::None,
      is_nailgunnable: false,
    },
    preserved_work_root.clone(),
    false,
    None,
    None,
  )
  .await;
  result.unwrap();

  assert!(preserved_work_root.exists());

  // Collect all of the top level sub-dirs under our test workdir.
  let subdirs = testutil::file::list_dir(&preserved_work_root);
  assert_eq!(subdirs.len(), 1);

  // Then look for a file like e.g. `/tmp/abc1234/process-execution7zt4pH/roland`
  let rolands_path = preserved_work_root.join(&subdirs[0]).join("roland");
  assert!(rolands_path.exists());
}

#[tokio::test]
async fn test_directory_preservation_error() {
  let preserved_work_tmpdir = TempDir::new().unwrap();
  let preserved_work_root = preserved_work_tmpdir.path().to_owned();

  assert!(preserved_work_root.exists());
  assert_eq!(testutil::file::list_dir(&preserved_work_root).len(), 0);

  run_command_locally_in_dir(
    Process {
      argv: vec!["doesnotexist".to_owned()],
      env: BTreeMap::new(),
      working_directory: None,
      input_files: EMPTY_DIGEST,
      output_files: BTreeSet::new(),
      output_directories: BTreeSet::new(),
      timeout: one_second(),
      description: "failing execution".to_string(),
      unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
      jdk_home: None,
      target_platform: PlatformConstraint::None,
      is_nailgunnable: false,
    },
    preserved_work_root.clone(),
    false,
    None,
    None,
  )
  .await
  .expect_err("Want process to fail");

  assert!(preserved_work_root.exists());
  // Collect all of the top level sub-dirs under our test workdir.
  assert_eq!(testutil::file::list_dir(&preserved_work_root).len(), 1);
}

#[tokio::test]
async fn all_containing_directories_for_outputs_are_created() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      format!(
        // mkdir would normally fail, since birds/ doesn't yet exist, as would echo, since cats/
        // does not exist, but we create the containing directories for all outputs before the
        // process executes.
        "/bin/mkdir birds/falcons && echo -n {} > cats/roland",
        TestData::roland().string()
      ),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: vec![PathBuf::from("cats/roland")].into_iter().collect(),
    output_directories: vec![PathBuf::from("birds/falcons")].into_iter().collect(),
    timeout: one_second(),
    description: "create nonoverlapping directories and file".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::nested_dir_and_file().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

#[tokio::test]
async fn output_empty_dir() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      "/bin/mkdir falcons".to_string(),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: vec![PathBuf::from("falcons")].into_iter().collect(),
    timeout: one_second(),
    description: "bash".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: TestDirectory::containing_falcons_dir().digest(),
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  )
}

/// This test attempts to make sure local only scratch files are materialized correctly by
/// making sure that with input_files being empty, we would be able to capture the content of
/// the local only scratch inputs as outputs.
#[tokio::test]
async fn local_only_scratch_files_materialized() {
  let store_dir = TempDir::new().unwrap();
  let executor = task_executor::Executor::new(Handle::current());
  let store = Store::local_only(executor.clone(), store_dir.path()).unwrap();

  // Prepare the store to contain roland, because the EPR needs to materialize it
  let roland_directory_digest = TestDirectory::containing_roland().digest();
  store
    .record_directory(&TestDirectory::containing_roland().directory(), true)
    .await
    .expect("Error saving directory");
  store
    .store_file_bytes(TestData::roland().bytes(), false)
    .await
    .expect("Error saving file bytes");

  let work_dir = TempDir::new().unwrap();
  let result = run_command_locally_in_dir(
    Process {
      argv: vec![find_bash(), "-c".to_owned(), format!("echo -n ''")],
      env: BTreeMap::new(),
      working_directory: None,
      input_files: EMPTY_DIGEST,
      output_files: vec![PathBuf::from("roland")].into_iter().collect(),
      output_directories: BTreeSet::new(),
      timeout: one_second(),
      description: "treats-roland".to_string(),
      unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule:
        roland_directory_digest,
      jdk_home: None,
      target_platform: PlatformConstraint::None,
      is_nailgunnable: false,
    },
    work_dir.path().to_owned(),
    true,
    Some(store),
    Some(executor),
  )
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes(""),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: roland_directory_digest,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  );
}

#[tokio::test]
async fn timeout() {
  let result = run_command_locally(Process {
    argv: vec![
      find_bash(),
      "-c".to_owned(),
      "/bin/sleep 0.2; /bin/echo -n 'European Burmese'".to_string(),
    ],
    env: BTreeMap::new(),
    working_directory: None,
    input_files: EMPTY_DIGEST,
    output_files: BTreeSet::new(),
    output_directories: BTreeSet::new(),
    timeout: Some(Duration::from_millis(100)),
    description: "sleepy-cat".to_string(),
    unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
    jdk_home: None,
    target_platform: PlatformConstraint::None,
    is_nailgunnable: false,
  })
  .await
  .unwrap();

  assert_eq!(result.exit_code, -15);

  let error_msg = String::from_utf8(result.stdout.to_vec()).unwrap();
  assert_that(&error_msg).contains("Exceeded timeout");
  assert_that(&error_msg).contains("sleepy-cat");
}

#[tokio::test]
async fn working_directory() {
  let store_dir = TempDir::new().unwrap();
  let executor = task_executor::Executor::new(Handle::current());
  let store = Store::local_only(executor.clone(), store_dir.path()).unwrap();

  // Prepare the store to contain /cats/roland, because the EPR needs to materialize it and then run
  // from the ./cats directory.
  store
    .store_file_bytes(TestData::roland().bytes(), false)
    .await
    .expect("Error saving file bytes");
  store
    .record_directory(&TestDirectory::containing_roland().directory(), true)
    .await
    .expect("Error saving directory");
  store
    .record_directory(&TestDirectory::nested().directory(), true)
    .await
    .expect("Error saving directory");

  let work_dir = TempDir::new().unwrap();
  let result = run_command_locally_in_dir(
    Process {
      argv: vec![find_bash(), "-c".to_owned(), "/bin/ls".to_string()],
      env: BTreeMap::new(),
      working_directory: Some(RelativePath::new("cats").unwrap()),
      input_files: TestDirectory::nested().digest(),
      output_files: BTreeSet::new(),
      output_directories: BTreeSet::new(),
      timeout: one_second(),
      description: "confused-cat".to_string(),
      unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule: EMPTY_DIGEST,
      jdk_home: None,
      target_platform: PlatformConstraint::None,
      is_nailgunnable: false,
    },
    work_dir.path().to_owned(),
    true,
    Some(store),
    Some(executor),
  )
  .await;

  assert_eq!(
    result.unwrap(),
    FallibleProcessResultWithPlatform {
      stdout: as_bytes("roland\n"),
      stderr: as_bytes(""),
      exit_code: 0,
      output_directory: EMPTY_DIGEST,
      execution_attempts: vec![],
      platform: Platform::current().unwrap(),
    }
  );
}

async fn run_command_locally(req: Process) -> Result<FallibleProcessResultWithPlatform, String> {
  let work_dir = TempDir::new().unwrap();
  run_command_locally_in_dir_with_cleanup(req, work_dir.path().to_owned()).await
}

async fn run_command_locally_in_dir_with_cleanup(
  req: Process,
  dir: PathBuf,
) -> Result<FallibleProcessResultWithPlatform, String> {
  run_command_locally_in_dir(req, dir, true, None, None).await
}

async fn run_command_locally_in_dir(
  req: Process,
  dir: PathBuf,
  cleanup: bool,
  store: Option<Store>,
  executor: Option<task_executor::Executor>,
) -> Result<FallibleProcessResultWithPlatform, String> {
  let store_dir = TempDir::new().unwrap();
  let executor = executor.unwrap_or_else(|| task_executor::Executor::new(Handle::current()));
  let store =
    store.unwrap_or_else(|| Store::local_only(executor.clone(), store_dir.path()).unwrap());
  let runner = crate::local::CommandRunner::new(store, executor.clone(), dir, cleanup);
  runner.run(req.into(), Context::default()).await
}

fn one_second() -> Option<Duration> {
  Some(Duration::from_millis(1000))
}

avg_line_length: 30.434099
max_line_length: 100
alphanum_fraction: 0.67309
fc80f37806f31b09120bd812309356c80d0c6161
68
#[cfg(any(feature = "google-chromeos-uidetection-v1"))] pub mod v1;
22.666667
55
0.705882
ab831083cbd7832b4bfbc14ec47e12310813e16f
6,975
use std::fs; use std::io::{self, prelude::*}; use std::path::Path; use std::sync::Arc; use calypso_repl::Repl; use clap::ArgMatches; use calypso_base::ui::termcolor::{Color, ColorSpec, WriteColor}; use calypso_common::gcx::GlobalCtxt; use calypso_diagnostic::prelude::*; use calypso_diagnostic::reporting::files::Files; use calypso_parsing::lexer::{Lexer, TokenType}; use calypso_parsing::pretty::Printer; // use calypso_repl::Repl; use crate::buildinfo::BUILD_INFO; #[allow(clippy::single_match)] pub fn internal(gcx: &Arc<GlobalCtxt>, matches: &ArgMatches) -> CalResult<()> { match matches.subcommand() { ("lexer", Some(matches)) => lexer(gcx, matches), ("buildinfo", _) => buildinfo(gcx), ("panic", _) => panic!("Intentional panic to test ICE handling, please ignore."), _ => Ok(()), } } pub fn lexer(gcx: &Arc<GlobalCtxt>, matches: &ArgMatches) -> CalResult<()> { let ignore_ws = matches.is_present("ignore_ws"); let path = matches.value_of("INPUT").unwrap(); let (file_name, contents) = if path == "-" { if matches.is_present("repl") { return lexer_repl(gcx, ignore_ws); } let stdin = io::stdin(); let mut contents = String::new(); if let Err(err) = stdin.lock().read_to_string(&mut contents) { gcx.emit .write() .err .error(None, "while reading from stdin:", None)? .error(None, &format!("{}", err), None)? .flush()?; return Ok(()); } ("<stdin>".to_string(), contents) } else { let path = Path::new(path); if !path.exists() { gcx.emit .write() .err .error( None, "file does not exist", Some(&format!("`{}`", path.display())), )? .flush()?; return Ok(()); } ( path.display().to_string(), match fs::read_to_string(&path) { Ok(v) => v, Err(err) => { gcx.emit .write() .err .error( None, "while reading file", Some(&format!("`{}`:", path.display())), )? .error(None, &format!("{}", err), None)? .flush()?; return Ok(()); } }, ) }; run_lexer(gcx, ignore_ws, file_name, contents) } pub fn run_lexer( gcx: &Arc<GlobalCtxt>, ignore_ws: bool, file_name: String, contents: String, ) -> CalResult<()> { let file_id = gcx.sourcemgr.write().add(file_name, contents); let sourcemgr = gcx.sourcemgr.read(); let source = sourcemgr.source(file_id).unwrap(); let mut lexer = Lexer::new(file_id, source, Arc::clone(gcx)); let mut tokens = Vec::new(); let mut printer = Printer::new(file_id, Arc::clone(gcx)); loop { let token = lexer.scan(); if let Err(err) = token { let mut emit = gcx.emit.write(); emit.err.error(None, "while lexing input:", None)?; if let Some(DiagnosticError::Diagnostic) = err.try_downcast_ref::<DiagnosticError>() { let mut buf = emit.err.buffer(); gcx.grcx .read() .fatal() .unwrap() .render(&mut buf, &sourcemgr, None)?; emit.err.emit(&buf)?; } else { emit.err.error(None, &format!("{}", err), None)?; } break; } else if let Ok(token) = token { let token_ty = token.value().0; if !ignore_ws || token_ty != TokenType::Ws { tokens.push(token); } if token_ty == TokenType::Eof { break; } } } gcx.grcx .read() .errors() .iter() .try_for_each(|e| -> CalResult<()> { let mut emit = gcx.emit.write(); let mut buf = emit.err.buffer(); e.render(&mut buf, &sourcemgr, None)?; emit.err.emit(&buf)?; Ok(()) })?; let tokens = tokens .iter() .map(|v| printer.print_token(v)) .collect::<Result<Vec<String>, _>>(); match tokens { Ok(tokens) => println!("{}", tokens.join("\n")), Err(err) => { gcx.emit .write() .err .error(None, "while pretty-printing tokens:", None)? 
.error(None, &format!("{}", err), None)?; } } Ok(()) } pub fn lexer_repl(gcx: &Arc<GlobalCtxt>, ignore_ws: bool) -> CalResult<()> { struct ReplCtx {} let repl_gcx = Arc::clone(gcx); let mut repl = Repl::new( Box::new(move |_ctx, contents| { run_lexer(&repl_gcx, ignore_ws, "<repl>".to_string(), contents) .ok() .map(|_| String::new()) }), ReplCtx {}, ) .prefix("\\".to_string()); repl.run( &format!( "Calypso CLI v{} - internal debugging command: lexer", BUILD_INFO.version ), |_| String::from(">>> "), ) .expect("REPL failure"); Ok(()) } pub fn buildinfo(gcx: &Arc<GlobalCtxt>) -> CalResult<()> { let mut bi = BUILD_INFO; let mut emit = gcx.emit.write(); let out = &mut emit.out; out.info("=:= Version =:=", None)? .newline()? .info("version", Some(bi.version))? .info("git branch", Some(bi.git_branch))? .info("git commit", Some(bi.git_commit))? .newline()? .info("=:= Build Env =:=", None)? .newline()? .info("features:", None)?; if bi.cargo_features.is_empty() { bi.cargo_features = "no cargo features enabled"; } for feature in bi.cargo_features.split(',') { out.set_color( ColorSpec::new() .set_fg(Some(Color::Green)) .set_bold(true) .set_intense(true), )?; out.print(" =>")?; out.reset()?; out.print(&format!(" {}", feature))?.newline()?; } out.info("profile", Some(bi.cargo_profile))? .info("target triple", Some(bi.cargo_target_triple))? .newline()? .info("=:= Rust =:=", None)? .newline()? .info("channel", Some(bi.rustc_channel))? .info("commit date", Some(bi.rustc_commit_date))? .info("commit hash", Some(bi.rustc_commit_hash))? .info("host triple", Some(bi.rustc_host_triple))? .info("llvm version", Some(bi.rustc_llvm_version))? .info("version", Some(bi.rustc_version))? .flush()?; Ok(()) }
29.807692
98
0.479713
e50693ea804b6993a852ea82bbcea5ae054dccb5
11,744
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! typeck.rs, an introduction The type checker is responsible for: 1. Determining the type of each expression 2. Resolving methods and traits 3. Guaranteeing that most type rules are met ("most?", you say, "why most?" Well, dear reader, read on) The main entry point is `check_crate()`. Type checking operates in several major phases: 1. The collect phase first passes over all items and determines their type, without examining their "innards". 2. Variance inference then runs to compute the variance of each parameter 3. Coherence checks for overlapping or orphaned impls 4. Finally, the check phase then checks function bodies and so forth. Within the check phase, we check each function body one at a time (bodies of function expressions are checked as part of the containing function). Inference is used to supply types wherever they are unknown. The actual checking of a function itself has several phases (check, regionck, writeback), as discussed in the documentation for the `check` module. The type checker is defined into various submodules which are documented independently: - astconv: converts the AST representation of types into the `ty` representation - collect: computes the types of each top-level item and enters them into the `cx.tcache` table for later use - coherence: enforces coherence rules, builds some tables - variance: variance inference - check: walks over function bodies and type checks them, inferring types for local variables, type parameters, etc as necessary. - infer: finds the types to use for each type variable such that all subtyping and assignment constraints are met. In essence, the check module specifies the constraints, and the infer module solves them. # Note This API is completely unstable and subject to change. */ // Do not remove on snapshot creation. Needed for bootstrap. 
(Issue #22364) #![cfg_attr(stage0, feature(custom_attribute))] #![crate_name = "rustc_typeck"] #![unstable(feature = "rustc_private")] #![staged_api] #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "http://www.rust-lang.org/favicon.ico", html_root_url = "http://doc.rust-lang.org/nightly/")] #![allow(non_camel_case_types)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(collections)] #![feature(core)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate arena; extern crate fmt_macros; extern crate rustc; pub use rustc::lint; pub use rustc::metadata; pub use rustc::middle; pub use rustc::session; pub use rustc::util; use middle::def; use middle::infer; use middle::subst; use middle::ty::{self, Ty}; use session::config; use util::common::time; use util::ppaux::Repr; use util::ppaux; use syntax::codemap::Span; use syntax::print::pprust::*; use syntax::{ast, ast_map, abi}; use syntax::ast_util::local_def; use std::cell::RefCell; // NB: This module needs to be declared first so diagnostics are // registered before they are used. pub mod diagnostics; mod check; mod rscope; mod astconv; mod collect; mod constrained_type_params; mod coherence; mod variance; pub struct TypeAndSubsts<'tcx> { pub substs: subst::Substs<'tcx>, pub ty: Ty<'tcx>, } pub struct CrateCtxt<'a, 'tcx: 'a> { // A mapping from method call sites to traits that have that method. trait_map: ty::TraitMap, /// A vector of every trait accessible in the whole crate /// (i.e. including those from subcrates). This is used only for /// error reporting, and so is lazily initialised and generally /// shouldn't taint the common path (hence the RefCell). 
all_traits: RefCell<Option<check::method::AllTraitsVec>>, tcx: &'a ty::ctxt<'tcx>, } // Functions that write types into the node type table fn write_ty_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) { debug!("write_ty_to_tcx({}, {})", node_id, ppaux::ty_to_string(tcx, ty)); assert!(!ty::type_needs_infer(ty)); tcx.node_type_insert(node_id, ty); } fn write_substs_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>, node_id: ast::NodeId, item_substs: ty::ItemSubsts<'tcx>) { if !item_substs.is_noop() { debug!("write_substs_to_tcx({}, {})", node_id, item_substs.repr(tcx)); assert!(item_substs.substs.types.all(|t| !ty::type_needs_infer(*t))); tcx.item_substs.borrow_mut().insert(node_id, item_substs); } } fn lookup_full_def(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) -> def::Def { match tcx.def_map.borrow().get(&id) { Some(x) => x.full_def(), None => { span_fatal!(tcx.sess, sp, E0242, "internal error looking up a definition") } } } fn require_same_types<'a, 'tcx, M>(tcx: &ty::ctxt<'tcx>, maybe_infcx: Option<&infer::InferCtxt<'a, 'tcx>>, t1_is_expected: bool, span: Span, t1: Ty<'tcx>, t2: Ty<'tcx>, msg: M) -> bool where M: FnOnce() -> String, { let result = match maybe_infcx { None => { let infcx = infer::new_infer_ctxt(tcx); infer::mk_eqty(&infcx, t1_is_expected, infer::Misc(span), t1, t2) } Some(infcx) => { infer::mk_eqty(infcx, t1_is_expected, infer::Misc(span), t1, t2) } }; match result { Ok(_) => true, Err(ref terr) => { span_err!(tcx.sess, span, E0211, "{}: {}", msg(), ty::type_err_to_str(tcx, terr)); ty::note_and_explain_type_err(tcx, terr, span); false } } } fn check_main_fn_ty(ccx: &CrateCtxt, main_id: ast::NodeId, main_span: Span) { let tcx = ccx.tcx; let main_t = ty::node_id_to_type(tcx, main_id); match main_t.sty { ty::ty_bare_fn(..) => { match tcx.map.find(main_id) { Some(ast_map::NodeItem(it)) => { match it.node { ast::ItemFn(_, _, _, ref ps, _) if ps.is_parameterized() => { span_err!(ccx.tcx.sess, main_span, E0131, "main function is not allowed to have type parameters"); return; } _ => () } } _ => () } let se_ty = ty::mk_bare_fn(tcx, Some(local_def(main_id)), tcx.mk_bare_fn(ty::BareFnTy { unsafety: ast::Unsafety::Normal, abi: abi::Rust, sig: ty::Binder(ty::FnSig { inputs: Vec::new(), output: ty::FnConverging(ty::mk_nil(tcx)), variadic: false }) })); require_same_types(tcx, None, false, main_span, main_t, se_ty, || { format!("main function expects type: `{}`", ppaux::ty_to_string(ccx.tcx, se_ty)) }); } _ => { tcx.sess.span_bug(main_span, &format!("main has a non-function type: found \ `{}`", ppaux::ty_to_string(tcx, main_t))); } } } fn check_start_fn_ty(ccx: &CrateCtxt, start_id: ast::NodeId, start_span: Span) { let tcx = ccx.tcx; let start_t = ty::node_id_to_type(tcx, start_id); match start_t.sty { ty::ty_bare_fn(..) 
=> { match tcx.map.find(start_id) { Some(ast_map::NodeItem(it)) => { match it.node { ast::ItemFn(_,_,_,ref ps,_) if ps.is_parameterized() => { span_err!(tcx.sess, start_span, E0132, "start function is not allowed to have type parameters"); return; } _ => () } } _ => () } let se_ty = ty::mk_bare_fn(tcx, Some(local_def(start_id)), tcx.mk_bare_fn(ty::BareFnTy { unsafety: ast::Unsafety::Normal, abi: abi::Rust, sig: ty::Binder(ty::FnSig { inputs: vec!( tcx.types.isize, ty::mk_imm_ptr(tcx, ty::mk_imm_ptr(tcx, tcx.types.u8)) ), output: ty::FnConverging(tcx.types.isize), variadic: false, }), })); require_same_types(tcx, None, false, start_span, start_t, se_ty, || { format!("start function expects type: `{}`", ppaux::ty_to_string(ccx.tcx, se_ty)) }); } _ => { tcx.sess.span_bug(start_span, &format!("start has a non-function type: found \ `{}`", ppaux::ty_to_string(tcx, start_t))); } } } fn check_for_entry_fn(ccx: &CrateCtxt) { let tcx = ccx.tcx; match *tcx.sess.entry_fn.borrow() { Some((id, sp)) => match tcx.sess.entry_type.get() { Some(config::EntryMain) => check_main_fn_ty(ccx, id, sp), Some(config::EntryStart) => check_start_fn_ty(ccx, id, sp), Some(config::EntryNone) => {} None => tcx.sess.bug("entry function without a type") }, None => {} } } pub fn check_crate(tcx: &ty::ctxt, trait_map: ty::TraitMap) { let time_passes = tcx.sess.time_passes(); let ccx = CrateCtxt { trait_map: trait_map, all_traits: RefCell::new(None), tcx: tcx }; time(time_passes, "type collecting", (), |_| collect::collect_item_types(tcx)); // this ensures that later parts of type checking can assume that items // have valid types and not error tcx.sess.abort_if_errors(); time(time_passes, "variance inference", (), |_| variance::infer_variance(tcx)); time(time_passes, "coherence checking", (), |_| coherence::check_coherence(&ccx)); time(time_passes, "type checking", (), |_| check::check_item_types(&ccx)); check_for_entry_fn(&ccx); tcx.sess.abort_if_errors(); } #[cfg(stage0)] __build_diagnostic_array! { DIAGNOSTICS } #[cfg(not(stage0))] __build_diagnostic_array! { librustc_typeck, DIAGNOSTICS }
33.458689
100
0.55705
76d5ba7d0cfea24becf8d65c13a844df93164d52
3,491
use input::token::*;

#[derive(Debug)]
enum ResultError {
    OperationNotFound,
}

struct Expression {
    tokens: Vec<Token>,
    arguments: Vec<f64>,
    operations: Vec<String>,
    results: Vec<f64>,
}

trait Solve {
    fn get_operation_result(&self) -> Result<f64, ResultError>;
    fn new() -> Self;
}

impl Solve for Expression {
    fn new() -> Self {
        Expression {
            tokens: Vec::new(),
            arguments: Vec::new(),
            operations: Vec::new(),
            results: Vec::new(),
        }
    }

    fn get_operation_result(&self) -> Result<f64, ResultError> {
        match self.operations[0].as_str() {
            "+" => Ok(self.arguments[1] + self.arguments[0]),
            "-" => Ok(self.arguments[1] - self.arguments[0]),
            "*" => Ok(self.arguments[1] * self.arguments[0]),
            "/" => Ok(self.arguments[1] / self.arguments[0]),
            _ => Err(ResultError::OperationNotFound),
        }
    }
}

fn convert_to_u64(input: String) -> Result<u64, std::num::ParseIntError> {
    input.parse::<u64>()
}

fn convert_to_f64(input: String) -> Result<f64, std::num::ParseFloatError> {
    input.parse::<f64>()
}

fn sqrt(input: f64) -> f64 {
    input.sqrt()
}

// Exponentiation by repeated multiplication.
fn pow(input: f64, power: u64) -> f64 {
    let mut iterator: u64 = 0;
    // The accumulator must start at 1.0; starting at 0.0 would make every
    // result 0.0.
    let mut result: f64 = 1.0;
    while iterator < power {
        result = result * input;
        iterator = iterator + 1;
    }
    return result;
}

fn get_function_result(function_name: String, arguments: Vec<String>) -> String {
    let mut int_arguments: Vec<u64> = Vec::new();
    let mut float_arguments: Vec<f64> = Vec::new();

    // create u64 forms of all the arguments; non-integer arguments fall back
    // to 0 instead of panicking (only `pow` actually reads an integer argument)
    for string in &arguments {
        int_arguments.push(convert_to_u64(string.to_string()).unwrap_or(0))
    }

    // create f64 forms of all the arguments
    for string in &arguments {
        float_arguments.push(convert_to_f64(string.to_string()).unwrap())
    }

    match function_name.as_str() {
        "sqrt" => sqrt(convert_to_f64(arguments[0].clone()).unwrap()).to_string(),
        "pow" => pow(convert_to_f64(arguments[0].clone()).unwrap(), int_arguments[1]).to_string(),
        _ => "".to_string(),
    }
}

pub fn solve_equation(equation: &str) -> Result<Vec<f64>, TokenError> {
    let mut expression: Expression = Expression::new();
    expression.tokens = create_token_vec(equation).unwrap();

    // sort the tokens into either the argument or the operation stack
    for token in &expression.tokens {
        let token_type: Tokens = token.clone().token;
        match token_type {
            Tokens::Number => expression.arguments.push(convert_to_f64(token.clone().value).unwrap()),
            Tokens::Operation => expression.operations.push(token.clone().name),
            _ => return Err(TokenError::UnknownToken),
        }
    }

    // dispatch the operation with its arguments
    expression.results.push(expression.get_operation_result().unwrap());

    return Ok(expression.results);
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pow() {
        let result = pow(2.0, 2);
        assert_eq!(result, 4.0);
    }

    #[test]
    fn test_get_operation() {
    }
}
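// A minimal usage sketch for `solve_equation`, added for illustration. It
// assumes that `create_token_vec` (from the `input::token` module above)
// splits "1 + 2" into two `Number` tokens and one `Operation` token; if the
// tokenizer emits additional token kinds (e.g. whitespace), adjust the input.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn solves_simple_addition() {
        // Addition is commutative, so the argument order used by
        // `get_operation_result` does not affect this result.
        let results = solve_equation("1 + 2").unwrap();
        assert_eq!(results, vec![3.0]);
    }
}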
26.24812
102
0.584646
f71f0fb3464da0cf5621f76176a82546312b597b
22,797
mod tester;
mod wrapper;

use std::cmp::min;
use std::sync::Arc;

use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt};
use geoengine_datatypes::dataset::DatasetId;
use geoengine_datatypes::primitives::VectorQueryRectangle;
use rayon::ThreadPool;
use serde::{Deserialize, Serialize};
use snafu::ensure;

use crate::adapters::FeatureCollectionChunkMerger;
use crate::engine::{
    ExecutionContext, InitializedVectorOperator, Operator, QueryContext, TypedVectorQueryProcessor,
    VectorOperator, VectorQueryProcessor, VectorResultDescriptor,
};
use crate::engine::{OperatorDatasets, QueryProcessor};
use crate::error;
use crate::util::Result;
use arrow::array::BooleanArray;
use async_trait::async_trait;
use geoengine_datatypes::collections::{
    FeatureCollectionInfos, FeatureCollectionModifications, GeometryCollection,
    MultiPointCollection, MultiPolygonCollection, VectorDataType,
};

pub use tester::PointInPolygonTester;
pub use wrapper::PointInPolygonTesterWithCollection;

/// The point in polygon filter requires two inputs in the following order:
/// 1. a `MultiPointCollection` source
/// 2. a `MultiPolygonCollection` source
/// Then, it filters the `MultiPointCollection` so that only those points are retained that are in any polygon.
pub type PointInPolygonFilter = Operator<PointInPolygonFilterParams, PointInPolygonFilterSource>;

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PointInPolygonFilterParams {}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PointInPolygonFilterSource {
    pub points: Box<dyn VectorOperator>,
    pub polygons: Box<dyn VectorOperator>,
}

impl OperatorDatasets for PointInPolygonFilterSource {
    fn datasets_collect(&self, datasets: &mut Vec<DatasetId>) {
        self.points.datasets_collect(datasets);
        self.polygons.datasets_collect(datasets);
    }
}

#[typetag::serde]
#[async_trait]
impl VectorOperator for PointInPolygonFilter {
    async fn initialize(
        self: Box<Self>,
        context: &dyn ExecutionContext,
    ) -> Result<Box<dyn InitializedVectorOperator>> {
        let points = self.sources.points.initialize(context).await?;
        let polygons = self.sources.polygons.initialize(context).await?;

        ensure!(
            points.result_descriptor().data_type == VectorDataType::MultiPoint,
            error::InvalidType {
                expected: VectorDataType::MultiPoint.to_string(),
                found: points.result_descriptor().data_type.to_string(),
            }
        );

        ensure!(
            polygons.result_descriptor().data_type == VectorDataType::MultiPolygon,
            error::InvalidType {
                expected: VectorDataType::MultiPolygon.to_string(),
                found: polygons.result_descriptor().data_type.to_string(),
            }
        );

        let initialized_operator = InitializedPointInPolygonFilter {
            result_descriptor: points.result_descriptor().clone(),
            points,
            polygons,
        };

        Ok(initialized_operator.boxed())
    }
}

pub struct InitializedPointInPolygonFilter {
    points: Box<dyn InitializedVectorOperator>,
    polygons: Box<dyn InitializedVectorOperator>,
    result_descriptor: VectorResultDescriptor,
}

impl InitializedVectorOperator for InitializedPointInPolygonFilter {
    fn query_processor(&self) -> Result<TypedVectorQueryProcessor> {
        let point_processor = self
            .points
            .query_processor()?
            .multi_point()
            .expect("checked in `PointInPolygonFilter` constructor");

        let polygon_processor = self
            .polygons
            .query_processor()?
.multi_polygon() .expect("checked in `PointInPolygonFilter` constructor"); Ok(TypedVectorQueryProcessor::MultiPoint( PointInPolygonFilterProcessor::new(point_processor, polygon_processor).boxed(), )) } fn result_descriptor(&self) -> &VectorResultDescriptor { &self.result_descriptor } } pub struct PointInPolygonFilterProcessor { points: Box<dyn VectorQueryProcessor<VectorType = MultiPointCollection>>, polygons: Box<dyn VectorQueryProcessor<VectorType = MultiPolygonCollection>>, } impl PointInPolygonFilterProcessor { pub fn new( points: Box<dyn VectorQueryProcessor<VectorType = MultiPointCollection>>, polygons: Box<dyn VectorQueryProcessor<VectorType = MultiPolygonCollection>>, ) -> Self { Self { points, polygons } } fn filter_parallel( points: &Arc<MultiPointCollection>, polygons: &MultiPolygonCollection, thread_pool: &ThreadPool, ) -> Vec<bool> { debug_assert!(!points.is_empty()); // TODO: parallelize over coordinate rather than features let tester = Arc::new(PointInPolygonTester::new(polygons)); // TODO: multithread let parallelism = thread_pool.current_num_threads(); let chunk_size = (points.len() as f64 / parallelism as f64).ceil() as usize; let mut result = vec![false; points.len()]; thread_pool.scope(|scope| { let num_features = points.len(); let feature_offsets = points.feature_offsets(); let time_intervals = points.time_intervals(); let coordinates = points.coordinates(); for (chunk_index, chunk_result) in (&mut result).chunks_mut(chunk_size).enumerate() { let feature_index_start = chunk_index * chunk_size; let features_index_end = min(feature_index_start + chunk_size, num_features); let tester = tester.clone(); scope.spawn(move |_| { for ( feature_index, ((coordinates_start_index, coordinates_end_index), time_interval), ) in two_tuple_windows( feature_offsets[feature_index_start..=features_index_end] .iter() .map(|&c| c as usize), ) .zip(time_intervals[feature_index_start..features_index_end].iter()) .enumerate() { let is_multi_point_in_polygon_collection = coordinates [coordinates_start_index..coordinates_end_index] .iter() .any(|coordinate| { tester.any_polygon_contains_coordinate(coordinate, time_interval) }); chunk_result[feature_index] = is_multi_point_in_polygon_collection; } }); } }); result } async fn filter_points( ctx: &dyn QueryContext, points: Arc<MultiPointCollection>, polygons: MultiPolygonCollection, initial_filter: &BooleanArray, ) -> Result<BooleanArray> { let thread_pool = ctx.thread_pool().clone(); let thread_points = points.clone(); let filter = tokio::task::spawn_blocking(move || { Self::filter_parallel(&thread_points, &polygons, &thread_pool) }) .await?; arrow::compute::or(initial_filter, &filter.into()).map_err(Into::into) } } #[async_trait] impl VectorQueryProcessor for PointInPolygonFilterProcessor { type VectorType = MultiPointCollection; async fn vector_query<'a>( &'a self, query: VectorQueryRectangle, ctx: &'a dyn QueryContext, ) -> Result<BoxStream<'a, Result<Self::VectorType>>> { let filtered_stream = self.points .query(query, ctx) .await? .and_then(move |points| async move { if points.is_empty() { return Ok(points); } let initial_filter = BooleanArray::from(vec![false; points.len()]); let arc_points = Arc::new(points); let filter = self .polygons .query(query, ctx) .await? 
.fold(Ok(initial_filter), |filter, polygons| async { let polygons = polygons?; if polygons.is_empty() { return filter; } Self::filter_points(ctx, arc_points.clone(), polygons, &filter?).await }) .await?; arc_points.filter(filter).map_err(Into::into) }); Ok( FeatureCollectionChunkMerger::new(filtered_stream.fuse(), ctx.chunk_byte_size().into()) .boxed(), ) } } /// Loop through an iterator by yielding the current and previous tuple. Starts with the /// (first, second) item, so the iterator must have more than one item to create an output. fn two_tuple_windows<I, T>(mut iter: I) -> impl Iterator<Item = (T, T)> where I: Iterator<Item = T>, T: Copy, { let mut last = iter.next(); iter.map(move |item| { let output = (last.unwrap(), item); last = Some(item); output }) } #[cfg(test)] mod tests { use super::*; use geoengine_datatypes::primitives::{ BoundingBox2D, Coordinate2D, MultiPoint, MultiPolygon, SpatialResolution, TimeInterval, }; use geoengine_datatypes::util::test::TestDefault; use crate::engine::{ChunkByteSize, MockExecutionContext, MockQueryContext}; use crate::mock::MockFeatureCollectionSource; #[test] fn point_in_polygon_boundary_conditions() { let collection = MultiPolygonCollection::from_data( vec![MultiPolygon::new(vec![vec![vec![ (0.0, 0.0).into(), (10.0, 0.0).into(), (10.0, 10.0).into(), (0.0, 10.0).into(), (0.0, 0.0).into(), ]]]) .unwrap()], vec![Default::default(); 1], Default::default(), ) .unwrap(); let tester = PointInPolygonTester::new(&collection); // the algorithm is not stable for boundary cases directly on the edges assert!(tester.any_polygon_contains_coordinate( &Coordinate2D::new(0.000_001, 0.000_001), &Default::default() ),); assert!(tester.any_polygon_contains_coordinate( &Coordinate2D::new(0.000_001, 0.1), &Default::default() ),); assert!(tester.any_polygon_contains_coordinate( &Coordinate2D::new(0.1, 0.000_001), &Default::default() ),); assert!(tester .any_polygon_contains_coordinate(&Coordinate2D::new(9.9, 9.9), &Default::default()),); assert!(tester .any_polygon_contains_coordinate(&Coordinate2D::new(10.0, 9.9), &Default::default()),); assert!(tester .any_polygon_contains_coordinate(&Coordinate2D::new(9.9, 10.0), &Default::default()),); assert!(!tester .any_polygon_contains_coordinate(&Coordinate2D::new(-0.1, -0.1), &Default::default()),); assert!(!tester .any_polygon_contains_coordinate(&Coordinate2D::new(0.0, -0.1), &Default::default()),); assert!(!tester .any_polygon_contains_coordinate(&Coordinate2D::new(-0.1, 0.0), &Default::default()),); assert!(!tester .any_polygon_contains_coordinate(&Coordinate2D::new(10.1, 10.1), &Default::default()),); assert!(!tester .any_polygon_contains_coordinate(&Coordinate2D::new(10.1, 9.9), &Default::default()),); assert!(!tester .any_polygon_contains_coordinate(&Coordinate2D::new(9.9, 10.1), &Default::default()),); } #[tokio::test] async fn all() -> Result<()> { let points = MultiPointCollection::from_data( MultiPoint::many(vec![(0.001, 0.1), (1.0, 1.1), (2.0, 3.1)]).unwrap(), vec![TimeInterval::new_unchecked(0, 1); 3], Default::default(), )?; let point_source = MockFeatureCollectionSource::single(points.clone()).boxed(); let polygon_source = MockFeatureCollectionSource::single(MultiPolygonCollection::from_data( vec![MultiPolygon::new(vec![vec![vec![ (0.0, 0.0).into(), (10.0, 0.0).into(), (10.0, 10.0).into(), (0.0, 10.0).into(), (0.0, 0.0).into(), ]]])?], vec![TimeInterval::new_unchecked(0, 1); 1], Default::default(), )?) 
.boxed(); let operator = PointInPolygonFilter { params: PointInPolygonFilterParams {}, sources: PointInPolygonFilterSource { points: point_source, polygons: polygon_source, }, } .boxed() .initialize(&MockExecutionContext::test_default()) .await?; let query_processor = operator.query_processor()?.multi_point().unwrap(); let query_rectangle = VectorQueryRectangle { spatial_bounds: BoundingBox2D::new((0., 0.).into(), (10., 10.).into()).unwrap(), time_interval: TimeInterval::default(), spatial_resolution: SpatialResolution::zero_point_one(), }; let ctx = MockQueryContext::new(ChunkByteSize::MAX); let query = query_processor.query(query_rectangle, &ctx).await.unwrap(); let result = query .map(Result::unwrap) .collect::<Vec<MultiPointCollection>>() .await; assert_eq!(result.len(), 1); assert_eq!(result[0], points); Ok(()) } #[tokio::test] async fn none() -> Result<()> { let points = MultiPointCollection::from_data( MultiPoint::many(vec![(0.0, 0.1), (1.0, 1.1), (2.0, 3.1)]).unwrap(), vec![TimeInterval::new_unchecked(0, 1); 3], Default::default(), )?; let point_source = MockFeatureCollectionSource::single(points.clone()).boxed(); let polygon_source = MockFeatureCollectionSource::single( MultiPolygonCollection::from_data(vec![], vec![], Default::default())?, ) .boxed(); let operator = PointInPolygonFilter { params: PointInPolygonFilterParams {}, sources: PointInPolygonFilterSource { points: point_source, polygons: polygon_source, }, } .boxed() .initialize(&MockExecutionContext::test_default()) .await?; let query_processor = operator.query_processor()?.multi_point().unwrap(); let query_rectangle = VectorQueryRectangle { spatial_bounds: BoundingBox2D::new((0., 0.).into(), (10., 10.).into()).unwrap(), time_interval: TimeInterval::default(), spatial_resolution: SpatialResolution::zero_point_one(), }; let ctx = MockQueryContext::new(ChunkByteSize::MAX); let query = query_processor.query(query_rectangle, &ctx).await.unwrap(); let result = query .map(Result::unwrap) .collect::<Vec<MultiPointCollection>>() .await; assert_eq!(result.len(), 0); Ok(()) } #[tokio::test] async fn time() -> Result<()> { let points = MultiPointCollection::from_data( MultiPoint::many(vec![(1.0, 1.1), (2.0, 2.1), (3.0, 3.1)]).unwrap(), vec![ TimeInterval::new(0, 1)?, TimeInterval::new(5, 6)?, TimeInterval::new(0, 5)?, ], Default::default(), )?; let point_source = MockFeatureCollectionSource::single(points.clone()).boxed(); let polygon = MultiPolygon::new(vec![vec![vec![ (0.0, 0.0).into(), (10.0, 0.0).into(), (10.0, 10.0).into(), (0.0, 10.0).into(), (0.0, 0.0).into(), ]]])?; let polygon_source = MockFeatureCollectionSource::single(MultiPolygonCollection::from_data( vec![polygon.clone(), polygon], vec![TimeInterval::new(0, 1)?, TimeInterval::new(1, 2)?], Default::default(), )?) 
.boxed(); let operator = PointInPolygonFilter { params: PointInPolygonFilterParams {}, sources: PointInPolygonFilterSource { points: point_source, polygons: polygon_source, }, } .boxed() .initialize(&MockExecutionContext::test_default()) .await?; let query_processor = operator.query_processor()?.multi_point().unwrap(); let query_rectangle = VectorQueryRectangle { spatial_bounds: BoundingBox2D::new((0., 0.).into(), (10., 10.).into()).unwrap(), time_interval: TimeInterval::default(), spatial_resolution: SpatialResolution::zero_point_one(), }; let ctx = MockQueryContext::new(ChunkByteSize::MAX); let query = query_processor.query(query_rectangle, &ctx).await.unwrap(); let result = query .map(Result::unwrap) .collect::<Vec<MultiPointCollection>>() .await; assert_eq!(result.len(), 1); assert_eq!(result[0], points.filter(vec![true, false, true])?); Ok(()) } #[tokio::test] async fn multiple_inputs() -> Result<()> { let points1 = MultiPointCollection::from_data( MultiPoint::many(vec![(5.0, 5.1), (15.0, 15.1)]).unwrap(), vec![TimeInterval::new(0, 1)?; 2], Default::default(), )?; let points2 = MultiPointCollection::from_data( MultiPoint::many(vec![(6.0, 6.1), (16.0, 16.1)]).unwrap(), vec![TimeInterval::new(1, 2)?; 2], Default::default(), )?; let point_source = MockFeatureCollectionSource::multiple(vec![points1.clone(), points2.clone()]).boxed(); let polygon1 = MultiPolygon::new(vec![vec![vec![ (0.0, 0.0).into(), (10.0, 0.0).into(), (10.0, 10.0).into(), (0.0, 10.0).into(), (0.0, 0.0).into(), ]]])?; let polygon2 = MultiPolygon::new(vec![vec![vec![ (10.0, 10.0).into(), (20.0, 10.0).into(), (20.0, 20.0).into(), (10.0, 20.0).into(), (10.0, 10.0).into(), ]]])?; let polygon_source = MockFeatureCollectionSource::multiple(vec![ MultiPolygonCollection::from_data( vec![polygon1.clone()], vec![TimeInterval::new(0, 1)?], Default::default(), )?, MultiPolygonCollection::from_data( vec![polygon1, polygon2], vec![TimeInterval::new(1, 2)?, TimeInterval::new(1, 2)?], Default::default(), )?, ]) .boxed(); let operator = PointInPolygonFilter { params: PointInPolygonFilterParams {}, sources: PointInPolygonFilterSource { points: point_source, polygons: polygon_source, }, } .boxed() .initialize(&MockExecutionContext::test_default()) .await?; let query_processor = operator.query_processor()?.multi_point().unwrap(); let query_rectangle = VectorQueryRectangle { spatial_bounds: BoundingBox2D::new((0., 0.).into(), (10., 10.).into()).unwrap(), time_interval: TimeInterval::default(), spatial_resolution: SpatialResolution::zero_point_one(), }; let ctx_one_chunk = MockQueryContext::new(ChunkByteSize::MAX); let ctx_minimal_chunks = MockQueryContext::new(ChunkByteSize::MIN); let query = query_processor .query(query_rectangle, &ctx_minimal_chunks) .await .unwrap(); let result = query .map(Result::unwrap) .collect::<Vec<MultiPointCollection>>() .await; assert_eq!(result.len(), 2); assert_eq!(result[0], points1.filter(vec![true, false])?); assert_eq!(result[1], points2); let query = query_processor .query(query_rectangle, &ctx_one_chunk) .await .unwrap(); let result = query .map(Result::unwrap) .collect::<Vec<MultiPointCollection>>() .await; assert_eq!(result.len(), 1); assert_eq!( result[0], points1.filter(vec![true, false])?.append(&points2)? 
); Ok(()) } #[tokio::test] async fn empty_points() { let point_collection = MultiPointCollection::from_data(vec![], vec![], Default::default()).unwrap(); let polygon_collection = MultiPolygonCollection::from_data( vec![MultiPolygon::new(vec![vec![vec![ (0.0, 0.0).into(), (10.0, 0.0).into(), (10.0, 10.0).into(), (0.0, 10.0).into(), (0.0, 0.0).into(), ]]]) .unwrap()], vec![TimeInterval::default()], Default::default(), ) .unwrap(); let operator = PointInPolygonFilter { params: PointInPolygonFilterParams {}, sources: PointInPolygonFilterSource { points: MockFeatureCollectionSource::single(point_collection).boxed(), polygons: MockFeatureCollectionSource::single(polygon_collection).boxed(), }, } .boxed() .initialize(&MockExecutionContext::test_default()) .await .unwrap(); let query_rectangle = VectorQueryRectangle { spatial_bounds: BoundingBox2D::new((-10., -10.).into(), (10., 10.).into()).unwrap(), time_interval: TimeInterval::default(), spatial_resolution: SpatialResolution::zero_point_one(), }; let query_processor = operator.query_processor().unwrap().multi_point().unwrap(); let query_context = MockQueryContext::test_default(); let query = query_processor .query(query_rectangle, &query_context) .await .unwrap(); let result = query .map(Result::unwrap) .collect::<Vec<MultiPointCollection>>() .await; assert_eq!(result.len(), 0); } }
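// A small sketch, added for illustration, of the `two_tuple_windows` helper
// defined above: it yields overlapping (previous, current) pairs, so an input
// of fewer than two items produces no output.
#[cfg(test)]
mod two_tuple_windows_sketch {
    use super::two_tuple_windows;

    #[test]
    fn yields_overlapping_pairs() {
        let pairs: Vec<(i32, i32)> = two_tuple_windows(vec![1, 2, 3].into_iter()).collect();
        assert_eq!(pairs, vec![(1, 2), (2, 3)]);
    }
}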
34.540909
116
0.568189
9c88b37331475b6f15cc67c3d5b1700d247239eb
5,443
use std::collections::HashMap;
use std::collections::hash_map::Values;
use quadtree::{Span, Dir};
use std::fmt;

#[derive(PartialEq, Eq, Clone)]
pub struct Position {
    x: i32,
    y: i32
}

impl Position {
    pub fn new(x: i32, y: i32) -> Position {
        return Position { x: x, y: y };
    }
}

impl fmt::Debug for Position {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({}, {})", self.x, self.y)
    }
}

#[derive(Debug, PartialEq, Eq, Clone)]
pub struct PosSpan {
    nw: Position,
    width: i32,
    height: i32
}

impl PosSpan {
    /// Creates a new span starting at `(x, y)` inclusive
    /// ending at `(x + w, y + h)` exclusive.
    pub fn new(x: i32, y: i32, w: i32, h: i32) -> PosSpan {
        return PosSpan { nw: Position { x: x, y: y }, width: w, height: h };
    }
}

impl Span<PosSpan, Position> for PosSpan {
    fn dir_of(&self, t: &Position) -> Option<Dir> {
        if t.x < self.nw.x {
            if t.y < self.nw.y {
                return Some(Dir::NW);
            } else if t.y < self.nw.y + self.height {
                return Some(Dir::W);
            } else {
                return Some(Dir::SW);
            }
        } else if t.x < self.nw.x + self.width {
            if t.y < self.nw.y {
                return Some(Dir::N);
            } else if t.y < self.nw.y + self.height {
                return None;
            } else {
                return Some(Dir::S);
            }
        } else {
            if t.y < self.nw.y {
                return Some(Dir::NE);
            } else if t.y < self.nw.y + self.height {
                return Some(Dir::E);
            } else {
                return Some(Dir::SE);
            }
        }
    }

    fn north_span(&self) -> PosSpan {
        return PosSpan::new(self.nw.x, self.nw.y - self.height, self.width, self.height);
    }

    fn south_span(&self) -> PosSpan {
        return PosSpan::new(self.nw.x, self.nw.y + self.height, self.width, self.height);
    }

    fn west_span(&self) -> PosSpan {
        return PosSpan::new(self.nw.x - self.width, self.nw.y, self.width, self.height);
    }

    fn east_span(&self) -> PosSpan {
        return PosSpan::new(self.nw.x + self.width, self.nw.y, self.width, self.height);
    }

    fn can_split(&self) -> bool {
        return self.width > 1 && self.height > 1;
    }

    fn split(&self) -> HashMap<Dir, PosSpan> {
        let left_x = self.nw.x;
        let left_y = self.nw.y;
        let width_mid = self.width / 2;
        let height_mid = self.height / 2;
        let mut result = HashMap::new();
        result.insert(Dir::NW, PosSpan::new(left_x, left_y, width_mid, height_mid));
        result.insert(Dir::NE, PosSpan::new(left_x + width_mid, left_y, self.width - width_mid, height_mid));
        result.insert(Dir::SW, PosSpan::new(left_x, left_y + height_mid, width_mid, self.height - height_mid));
        result.insert(Dir::SE, PosSpan::new(left_x + width_mid, left_y + height_mid, self.width - width_mid, self.height - height_mid));
        return result;
    }

    fn expand(&self, dir: &Dir) -> PosSpan {
        return match dir {
            &Dir::N => PosSpan::new(self.nw.x, self.nw.y - self.height, self.width * 2, self.height * 2),
            &Dir::S => PosSpan::new(self.nw.x, self.nw.y, self.width * 2, self.height * 2),
            &Dir::E => PosSpan::new(self.nw.x, self.nw.y, self.width * 2, self.height * 2),
            &Dir::W => PosSpan::new(self.nw.x - self.width, self.nw.y, self.width * 2, self.height * 2),
            &Dir::NE => PosSpan::new(self.nw.x, self.nw.y - self.height, self.width * 2, self.height * 2),
            &Dir::NW => PosSpan::new(self.nw.x - self.width, self.nw.y - self.height, self.width * 2, self.height * 2),
            &Dir::SE => PosSpan::new(self.nw.x, self.nw.y, self.width * 2, self.height * 2),
            &Dir::SW => PosSpan::new(self.nw.x - self.width, self.nw.y, self.width * 2, self.height * 2)
        };
    }

    /// Merges the given spans into their common bounding span.
    fn merge(spans: Values<Dir, PosSpan>) -> PosSpan {
        let mut min_x = None;
        let mut min_y = None;
        let mut max_x = None;
        let mut max_y = None;
        for span in spans {
            if min_x == None || min_x.unwrap() > span.nw.x {
                min_x = Some(span.nw.x);
            }
            if min_y == None || min_y.unwrap() > span.nw.y {
                min_y = Some(span.nw.y);
            }
            if max_x == None || max_x.unwrap() < span.nw.x + span.width {
                max_x = Some(span.nw.x + span.width);
            }
            if max_y == None || max_y.unwrap() < span.nw.y + span.height {
                max_y = Some(span.nw.y + span.height);
            }
        }
        return PosSpan::new(
            min_x.unwrap(),
            min_y.unwrap(),
            max_x.unwrap() - min_x.unwrap(),
            max_y.unwrap() - min_y.unwrap());
    }

    fn overlaps(&self, other: &PosSpan) -> bool {
        if self.nw.x + self.width <= other.nw.x {
            return false;
        }
        if other.nw.x + other.width <= self.nw.x {
            return false;
        }
        if self.nw.y + self.height <= other.nw.y {
            return false;
        }
        if other.nw.y + other.height <= self.nw.y {
            return false;
        }
        return true;
    }
}
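// A minimal sketch, added for illustration, of the inclusive/exclusive span
// semantics documented on `PosSpan::new`: `(x, y)` lies inside the span while
// `(x + w, y + h)` does not. The values below are hypothetical.
#[cfg(test)]
mod span_semantics_sketch {
    use super::*;
    use quadtree::{Span, Dir};

    #[test]
    fn boundary_semantics() {
        let span = PosSpan::new(0, 0, 4, 4);
        // The north-west corner is inside the span, so there is no direction.
        assert!(span.dir_of(&Position::new(0, 0)).is_none());
        // The opposite corner is exclusive and falls to the south-east.
        match span.dir_of(&Position::new(4, 4)) {
            Some(Dir::SE) => {}
            _ => panic!("expected Dir::SE for the exclusive corner"),
        }
    }
}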
32.987879
136
0.504317
0a92a544faf82d20f49607ea02bcd1d8bba8e363
6,718
//! Program state processor

use crate::{
    error::GovernanceError,
    state::{
        delegation::scope_delegation_record_account::{
            get_scope_delegation_record_data, ScopeDelegationRecordAccount,
        },
        token_owner_budget_record::get_token_owner_budget_record_data_for_token_record,
        vote_power_origin_record::get_vote_power_origin_record_data_for_owner,
        vote_power_owner_record::{
            get_vote_power_owner_record_data,
            get_vote_power_owner_record_data_for_delegation_activity,
        },
    },
};

use borsh::BorshSerialize;
use solana_program::{
    account_info::{next_account_info, AccountInfo},
    entrypoint::ProgramResult,
    msg,
    pubkey::Pubkey,
};

/// Processes Undelegate instruction
pub fn process_undelegate(
    program_id: &Pubkey,
    accounts: &[AccountInfo],
    amount: u64,
) -> ProgramResult {
    let accounts_iter = &mut accounts.iter();

    let delegation_record_info = next_account_info(accounts_iter)?;
    let token_origin_record_info = next_account_info(accounts_iter)?;
    let token_owner_budget_record_info = next_account_info(accounts_iter)?;
    let governing_owner_info = next_account_info(accounts_iter)?;
    let delegatee_vote_power_owner_record_info = next_account_info(accounts_iter)?;
    let delegatee_governing_owner_info = next_account_info(accounts_iter)?;
    let beneficiary_info = next_account_info(accounts_iter)?;

    msg!("X {}", token_origin_record_info.data_is_empty());
    let token_origin_record = get_vote_power_origin_record_data_for_owner(
        program_id,
        token_origin_record_info,
        governing_owner_info,
    )?;
    msg!("XX {}", delegation_record_info.data_is_empty());

    let delegation_record = get_scope_delegation_record_data(
        program_id,
        delegation_record_info,
        &token_origin_record,
        token_origin_record_info,
        governing_owner_info,
        delegatee_vote_power_owner_record_info,
    )?;
    msg!("XXX");
    let mut token_owner_budget_record = get_token_owner_budget_record_data_for_token_record(
        program_id,
        token_owner_budget_record_info,
        &token_origin_record,
        token_origin_record_info,
        governing_owner_info,
    )?;

    let delegatee_token_owner_record =
        get_vote_power_owner_record_data(program_id, delegatee_vote_power_owner_record_info)?;

    // we can only undelegate if the delegation is not actively used in any voting
    if delegation_record.vote_head.is_some()
        || delegation_record.last_vote_head != delegatee_token_owner_record.latest_vote
    {
        return Err(GovernanceError::InvalidDelegatioStateForUndelegation.into());
    }
    /*
    if delegatee_token_owner_record.governing_token_mint != token_owner_record.governing_token_mint {
        return Err(GovernanceError::InvalidGoverningTokenMint.into());
    } */
    /*
    let scope = delegatee_token_owner_record
        .delegated_by_scope
        .as_ref()
        .unwrap(); */

    // Update budget
    token_owner_budget_record.amount = token_owner_budget_record
        .amount
        .checked_add(amount)
        .unwrap();

    token_owner_budget_record.serialize(&mut *token_owner_budget_record_info.data.borrow_mut())?;

    // Modify the delegatee token owner record
    let mut delegatee_token_owner_record_data =
        get_vote_power_owner_record_data_for_delegation_activity(
            program_id,
            delegatee_vote_power_owner_record_info,
            delegatee_governing_owner_info.key,
            &token_origin_record.source,
            &delegatee_token_owner_record.delegated_by_scope,
        )?;

    delegatee_token_owner_record_data.amount = delegatee_token_owner_record_data
        .amount
        .checked_sub(amount)
        .unwrap();
    /*
    match &mut delegatee_token_owner_record_data.source {
        VotePowerSource::Token {
            governing_token_deposit_amount, ..
        } => {
            *governing_token_deposit_amount =
                governing_token_deposit_amount.checked_sub(amount).unwrap();
        }
        VotePowerSource::Tag {
            amount: tag_amount, ..
} => { *tag_amount = tag_amount.checked_sub(amount).unwrap(); } }; */ delegatee_token_owner_record_data .serialize(&mut *delegatee_vote_power_owner_record_info.data.borrow_mut())?; // Update delegation, might also dispose ScopeDelegationRecordAccount::undelegate( program_id, amount, delegation_record_info, &token_origin_record, token_origin_record_info, governing_owner_info, &delegatee_token_owner_record, delegatee_vote_power_owner_record_info, beneficiary_info, )?; /* VotePowerOwnerRecord::subtract_amount( program_id, delegatee_vote_power_owner_record_info, delegatee_governing_owner_info.key, &token_owner_record.governing_token_mint, Some(scope), amount, )?; */ // for all outstanding active votes, done by the delegatee_token_owner_record, // update cast vote info /* if delegatee_vote_power_owner_record_info.data_is_empty() { let token_owner_record_data = VotePowerOwnerRecord { account_type: AccountType::VotePowerOwnerRecord, governing_owner: delegatee.clone(), governing_token_deposit_amount: amount, governing_token_mint: *governing_token_mint_info.key, unrelinquished_votes_count: 0, total_votes_count: 0, delegated_governing_token_deposit_amount: 0, outstanding_proposal_count: 0, delegated_by_scope: Some(scope), }; create_and_serialize_account_signed( payer_info, delegatee_vote_power_owner_record_info, &token_owner_record_data, &delegatee_vote_power_owner_record_address_seeds, program_id, system_info, &rent, )?; } else { let mut token_owner_record_data = get_vote_power_owner_record_data_for_seeds( program_id, delegatee_vote_power_owner_record_info, &delegatee_vote_power_owner_record_address_seeds, )?; token_owner_record_data.governing_token_deposit_amount = token_owner_record_data .governing_token_deposit_amount .checked_add(amount) .unwrap(); token_owner_record_data.serialize(&mut *vote_power_owner_record_info.data.borrow_mut())?; } */ Ok(()) }
34.80829
103
0.682048
91bad8f31bead69567d63d69d1f74b6fa8fbae63
12,293
//! See [Mesh](crate::mesh::Mesh). use crate::mesh::Mesh; use crate::mesh::ids::*; /// /// # Export /// /// Methods for extracting raw mesh data which for example can be used for visualisation. /// /// # Examples /// /// ## Index based arrays /// /// ``` /// # let mesh = tri_mesh::MeshBuilder::new().cube().build().unwrap(); /// // Get face indices, vertex positions and vertex normals as float arrays.. /// let indices = mesh.indices_buffer(); /// let positions = mesh.positions_buffer(); /// let normals = mesh.normals_buffer(); /// # assert_eq!(positions.len(), 24); /// # assert_eq!(normals.len(), 24); /// /// // The vertex attributes are extracted by.. /// for vertex in 0..positions.len()/3 /// { /// println!("The position and normal of vertex with index {} is:", vertex); /// println!("({}, {}, {}) and ({}, {}, {})", /// positions[3*vertex], positions[3*vertex+1], positions[3*vertex+2], /// normals[3*vertex], normals[3*vertex+1], normals[3*vertex+2]); /// } /// /// // .. and the face attributes are extracted by /// for face in 0..indices.len()/3 /// { /// let vertices = (indices[3*face] as usize, indices[3*face + 1] as usize, indices[3*face + 2] as usize); /// println!("The vertex positions of face with index {} is:", face); /// println!("({}, {}, {}), ({}, {}, {}) and ({}, {}, {})", /// positions[3*vertices.0], positions[3*vertices.0+1], positions[3*vertices.0+2], /// positions[3*vertices.1], positions[3*vertices.1+1], positions[3*vertices.1+2], /// positions[3*vertices.2], positions[3*vertices.2+1], positions[3*vertices.2+2]); /// println!("The normals of face with index {} is:", face); /// println!("({}, {}, {}), ({}, {}, {}) and ({}, {}, {})", /// normals[3*vertices.0], normals[3*vertices.0+1], normals[3*vertices.0+2], /// normals[3*vertices.1], normals[3*vertices.1+1], normals[3*vertices.1+2], /// normals[3*vertices.2], normals[3*vertices.2+1], normals[3*vertices.2+2]); /// } /// ``` /// /// ## Non-index based arrays /// /// ``` /// # let mesh = tri_mesh::MeshBuilder::new().cube().build().unwrap(); /// // Get vertex positions and vertex normals for each corner of each face as float arrays.. /// let positions = mesh.non_indexed_positions_buffer(); /// let normals = mesh.non_indexed_normals_buffer(); /// # assert_eq!(positions.len(), mesh.no_faces() * 3 * 3); /// # assert_eq!(normals.len(), mesh.no_faces() * 3 * 3); /// /// // .. the face attributes are extracted by /// for face in 0..positions.len()/9 /// { /// let vertices = (9*face, 9*face+3, 9*face+6); /// println!("The vertex positions of face with index {} is:", face); /// println!("({}, {}, {}), ({}, {}, {}) and ({}, {}, {})", /// positions[vertices.0], positions[vertices.0+1], positions[vertices.0+2], /// positions[vertices.1], positions[vertices.1+1], positions[vertices.1+2], /// positions[vertices.2], positions[vertices.2+1], positions[vertices.2+2]); /// println!("The vertex normals of face with index {} is:", face); /// println!("({}, {}, {}), ({}, {}, {}) and ({}, {}, {})", /// normals[vertices.0], normals[vertices.0+1], normals[vertices.0+2], /// normals[vertices.1], normals[vertices.1+1], normals[vertices.1+2], /// normals[vertices.2], normals[vertices.2+1], normals[vertices.2+2]); /// } /// ``` /// /// impl Mesh { /// /// Returns the face indices in an array `(i0, i1, i2) = (indices[3*x], indices[3*x+1], indices[3*x+2])` which is meant to be used for visualisation. /// Use the `positions_buffer` method and `normals_buffer` method to get the positions and normals of the vertices. 
/// See [this](#index-based-arrays) example. /// pub fn indices_buffer(&self) -> Vec<u32> { let vertices: Vec<VertexID> = self.vertex_iter().collect(); let mut indices = Vec::with_capacity(self.no_faces() * 3); for face_id in self.face_iter() { for halfedge_id in self.face_halfedge_iter(face_id) { let vertex_id = self.walker_from_halfedge(halfedge_id).vertex_id().unwrap(); let index = vertices.iter().position(|v| v == &vertex_id).unwrap(); indices.push(index as u32); } } indices } /// /// Returns the positions of the vertices in an array which is meant to be used for visualisation. /// See [this](#index-based-arrays) example. /// /// **Note:** The connectivity of the vertices are attained by the `indices_buffer` method. /// pub fn positions_buffer(&self) -> Vec<f64> { let mut positions = Vec::with_capacity(self.no_vertices() * 3); for position in self.vertex_iter().map(|vertex_id| self.vertex_position(vertex_id)) { push_vec3(&mut positions, position); } positions } /// /// Returns the normals of the vertices in an array which is meant to be used for visualisation. /// See [this](#index-based-arrays) example. /// /// **Note:** The connectivity of the vertices are attained by the `indices_buffer` method. /// /// **Note:** The normal of a vertex is computed as the average of the normals of the adjacent faces. /// /// **Note:** The normals are computed from the connectivity and positions each time this method is invoked. /// pub fn normals_buffer(&self) -> Vec<f64> { let mut normals = Vec::with_capacity(self.no_vertices() * 3); for vertex_id in self.vertex_iter() { push_vec3(&mut normals, self.vertex_normal(vertex_id)); } normals } /// /// Returns the positions of the face corners in an array which is meant to be used for visualisation. /// See [this](#non-index-based-arrays) example. /// pub fn non_indexed_positions_buffer(&self) -> Vec<f64> { let mut positions = Vec::with_capacity(self.no_faces() * 3 * 3); for face_id in self.face_iter() { let (p0, p1, p2) = self.face_positions(face_id); push_vec3(&mut positions, p0); push_vec3(&mut positions, p1); push_vec3(&mut positions, p2); } positions } /// /// Returns the normals of the face corners in an array which is meant to be used for visualisation. /// See [this](#non-index-based-arrays) example. /// /// **Note:** The normal of a vertex is computed as the average of the normals of the adjacent faces. /// /// **Note:** The normals are computed from the connectivity and positions each time this method is invoked. /// pub fn non_indexed_normals_buffer(&self) -> Vec<f64> { let mut normals = Vec::with_capacity(self.no_faces() * 3 * 3); for face_id in self.face_iter() { let (v0, v1, v2) = self.face_vertices(face_id); push_vec3(&mut normals, self.vertex_normal(v0)); push_vec3(&mut normals, self.vertex_normal(v1)); push_vec3(&mut normals, self.vertex_normal(v2)); } normals } /// /// Parses the mesh into a text string that follows the .obj file format and which can then be saved into a file. 
/// /// # Examples /// /// ```no_run /// # fn main() -> std::io::Result<()> { /// # let mesh = tri_mesh::MeshBuilder::new().cube().build().unwrap(); /// // Write the mesh data to a string /// let obj_source = mesh.parse_as_obj(); /// /// // Write the string to an .obj file /// std::fs::write("foo.obj", obj_source)?; /// # Ok(()) /// # } /// ``` pub fn parse_as_obj(&self) -> String { let mut output = String::from("o object\n"); let positions = self.positions_buffer(); for i in 0..self.no_vertices() { output = format!("{}v {} {} {}\n", output, positions[i*3], positions[i*3 + 1], positions[i*3 + 2]); } let normals = self.normals_buffer(); for i in 0..self.no_vertices() { output = format!("{}vn {} {} {}\n", output, normals[i*3], normals[i*3 + 1], normals[i*3 + 2]); } let indices = self.indices_buffer(); for i in 0..self.no_faces() { let mut face = String::new(); for j in 0..3 { let index = indices[i*3 + j] + 1; face = format!("{} {}//{}", face, index, index); } output = format!("{}f{}\n", output, face); } output } } fn push_vec3(vec: &mut Vec<f64>, vec3: crate::mesh::math::Vec3) { for i in 0..3 { vec.push(vec3[i]); } } #[cfg(test)] mod tests { use crate::MeshBuilder; use crate::mesh::math::*; #[test] fn test_indexed_export() { let mesh = MeshBuilder::new().cylinder(3, 16).build().unwrap(); let indices = mesh.indices_buffer(); let positions = mesh.positions_buffer(); let normals = mesh.normals_buffer(); assert_eq!(indices.len(), mesh.no_faces() * 3); assert_eq!(positions.len(), mesh.no_vertices() * 3); assert_eq!(normals.len(), mesh.no_vertices() * 3); for face in 0..positions.len()/9 { let vertices = (indices[3*face] as usize, indices[3*face + 1] as usize, indices[3*face + 2] as usize); let p0 = vec3(positions[3*vertices.0], positions[3*vertices.0+1], positions[3*vertices.0+2]); let p1 = vec3(positions[3*vertices.1], positions[3*vertices.1+1], positions[3*vertices.1+2]); let p2 = vec3(positions[3*vertices.2], positions[3*vertices.2+1], positions[3*vertices.2+2]); let center = (p0 + p1 + p2) / 3.0; let face_id = mesh.face_iter().find(|face_id| (mesh.face_center(*face_id) - center).magnitude() < 0.00001); assert!(face_id.is_some()); let n0 = vec3(normals[3*vertices.0], normals[3*vertices.0+1], normals[3*vertices.0+2]); let n1 = vec3(normals[3*vertices.1], normals[3*vertices.1+1], normals[3*vertices.1+2]); let n2 = vec3(normals[3*vertices.2], normals[3*vertices.2+1], normals[3*vertices.2+2]); let (v0, v1, v2) = mesh.face_vertices(face_id.unwrap()); assert!(n0 == mesh.vertex_normal(v0) || n1 == mesh.vertex_normal(v0) || n2 == mesh.vertex_normal(v0)); assert!(n0 == mesh.vertex_normal(v1) || n1 == mesh.vertex_normal(v1) || n2 == mesh.vertex_normal(v1)); assert!(n0 == mesh.vertex_normal(v2) || n1 == mesh.vertex_normal(v2) || n2 == mesh.vertex_normal(v2)); } } #[test] fn test_non_indexed_export() { let mesh = MeshBuilder::new().cylinder(3, 16).build().unwrap(); let positions = mesh.non_indexed_positions_buffer(); let normals = mesh.non_indexed_normals_buffer(); assert_eq!(positions.len(), mesh.no_faces() * 3 * 3); assert_eq!(normals.len(), mesh.no_faces() * 3 * 3); for face in 0..positions.len()/9 { let vertices = (9*face, 9*face+3, 9*face+6); let p0 = vec3(positions[vertices.0], positions[vertices.0+1], positions[vertices.0+2]); let p1 = vec3(positions[vertices.1], positions[vertices.1+1], positions[vertices.1+2]); let p2 = vec3(positions[vertices.2], positions[vertices.2+1], positions[vertices.2+2]); let center = (p0 + p1 + p2) / 3.0; let face_id = mesh.face_iter().find(|face_id| 
(mesh.face_center(*face_id) - center).magnitude() < 0.00001); assert!(face_id.is_some()); let n0 = vec3(normals[vertices.0], normals[vertices.0+1], normals[vertices.0+2]); let n1 = vec3(normals[vertices.1], normals[vertices.1+1], normals[vertices.1+2]); let n2 = vec3(normals[vertices.2], normals[vertices.2+1], normals[vertices.2+2]); let (v0, v1, v2) = mesh.face_vertices(face_id.unwrap()); assert!(n0 == mesh.vertex_normal(v0) || n1 == mesh.vertex_normal(v0) || n2 == mesh.vertex_normal(v0)); assert!(n0 == mesh.vertex_normal(v1) || n1 == mesh.vertex_normal(v1) || n2 == mesh.vertex_normal(v1)); assert!(n0 == mesh.vertex_normal(v2) || n1 == mesh.vertex_normal(v2) || n2 == mesh.vertex_normal(v2)); } } }
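// --- Added usage sketch (not part of the original source): a minimal test, using only the
// `MeshBuilder` API already exercised above, showing how the indexed export buffers fit
// together: `indices_buffer` addresses (x, y, z) triples in `positions_buffer`/`normals_buffer`. ---
#[cfg(test)]
mod export_usage_sketch {
    use crate::MeshBuilder;

    #[test]
    fn indexed_buffers_line_up() {
        let mesh = MeshBuilder::new().cube().build().unwrap();
        let indices = mesh.indices_buffer();
        let positions = mesh.positions_buffer();
        let normals = mesh.normals_buffer();
        // Each vertex occupies three consecutive floats, so the two vertex buffers have the
        // same length and every index must address a valid (x, y, z) triple.
        assert_eq!(positions.len(), normals.len());
        assert!(indices.iter().all(|&i| (i as usize) * 3 + 2 < positions.len()));
    }
}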
42.099315
153
0.579842
b9a7d8b1e65f755c27f5d2766167fb19d8ac32f9
2,051
extern crate kiss3d;
extern crate nalgebra as na;

use kiss3d::event::{Action, WindowEvent};
use kiss3d::window::Window;

fn main() {
    let mut window = Window::new("Kiss3d: events");

    while window.render() {
        for mut event in window.events().iter() {
            match event.value {
                WindowEvent::Key(button, Action::Press, _) => {
                    println!("You pressed the button: {:?}", button);
                    println!("Do not try to press escape: the event is inhibited!");
                    event.inhibited = true; // override the default keyboard handler
                }
                WindowEvent::Key(button, Action::Release, _) => {
                    println!("You released the button: {:?}", button);
                    println!("Do not try to press escape: the event is inhibited!");
                    event.inhibited = true; // override the default keyboard handler
                }
                WindowEvent::MouseButton(button, Action::Press, mods) => {
                    println!("You pressed the mouse button: {:?}", button);
                    println!("You pressed the mouse button with modifiers: {:?}", mods);
                    // don't override the default mouse handler
                }
                WindowEvent::MouseButton(button, Action::Release, mods) => {
                    println!("You released the mouse button: {:?}", button);
                    println!("You released the mouse button with modifiers: {:?}", mods);
                    // don't override the default mouse handler
                }
                WindowEvent::CursorPos(x, y, _) => {
                    println!("Cursor pos: ({} , {})", x, y);
                    // don't override the default mouse handler
                }
                WindowEvent::Scroll(xshift, yshift, _) => {
                    println!("Scroll: ({} , {})", xshift, yshift);
                    // don't override the default mouse handler
                }
                _ => {}
            }
        }
    }
}
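// Added sketch (not in the original example): inhibiting only the Escape key while leaving every
// other default handler alone. `Key` and its `Escape` variant are assumed to be exported from
// `kiss3d::event`, matching the `button` values matched on above.
#[allow(dead_code)]
fn run_and_swallow_escape(window: &mut Window) {
    use kiss3d::event::Key;
    while window.render() {
        for mut event in window.events().iter() {
            if let WindowEvent::Key(Key::Escape, Action::Press, _) = event.value {
                // Swallow Escape so the default handler does not close the window.
                event.inhibited = true;
            }
        }
    }
}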
44.586957
89
0.494881
91c0825559082410a47ef43e792bfe4879a0a0ea
2,258
//! Implement Binary formatting

macro_rules! impl_fmt_binary {
    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
        impl crate::fmt::Binary for $id {
            #[allow(clippy::missing_inline_in_public_items)]
            fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
                write!(f, "{}(", stringify!($id))?;
                for i in 0..$elem_count {
                    if i > 0 {
                        write!(f, ", ")?;
                    }
                    self.extract(i).fmt(f)?;
                }
                write!(f, ")")
            }
        }
        test_if! {
            $test_tt:
            paste::item! {
                pub mod [<$id _fmt_binary>] {
                    use super::*;
                    #[cfg_attr(not(target_arch = "wasm32"), test)]
                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
                    fn binary() {
                        use arrayvec::{ArrayString, ArrayVec};
                        type TinyString = ArrayString<[u8; 512]>;

                        use crate::fmt::Write;
                        let v = $id::splat($elem_ty::default());
                        let mut s = TinyString::new();
                        write!(&mut s, "{:#b}", v).unwrap();

                        let mut beg = TinyString::new();
                        write!(&mut beg, "{}(", stringify!($id)).unwrap();
                        assert!(s.starts_with(beg.as_str()));
                        assert!(s.ends_with(")"));
                        let s: ArrayVec<[TinyString; 64]> = s
                            .replace(beg.as_str(), "")
                            .replace(")", "")
                            .split(",")
                            .map(|v| TinyString::from(v.trim()).unwrap())
                            .collect();
                        assert_eq!(s.len(), $id::lanes());
                        for (index, ss) in s.into_iter().enumerate() {
                            let mut e = TinyString::new();
                            write!(&mut e, "{:#b}", v.extract(index)).unwrap();
                            assert_eq!(ss, e);
                        }
                    }
                }
            }
        }
    };
}
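// Added illustration (not part of the original file): a hand-written analogue of what
// `impl_fmt_binary!` expands to, for a hypothetical two-lane stand-in type, so the expected
// output shape is visible without expanding the macro. Assumes a std test environment
// (`format!` needs an allocator).
#[cfg(test)]
mod binary_fmt_sketch {
    struct Pair(u8, u8); // stand-in for a generated vector type

    impl core::fmt::Binary for Pair {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            // The macro delegates to each lane's `Binary` impl and so respects the caller's
            // flags; this sketch hard-codes the `{:#b}` form for brevity.
            write!(f, "Pair({:#b}, {:#b})", self.0, self.1)
        }
    }

    #[test]
    fn formats_like_the_macro_output() {
        assert_eq!(format!("{:b}", Pair(1, 2)), "Pair(0b1, 0b10)");
    }
}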
41.054545
84
0.368025
1db0445559cd55e5cbee3e4de974e509eb253eaf
28,560
// Run clippy on a fixed set of crates and collect the warnings.
// This helps us observe the impact clippy changes have on a set of real-world code.
//
// When a new lint is introduced, we can search the results for new warnings and check for false
// positives.

#![cfg(feature = "lintcheck")]
#![allow(clippy::filter_map)]

use crate::clippy_project_root;

use std::collections::HashMap;
use std::process::Command;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{env, fmt, fs::write, path::PathBuf};

use clap::ArgMatches;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use serde_json::Value;

const CLIPPY_DRIVER_PATH: &str = "target/debug/clippy-driver";
const CARGO_CLIPPY_PATH: &str = "target/debug/cargo-clippy";
const LINTCHECK_DOWNLOADS: &str = "target/lintcheck/downloads";
const LINTCHECK_SOURCES: &str = "target/lintcheck/sources";

/// List of sources to check, loaded from a .toml file
#[derive(Debug, Serialize, Deserialize)]
struct SourceList {
    crates: HashMap<String, TomlCrate>,
}

/// A crate source stored inside the .toml
/// will be translated into one of the `CrateSource` variants
#[derive(Debug, Serialize, Deserialize)]
struct TomlCrate {
    name: String,
    versions: Option<Vec<String>>,
    git_url: Option<String>,
    git_hash: Option<String>,
    path: Option<String>,
    options: Option<Vec<String>>,
}

/// Represents an archive we download from crates.io, or a git repo, or a local repo/folder
/// Once processed (downloaded/extracted/cloned/copied...), this will be translated into a `Crate`
#[derive(Debug, Serialize, Deserialize, Eq, Hash, PartialEq, Ord, PartialOrd)]
enum CrateSource {
    CratesIo {
        name: String,
        version: String,
        options: Option<Vec<String>>,
    },
    Git {
        name: String,
        url: String,
        commit: String,
        options: Option<Vec<String>>,
    },
    Path {
        name: String,
        path: PathBuf,
        options: Option<Vec<String>>,
    },
}

/// Represents the actual source code of a crate that we ran "cargo clippy" on
#[derive(Debug)]
struct Crate {
    version: String,
    name: String,
    // path to the extracted sources that clippy can check
    path: PathBuf,
    options: Option<Vec<String>>,
}

/// A single warning that clippy issued while checking a `Crate`
#[derive(Debug)]
struct ClippyWarning {
    crate_name: String,
    crate_version: String,
    file: String,
    line: String,
    column: String,
    linttype: String,
    message: String,
    is_ice: bool,
}

impl std::fmt::Display for ClippyWarning {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(
            f,
            r#"target/lintcheck/sources/{}-{}/{}:{}:{} {} "{}""#,
            &self.crate_name, &self.crate_version, &self.file, &self.line, &self.column, &self.linttype, &self.message
        )
    }
}

impl CrateSource {
    /// Makes the sources available on the disk for clippy to check.
/// Clones a git repo and checks out the specified commit or downloads a crate from crates.io or /// copies a local folder fn download_and_extract(&self) -> Crate { match self { CrateSource::CratesIo { name, version, options } => { let extract_dir = PathBuf::from(LINTCHECK_SOURCES); let krate_download_dir = PathBuf::from(LINTCHECK_DOWNLOADS); // url to download the crate from crates.io let url = format!("https://crates.io/api/v1/crates/{}/{}/download", name, version); println!("Downloading and extracting {} {} from {}", name, version, url); let _ = std::fs::create_dir("target/lintcheck/"); let _ = std::fs::create_dir(&krate_download_dir); let _ = std::fs::create_dir(&extract_dir); let krate_file_path = krate_download_dir.join(format!("{}-{}.crate.tar.gz", name, version)); // don't download/extract if we already have done so if !krate_file_path.is_file() { // create a file path to download and write the crate data into let mut krate_dest = std::fs::File::create(&krate_file_path).unwrap(); let mut krate_req = ureq::get(&url).call().unwrap().into_reader(); // copy the crate into the file std::io::copy(&mut krate_req, &mut krate_dest).unwrap(); // unzip the tarball let ungz_tar = flate2::read::GzDecoder::new(std::fs::File::open(&krate_file_path).unwrap()); // extract the tar archive let mut archive = tar::Archive::new(ungz_tar); archive.unpack(&extract_dir).expect("Failed to extract!"); } // crate is extracted, return a new Krate object which contains the path to the extracted // sources that clippy can check Crate { version: version.clone(), name: name.clone(), path: extract_dir.join(format!("{}-{}/", name, version)), options: options.clone(), } }, CrateSource::Git { name, url, commit, options, } => { let repo_path = { let mut repo_path = PathBuf::from(LINTCHECK_SOURCES); // add a -git suffix in case we have the same crate from crates.io and a git repo repo_path.push(format!("{}-git", name)); repo_path }; // clone the repo if we have not done so if !repo_path.is_dir() { println!("Cloning {} and checking out {}", url, commit); if !Command::new("git") .arg("clone") .arg(url) .arg(&repo_path) .status() .expect("Failed to clone git repo!") .success() { eprintln!("Failed to clone {} into {}", url, repo_path.display()) } } // check out the commit/branch/whatever if !Command::new("git") .arg("checkout") .arg(commit) .current_dir(&repo_path) .status() .expect("Failed to check out commit") .success() { eprintln!("Failed to checkout {} of repo at {}", commit, repo_path.display()) } Crate { version: commit.clone(), name: name.clone(), path: repo_path, options: options.clone(), } }, CrateSource::Path { name, path, options } => { use fs_extra::dir; // simply copy the entire directory into our target dir let copy_dest = PathBuf::from(format!("{}/", LINTCHECK_SOURCES)); // the source path of the crate we copied, ${copy_dest}/crate_name let crate_root = copy_dest.join(name); // .../crates/local_crate if !crate_root.exists() { println!("Copying {} to {}", path.display(), copy_dest.display()); dir::copy(path, &copy_dest, &dir::CopyOptions::new()).expect(&format!( "Failed to copy from {}, to {}", path.display(), crate_root.display() )); } else { println!( "Not copying {} to {}, destination already exists", path.display(), crate_root.display() ); } Crate { version: String::from("local"), name: name.clone(), path: crate_root, options: options.clone(), } }, } } } impl Crate { /// Run `cargo clippy` on the `Crate` and collect and return all the lint warnings that clippy /// issued fn run_clippy_lints( &self, 
cargo_clippy_path: &PathBuf, target_dir_index: &AtomicUsize, thread_limit: usize, total_crates_to_lint: usize, ) -> Vec<ClippyWarning> { // advance the atomic index by one let index = target_dir_index.fetch_add(1, Ordering::SeqCst); // "loop" the index within 0..thread_limit let target_dir_index = index % thread_limit; let perc = ((index * 100) as f32 / total_crates_to_lint as f32) as u8; if thread_limit == 1 { println!( "{}/{} {}% Linting {} {}", index, total_crates_to_lint, perc, &self.name, &self.version ); } else { println!( "{}/{} {}% Linting {} {} in target dir {:?}", index, total_crates_to_lint, perc, &self.name, &self.version, target_dir_index ); } let cargo_clippy_path = std::fs::canonicalize(cargo_clippy_path).unwrap(); let shared_target_dir = clippy_project_root().join("target/lintcheck/shared_target_dir"); let mut args = vec!["--", "--message-format=json", "--", "--cap-lints=warn"]; if let Some(options) = &self.options { for opt in options { args.push(opt); } } else { args.extend(&["-Wclippy::pedantic", "-Wclippy::cargo"]) } let all_output = std::process::Command::new(&cargo_clippy_path) // use the looping index to create individual target dirs .env( "CARGO_TARGET_DIR", shared_target_dir.join(format!("_{:?}", target_dir_index)), ) // lint warnings will look like this: // src/cargo/ops/cargo_compile.rs:127:35: warning: usage of `FromIterator::from_iter` .args(&args) .current_dir(&self.path) .output() .unwrap_or_else(|error| { panic!( "Encountered error:\n{:?}\ncargo_clippy_path: {}\ncrate path:{}\n", error, &cargo_clippy_path.display(), &self.path.display() ); }); let stdout = String::from_utf8_lossy(&all_output.stdout); let output_lines = stdout.lines(); let warnings: Vec<ClippyWarning> = output_lines .into_iter() // get all clippy warnings and ICEs .filter(|line| filter_clippy_warnings(&line)) .map(|json_msg| parse_json_message(json_msg, &self)) .collect(); warnings } } #[derive(Debug)] struct LintcheckConfig { // max number of jobs to spawn (default 1) max_jobs: usize, // we read the sources to check from here sources_toml_path: PathBuf, // we save the clippy lint results here lintcheck_results_path: PathBuf, } impl LintcheckConfig { fn from_clap(clap_config: &ArgMatches) -> Self { // first, check if we got anything passed via the LINTCHECK_TOML env var, // if not, ask clap if we got any value for --crates-toml <foo> // if not, use the default "clippy_dev/lintcheck_crates.toml" let sources_toml = env::var("LINTCHECK_TOML").unwrap_or( clap_config .value_of("crates-toml") .clone() .unwrap_or("clippy_dev/lintcheck_crates.toml") .to_string(), ); let sources_toml_path = PathBuf::from(sources_toml); // for the path where we save the lint results, get the filename without extension (so for // wasd.toml, use "wasd"...) 
        let filename: PathBuf = sources_toml_path.file_stem().unwrap().into();
        let lintcheck_results_path = PathBuf::from(format!("lintcheck-logs/{}_logs.txt", filename.display()));

        // look at the --threads arg, if 0 is passed, ask rayon how many threads it would spawn and
        // use half of that for the physical core count
        // by default use a single thread
        let max_jobs = match clap_config.value_of("threads") {
            Some(threads) => {
                let threads: usize = threads
                    .parse()
                    .expect(&format!("Failed to parse '{}' to a digit", threads));
                if threads == 0 {
                    // automatic choice
                    // Rayon seems to return thread count so half that for core count
                    (rayon::current_num_threads() / 2) as usize
                } else {
                    threads
                }
            },
            // no -j passed, use a single thread
            None => 1,
        };

        LintcheckConfig {
            max_jobs,
            sources_toml_path,
            lintcheck_results_path,
        }
    }
}

/// takes a single json-formatted clippy warning and returns true (we are interested in that line)
/// or false (we aren't)
fn filter_clippy_warnings(line: &str) -> bool {
    // we want to collect ICEs because clippy might have crashed.
    // these are summarized later
    if line.contains("internal compiler error: ") {
        return true;
    }
    // in general, we want all clippy warnings
    // however due to some kind of bug, sometimes there are absolute paths
    // to libcore files inside the message
    // or we end up with cargo-metadata output (https://github.com/rust-lang/rust-clippy/issues/6508)

    // filter out these messages to avoid unnecessary noise in the logs
    if line.contains("clippy::")
        && !(line.contains("could not read cargo metadata") || (line.contains(".rustup") && line.contains("toolchains")))
    {
        return true;
    }
    false
}

/// Builds clippy inside the repo to make sure we have a clippy executable we can use.
fn build_clippy() {
    let status = Command::new("cargo")
        .arg("build")
        .status()
        .expect("Failed to build clippy!");
    if !status.success() {
        eprintln!("Error: Failed to compile Clippy!");
        std::process::exit(1);
    }
}

/// Read a `toml` file and return a list of `CrateSources` that we want to check with clippy
fn read_crates(toml_path: &PathBuf) -> Vec<CrateSource> {
    let toml_content: String =
        std::fs::read_to_string(&toml_path).unwrap_or_else(|_| panic!("Failed to read {}", toml_path.display()));
    let crate_list: SourceList =
        toml::from_str(&toml_content).unwrap_or_else(|e| panic!("Failed to parse {}: \n{}", toml_path.display(), e));
    // parse the hashmap of the toml file into a list of crates
    let tomlcrates: Vec<TomlCrate> = crate_list
        .crates
        .into_iter()
        .map(|(_cratename, tomlcrate)| tomlcrate)
        .collect();

    // flatten TomlCrates into CrateSources (one TomlCrate may represent several versions of a crate =>
    // multiple CrateSources)
    let mut crate_sources = Vec::new();
    tomlcrates.into_iter().for_each(|tk| {
        if let Some(ref path) = tk.path {
            crate_sources.push(CrateSource::Path {
                name: tk.name.clone(),
                path: PathBuf::from(path),
                options: tk.options.clone(),
            });
        }

        // if we have multiple versions, save each one
        if let Some(ref versions) = tk.versions {
            versions.iter().for_each(|ver| {
                crate_sources.push(CrateSource::CratesIo {
                    name: tk.name.clone(),
                    version: ver.to_string(),
                    options: tk.options.clone(),
                });
            })
        }

        // otherwise, we should have a git source
        if tk.git_url.is_some() && tk.git_hash.is_some() {
            crate_sources.push(CrateSource::Git {
                name: tk.name.clone(),
                url: tk.git_url.clone().unwrap(),
                commit: tk.git_hash.clone().unwrap(),
                options: tk.options.clone(),
            });
        }

        // if we have a version as well as git data, OR only one of the git fields, something is funky
        if tk.versions.is_some() && (tk.git_url.is_some() ||
tk.git_hash.is_some())
            || tk.git_hash.is_some() != tk.git_url.is_some()
        {
            eprintln!("tomlkrate: {:?}", tk);
            if tk.git_hash.is_some() != tk.git_url.is_some() {
                panic!("Error: Encountered TomlCrate with only one of git_hash and git_url!");
            }
            if tk.path.is_some() && (tk.git_hash.is_some() || tk.versions.is_some()) {
                panic!("Error: TomlCrate can only have one of 'git_.*', 'version' or 'path' fields");
            }
            unreachable!("Failed to translate TomlCrate into CrateSource!");
        }
    });
    // sort the crates
    crate_sources.sort();

    crate_sources
}

/// Parse the json output of clippy and return a `ClippyWarning`
fn parse_json_message(json_message: &str, krate: &Crate) -> ClippyWarning {
    let jmsg: Value = serde_json::from_str(&json_message).unwrap_or_else(|e| panic!("Failed to parse json:\n{:?}", e));

    ClippyWarning {
        crate_name: krate.name.to_string(),
        crate_version: krate.version.to_string(),
        file: jmsg["message"]["spans"][0]["file_name"]
            .to_string()
            .trim_matches('"')
            .into(),
        line: jmsg["message"]["spans"][0]["line_start"]
            .to_string()
            .trim_matches('"')
            .into(),
        column: jmsg["message"]["spans"][0]["text"][0]["highlight_start"]
            .to_string()
            .trim_matches('"')
            .into(),
        linttype: jmsg["message"]["code"]["code"].to_string().trim_matches('"').into(),
        message: jmsg["message"]["message"].to_string().trim_matches('"').into(),
        is_ice: json_message.contains("internal compiler error: "),
    }
}

/// Generate a short list of occurring lint types and their counts
fn gather_stats(clippy_warnings: &[ClippyWarning]) -> (String, HashMap<&String, usize>) {
    // count lint type occurrences
    let mut counter: HashMap<&String, usize> = HashMap::new();
    clippy_warnings
        .iter()
        .for_each(|wrn| *counter.entry(&wrn.linttype).or_insert(0) += 1);

    // collect into a tupled list for sorting
    let mut stats: Vec<(&&String, &usize)> = counter.iter().map(|(lint, count)| (lint, count)).collect();
    // sort by "000{count} {clippy::lintname}"
    // to not have a lint with 200 and 2 warnings take the same spot
    stats.sort_by_key(|(lint, count)| format!("{:0>4}, {}", count, lint));

    let stats_string = stats
        .iter()
        .map(|(lint, count)| format!("{} {}\n", lint, count))
        .collect::<String>();

    (stats_string, counter)
}

/// check if the latest modification of the logfile is older than the modification date of the
/// clippy binary; if so, we should clean the lintcheck shared target directory and recheck
fn lintcheck_needs_rerun(lintcheck_logs_path: &PathBuf) -> bool {
    let clippy_modified: std::time::SystemTime = {
        let mut times = [CLIPPY_DRIVER_PATH, CARGO_CLIPPY_PATH].iter().map(|p| {
            std::fs::metadata(p)
                .expect("failed to get metadata of file")
                .modified()
                .expect("failed to get modification date")
        });
        // the newest modification of either of the binaries
        std::cmp::max(times.next().unwrap(), times.next().unwrap())
    };

    let logs_modified: std::time::SystemTime = std::fs::metadata(lintcheck_logs_path)
        .expect("failed to get metadata of file")
        .modified()
        .expect("failed to get modification date");

    // time is represented in seconds since X
    // logs_modified 2 and clippy_modified 5 means the clippy binary is newer and we need to recheck
    logs_modified < clippy_modified
}

/// lintchecks `main()` function
pub fn run(clap_config: &ArgMatches) {
    let config = LintcheckConfig::from_clap(clap_config);

    println!("Compiling clippy...");
    build_clippy();
    println!("Done compiling");

    // if the clippy bin is newer than our logs, throw away target dirs to force clippy to
    // refresh the logs
    if lintcheck_needs_rerun(&config.lintcheck_results_path) {
        let shared_target_dir = "target/lintcheck/shared_target_dir";
        match std::fs::metadata(&shared_target_dir) {
            Ok(metadata) => {
                if metadata.is_dir() {
                    println!("Clippy is newer than lint check logs, clearing lintcheck shared target dir...");
                    std::fs::remove_dir_all(&shared_target_dir)
                        .expect("failed to remove target/lintcheck/shared_target_dir");
                }
            },
            Err(_) => { /* dir probably does not exist, don't remove anything */ },
        }
    }

    let cargo_clippy_path: PathBuf = PathBuf::from(CARGO_CLIPPY_PATH)
        .canonicalize()
        .expect("failed to canonicalize path to clippy binary");

    // assert that clippy is found
    assert!(
        cargo_clippy_path.is_file(),
        "target/debug/cargo-clippy binary not found! {}",
        cargo_clippy_path.display()
    );

    let clippy_ver = std::process::Command::new(CARGO_CLIPPY_PATH)
        .arg("--version")
        .output()
        .map(|o| String::from_utf8_lossy(&o.stdout).into_owned())
        .expect("could not get clippy version!");

    // download and extract the crates, then run clippy on them and collect clippy's warnings
    // flatten into one big list of warnings

    let crates = read_crates(&config.sources_toml_path);
    let old_stats = read_stats_from_file(&config.lintcheck_results_path);

    let counter = AtomicUsize::new(1);

    let clippy_warnings: Vec<ClippyWarning> = if let Some(only_one_crate) = clap_config.value_of("only") {
        // if we don't have the specified crate in the .toml, throw an error
        if !crates.iter().any(|krate| {
            let name = match krate {
                CrateSource::CratesIo { name, .. } => name,
                CrateSource::Git { name, .. } => name,
                CrateSource::Path { name, .. } => name,
            };
            name == only_one_crate
        }) {
            eprintln!(
                "ERROR: could not find crate '{}' in clippy_dev/lintcheck_crates.toml",
                only_one_crate
            );
            std::process::exit(1);
        }

        // only check a single crate that was passed via cmdline
        crates
            .into_iter()
            .map(|krate| krate.download_and_extract())
            .filter(|krate| krate.name == only_one_crate)
            .map(|krate| krate.run_clippy_lints(&cargo_clippy_path, &AtomicUsize::new(0), 1, 1))
            .flatten()
            .collect()
    } else {
        if config.max_jobs > 1 {
            // run parallel with rayon

            // Ask rayon for thread count. Assume that half of that is the number of physical cores
            // Use one target dir for each core so that we can run N clippys in parallel.
            // We need to use different target dirs because cargo would lock them for a single build otherwise,
            // killing the parallelism. However, this also means that deps will only be reused half or a
            // quarter of the time, which might result in a longer wall clock runtime.
            // This helps when we check many small crates with dep-trees that don't have a lot of branches, in
            // order to achieve some kind of parallelism

            // by default, use a single thread
            let num_cpus = config.max_jobs;
            let num_crates = crates.len();

            // check all crates (default)
            crates
                .into_par_iter()
                .map(|krate| krate.download_and_extract())
                .map(|krate| krate.run_clippy_lints(&cargo_clippy_path, &counter, num_cpus, num_crates))
                .flatten()
                .collect()
        } else {
            // run sequential
            let num_crates = crates.len();
            crates
                .into_iter()
                .map(|krate| krate.download_and_extract())
                .map(|krate| krate.run_clippy_lints(&cargo_clippy_path, &counter, 1, num_crates))
                .flatten()
                .collect()
        }
    };

    // generate some stats
    let (stats_formatted, new_stats) = gather_stats(&clippy_warnings);

    // grab crashes/ICEs, save the crate name and the ice message
    let ices: Vec<(&String, &String)> = clippy_warnings
        .iter()
        .filter(|warning| warning.is_ice)
        .map(|w| (&w.crate_name, &w.message))
        .collect();

    let mut all_msgs: Vec<String> = clippy_warnings.iter().map(|warning| warning.to_string()).collect();
    all_msgs.sort();
    all_msgs.push("\n\n\n\nStats:\n".into());
    all_msgs.push(stats_formatted);

    // save the text into lintcheck-logs/logs.txt
    let mut text = clippy_ver; // clippy version number on top
    text.push_str(&format!("\n{}", all_msgs.join("")));
    text.push_str("ICEs:\n");
    ices.iter()
        .for_each(|(cratename, msg)| text.push_str(&format!("{}: '{}'", cratename, msg)));

    println!("Writing logs to {}", config.lintcheck_results_path.display());
    write(&config.lintcheck_results_path, text).unwrap();

    print_stats(old_stats, new_stats);
}

/// read the previous stats from the lintcheck-log file
fn read_stats_from_file(file_path: &PathBuf) -> HashMap<String, usize> {
    let file_content: String = match std::fs::read_to_string(file_path).ok() {
        Some(content) => content,
        None => {
            eprintln!("Could not read the stats file; returning empty stats");
            return HashMap::new();
        },
    };

    let lines: Vec<String> = file_content.lines().map(|l| l.to_string()).collect();

    // search for the beginning "Stats:" and the end "ICEs:" of the section we want
    let start = lines.iter().position(|line| line == "Stats:").unwrap();
    let end = lines.iter().position(|line| line == "ICEs:").unwrap();

    let stats_lines = &lines[start + 1..=end - 1];

    stats_lines
        .into_iter()
        .map(|line| {
            let mut spl = line.split(" ").into_iter();
            (
                spl.next().unwrap().to_string(),
                spl.next().unwrap().parse::<usize>().unwrap(),
            )
        })
        .collect::<HashMap<String, usize>>()
}

/// print how lint counts changed between runs
fn print_stats(old_stats: HashMap<String, usize>, new_stats: HashMap<&String, usize>) {
    let same_in_both_hashmaps = old_stats
        .iter()
        .filter(|(old_key, old_val)| new_stats.get::<&String>(&old_key) == Some(old_val))
        .map(|(k, v)| (k.to_string(), *v))
        .collect::<Vec<(String, usize)>>();

    let mut old_stats_deduped = old_stats;
    let mut new_stats_deduped = new_stats;

    // remove duplicates from both hashmaps
    same_in_both_hashmaps.iter().for_each(|(k, v)| {
        assert!(old_stats_deduped.remove(k) == Some(*v));
        assert!(new_stats_deduped.remove(k) == Some(*v));
    });

    println!("\nStats:");

    // list all new counts (key is in new stats but not in old stats)
    new_stats_deduped
        .iter()
        .filter(|(new_key, _)| old_stats_deduped.get::<str>(&new_key).is_none())
        .for_each(|(new_key, new_value)| {
            println!("{} 0 => {}", new_key, new_value);
        });

    // list all changed counts (key is in both maps but value differs)
    new_stats_deduped
        .iter()
        .filter(|(new_key, _new_val)| old_stats_deduped.get::<str>(&new_key).is_some())
        .for_each(|(new_key, new_val)| {
            let old_val = old_stats_deduped.get::<str>(&new_key).unwrap();
            println!("{} {} => {}", new_key, old_val, new_val);
        });

    // list all gone counts (key is in old stats but not in new stats)
    old_stats_deduped
        .iter()
        .filter(|(old_key, _)| new_stats_deduped.get::<&String>(&old_key).is_none())
        .for_each(|(old_key, old_value)| {
            println!("{} {} => 0", old_key, old_value);
        });
}
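// Added sketch (not in the original file): unit checks for `filter_clippy_warnings`,
// exercising exactly the cases its doc comment and inline comments describe.
#[cfg(test)]
mod filter_sketch {
    use super::filter_clippy_warnings;

    #[test]
    fn keeps_ices_and_lints_drops_noise() {
        // ICEs are always kept so crashes can be summarized later.
        assert!(filter_clippy_warnings("internal compiler error: oops"));
        // Ordinary clippy warnings are kept.
        assert!(filter_clippy_warnings(r#"{"message":"warning: clippy::redundant_clone"}"#));
        // cargo-metadata noise is dropped even when it mentions a lint.
        assert!(!filter_clippy_warnings("could not read cargo metadata clippy::foo"));
        // Unrelated output is dropped.
        assert!(!filter_clippy_warnings("Compiling some-crate v0.1.0"));
    }
}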
38.804348
119
0.565581
09ceba2eaa0d968985a029e5af4eef979ad9303e
29,899
#![deny(missing_docs)]
#![cfg_attr(test, deny(warnings))]
#![cfg_attr(feature = "heap_size", feature(custom_derive, plugin))]
#![cfg_attr(feature = "heap_size", plugin(heapsize_plugin))]
#![cfg_attr(all(feature = "mesalock_sgx", not(target_env = "sgx")), no_std)]
#![cfg_attr(all(target_env = "sgx", target_vendor = "mesalock"), feature(rustc_private))]

//! Language tags can be used to identify human languages, scripts (e.g. the Latin script),
//! countries and other regions.
//!
//! Language tags are defined in [BCP47](http://tools.ietf.org/html/bcp47), an introduction is
//! ["Language tags in HTML and XML"](http://www.w3.org/International/articles/language-tags/) by
//! the W3C. They are commonly used in HTML and HTTP `Content-Language` and `Accept-Language`
//! header fields.
//!
//! This package currently supports parsing (fully conformant parser), formatting and comparing
//! language tags.
//!
//! # Examples
//! Create a simple language tag representing the French language as spoken
//! in Belgium and print it:
//!
//! ```rust
//! use language_tags::LanguageTag;
//! let mut langtag: LanguageTag = Default::default();
//! langtag.language = Some("fr".to_owned());
//! langtag.region = Some("BE".to_owned());
//! assert_eq!(format!("{}", langtag), "fr-BE");
//! ```
//!
//! Parse a tag representing a special type of English specified by private agreement:
//!
//! ```rust
//! use language_tags::LanguageTag;
//! let langtag: LanguageTag = "en-x-twain".parse().unwrap();
//! assert_eq!(format!("{}", langtag.language.unwrap()), "en");
//! assert_eq!(format!("{:?}", langtag.privateuse), "[\"twain\"]");
//! ```
//!
//! You can check for equality, but more often you should test if two tags match.
//!
//! ```rust
//! use language_tags::LanguageTag;
//! let mut langtag_server: LanguageTag = Default::default();
//! langtag_server.language = Some("de".to_owned());
//! langtag_server.region = Some("AT".to_owned());
//! let mut langtag_user: LanguageTag = Default::default();
//! langtag_user.language = Some("de".to_owned());
//! assert!(langtag_user.matches(&langtag_server));
//! ```
//!
//! There is also the `langtag!` macro for creating language tags.

#[cfg(feature = "heap_size")]
extern crate heapsize;

#[cfg(all(feature = "mesalock_sgx", not(target_env = "sgx")))]
#[macro_use]
extern crate sgx_tstd as std;

use std::ascii::AsciiExt;
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::error::Error as ErrorTrait;
use std::fmt::{self, Display};
use std::iter::FromIterator;
use std::prelude::v1::*;

fn is_alphabetic(s: &str) -> bool {
    s.chars().all(|x| x >= 'A' && x <= 'Z' || x >= 'a' && x <= 'z')
}

fn is_numeric(s: &str) -> bool {
    s.chars().all(|x| x >= '0' && x <= '9')
}

fn is_alphanumeric_or_dash(s: &str) -> bool {
    s.chars()
        .all(|x| x >= 'A' && x <= 'Z' || x >= 'a' && x <= 'z' || x >= '0' && x <= '9' || x == '-')
}

/// Defines an Error type for langtags.
///
/// Errors occur mainly during parsing of language tags.
#[derive(Debug, Eq, PartialEq)]
pub enum Error {
    /// The same extension subtag is only allowed once in a tag before the private use part.
    DuplicateExtension,
    /// If an extension subtag is present, it must not be empty.
    EmptyExtension,
    /// If the `x` subtag is present, it must not be empty.
    EmptyPrivateUse,
    /// The langtag contains a char that is not A-Z, a-z, 0-9 or the dash.
    ForbiddenChar,
    /// A subtag fails to parse, it does not match any other subtags.
    InvalidSubtag,
    /// The given language subtag is invalid.
InvalidLanguage, /// A subtag may be eight characters in length at maximum. SubtagTooLong, /// At maximum three extlangs are allowed, but zero to one extlangs are preferred. TooManyExtlangs, } impl ErrorTrait for Error { fn description(&self) -> &str { match *self { Error::DuplicateExtension => "The same extension subtag is only allowed once in a tag", Error::EmptyExtension => "If an extension subtag is present, it must not be empty", Error::EmptyPrivateUse => "If the `x` subtag is present, it must not be empty", Error::ForbiddenChar => "The langtag contains a char not allowed", Error::InvalidSubtag => "A subtag fails to parse, it does not match any other subtags", Error::InvalidLanguage => "The given language subtag is invalid", Error::SubtagTooLong => "A subtag may be eight characters in length at maximum", Error::TooManyExtlangs => "At maximum three extlangs are allowed", } } } impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.description()) } } /// Result type used for this library. pub type Result<T> = ::std::result::Result<T, Error>; /// Contains all grandfathered tags. pub const GRANDFATHERED: [(&'static str, Option<&'static str>); 26] = [("art-lojban", Some("jbo")), ("cel-gaulish", None), ("en-GB-oed", Some("en-GB-oxendict")), ("i-ami", Some("ami")), ("i-bnn", Some("bnn")), ("i-default", None), ("i-enochian", None), ("i-hak", Some("hak")), ("i-klingon", Some("tlh")), ("i-lux", Some("lb")), ("i-mingo", None), ("i-navajo", Some("nv")), ("i-pwn", Some("pwn")), ("i-tao", Some("tao")), ("i-tay", Some("tay")), ("i-tsu", Some("tsu")), ("no-bok", Some("nb")), ("no-nyn", Some("nn")), ("sgn-BE-FR", Some("sfb")), ("sgn-BE-NL", Some("vgt")), ("sgn-CH-DE", Some("sgg")), ("zh-guoyu", Some("cmn")), ("zh-hakka", Some("hak")), ("zh-min", None), ("zh-min-nan", Some("nan")), ("zh-xiang", Some("hsn"))]; const DEPRECATED_LANGUAGE: [(&'static str, &'static str); 53] = [("in", "id"), ("iw", "he"), ("ji", "yi"), ("jw", "jv"), ("mo", "ro"), ("aam", "aas"), ("adp", "dz"), ("aue", "ktz"), ("ayx", "nun"), ("bjd", "drl"), ("ccq", "rki"), ("cjr", "mom"), ("cka", "cmr"), ("cmk", "xch"), ("drh", "khk"), ("drw", "prs"), ("gav", "dev"), ("gfx", "vaj"), ("gti", "nyc"), ("hrr", "jal"), ("ibi", "opa"), ("ilw", "gal"), ("kgh", "kml"), ("koj", "kwv"), ("kwq", "yam"), ("kxe", "tvd"), ("lii", "raq"), ("lmm", "rmx"), ("meg", "cir"), ("mst", "mry"), ("mwj", "vaj"), ("myt", "mry"), ("nnx", "ngv"), ("oun", "vaj"), ("pcr", "adx"), ("pmu", "phr"), ("ppr", "lcq"), ("puz", "pub"), ("sca", "hle"), ("thx", "oyb"), ("tie", "ras"), ("tkk", "twm"), ("tlw", "weo"), ("tnf", "prs"), ("tsf", "taj"), ("uok", "ema"), ("xia", "acn"), ("xsj", "suj"), ("ybd", "rki"), ("yma", "lrr"), ("ymt", "mtm"), ("yos", "zom"), ("yuu", "yug")]; const DEPRECATED_REGION: [(&'static str, &'static str); 6] = [("BU", "MM"), ("DD", "DE"), ("FX", "FR"), ("TP", "TL"), ("YD", "YE"), ("ZR", "CD")]; /// A language tag as described in [BCP47](http://tools.ietf.org/html/bcp47). /// /// Language tags are used to help identify languages, whether spoken, /// written, signed, or otherwise signaled, for the purpose of /// communication. This includes constructed and artificial languages /// but excludes languages not intended primarily for human /// communication, such as programming languages. 
#[derive(Debug, Default, Eq, Clone)]
#[cfg_attr(feature = "heap_size", derive(HeapSizeOf))]
pub struct LanguageTag {
    /// Language subtags are used to indicate the language, ignoring all
    /// other aspects such as script, region or specific variants.
    pub language: Option<String>,
    /// Extended language subtags are used to identify certain specially
    /// selected languages that, for various historical and compatibility
    /// reasons, are closely identified with or tagged using an existing
    /// primary language subtag.
    pub extlangs: Vec<String>,
    /// Script subtags are used to indicate the script or writing system
    /// variations that distinguish the written forms of a language or its
    /// dialects.
    pub script: Option<String>,
    /// Region subtags are used to indicate linguistic variations associated
    /// with or appropriate to a specific country, territory, or region.
    /// Typically, a region subtag is used to indicate variations such as
    /// regional dialects or usage, or region-specific spelling conventions.
    /// It can also be used to indicate that content is expressed in a way
    /// that is appropriate for use throughout a region, for instance,
    /// Spanish content tailored to be useful throughout Latin America.
    pub region: Option<String>,
    /// Variant subtags are used to indicate additional, well-recognized
    /// variations that define a language or its dialects that are not
    /// covered by other available subtags.
    pub variants: Vec<String>,
    /// Extensions provide a mechanism for extending language tags for use in
    /// various applications. They are intended to identify information that
    /// is commonly used in association with languages or language tags but
    /// that is not part of language identification.
    pub extensions: BTreeMap<u8, Vec<String>>,
    /// Private use subtags are used to indicate distinctions in language
    /// that are important in a given context by private agreement.
    pub privateuse: Vec<String>,
}

impl LanguageTag {
    /// Matches language tags. The first language acts as a language range, the second one is used
    /// as a normal language tag. None fields in the language range are ignored. If the language
    /// tag has more extlangs than the range these extlangs are ignored. Matches are
    /// case-insensitive. `*` in language ranges are represented using `None` values. The language
    /// range `*` that matches language tags is created by the default language tag:
    /// `let wildcard: LanguageTag = Default::default();`.
    ///
    /// For example the range `en-GB` matches only `en-GB` and `en-Arab-GB` but not `en`.
    /// The range `en` matches all language tags starting with `en` including `en`, `en-GB`,
    /// `en-Arab` and `en-Arab-GB`.
    ///
    /// # Panics
    /// If the language range has extensions or private use tags.
    ///
    /// # Examples
    /// ```
    /// # #[macro_use] extern crate language_tags;
    /// # fn main() {
    /// let range_italian = langtag!(it);
    /// let tag_german = langtag!(de);
    /// let tag_italian_switzerland = langtag!(it;;;CH);
    /// assert!(!range_italian.matches(&tag_german));
    /// assert!(range_italian.matches(&tag_italian_switzerland));
    ///
    /// let range_spanish_brazil = langtag!(es;;;BR);
    /// let tag_spanish = langtag!(es);
    /// assert!(!range_spanish_brazil.matches(&tag_spanish));
    /// # }
    /// ```
    pub fn matches(&self, other: &LanguageTag) -> bool {
        fn matches_option(a: &Option<String>, b: &Option<String>) -> bool {
            match (a, b) {
                (&Some(ref a), &Some(ref b)) => a.eq_ignore_ascii_case(b),
                (&None, _) => true,
                (_, &None) => false,
            }
        }
        fn matches_vec(a: &[String], b: &[String]) -> bool {
            a.iter().zip(b.iter()).all(|(x, y)| x.eq_ignore_ascii_case(y))
        }
        assert!(self.is_language_range());
        matches_option(&self.language, &other.language) &&
        matches_vec(&self.extlangs, &other.extlangs) &&
        matches_option(&self.script, &other.script) &&
        matches_option(&self.region, &other.region) &&
        matches_vec(&self.variants, &other.variants)
    }

    /// Checks if it is a language range, meaning that there are no extension and privateuse tags.
    pub fn is_language_range(&self) -> bool {
        self.extensions.is_empty() && self.privateuse.is_empty()
    }

    /// Returns the canonical version of the language tag.
    ///
    /// It currently applies the following steps:
    ///
    /// * Grandfathered tags are replaced with the canonical version if possible.
    /// * Extension languages are promoted to primary language.
    /// * Deprecated languages are replaced with modern equivalents.
    /// * Deprecated regions are replaced with new country names.
    /// * The `heploc` variant is replaced with `alalc97`.
    ///
    /// The returned language tags may not be completely canonical and they are
    /// not validated.
    pub fn canonicalize(&self) -> LanguageTag {
        if let Some(ref language) = self.language {
            if let Some(&(_, Some(tag))) = GRANDFATHERED.iter().find(|&&(x, _)| {
                x.eq_ignore_ascii_case(&language)
            }) {
                return tag.parse().expect("GRANDFATHERED list must contain only valid tags.");
            }
        }
        let mut tag = self.clone();
        if !self.extlangs.is_empty() {
            tag.language = Some(self.extlangs[0].clone());
            tag.extlangs = Vec::new();
        }
        if let Some(ref language) = self.language {
            if let Some(&(_, l)) = DEPRECATED_LANGUAGE.iter().find(|&&(x, _)| {
                x.eq_ignore_ascii_case(&language)
            }) {
                tag.language = Some(l.to_owned());
            };
        }
        if let Some(ref region) = self.region {
            if let Some(&(_, r)) = DEPRECATED_REGION.iter().find(|&&(x, _)| {
                x.eq_ignore_ascii_case(&region)
            }) {
                tag.region = Some(r.to_owned());
            };
        }
        tag.variants = self.variants
            .iter()
            .map(|variant| {
                if "heploc".eq_ignore_ascii_case(variant) {
                    "alalc97".to_owned()
                } else {
                    variant.clone()
                }
            })
            .collect();
        tag
    }
}

impl PartialEq for LanguageTag {
    fn eq(&self, other: &LanguageTag) -> bool {
        fn eq_option(a: &Option<String>, b: &Option<String>) -> bool {
            match (a, b) {
                (&Some(ref a), &Some(ref b)) => a.eq_ignore_ascii_case(b),
                (&None, &None) => true,
                _ => false,
            }
        }
        fn eq_vec(a: &[String], b: &[String]) -> bool {
            a.len() == b.len() && a.iter().zip(b.iter()).all(|(x, y)| x.eq_ignore_ascii_case(y))
        }
        eq_option(&self.language, &other.language) &&
        eq_vec(&self.extlangs, &other.extlangs) &&
        eq_option(&self.script, &other.script) &&
        eq_option(&self.region, &other.region) &&
        eq_vec(&self.variants, &other.variants) &&
        BTreeSet::from_iter(&self.extensions) == BTreeSet::from_iter(&other.extensions) &&
        self.extensions.keys().all(|a| eq_vec(&self.extensions[a], &other.extensions[a])) &&
        eq_vec(&self.privateuse, &other.privateuse)
    }
}

/// Handles normal tags.
/// The parser has a position from 0 to 6. Bigger positions represent the ASCII codes of
/// single character extensions
/// language-extlangs-script-region-variant-extension-privateuse
/// --- 0 --  -- 1 --  -- 2 - -- 3 - -- 4 -- --- x --- ---- 6 ---
fn parse_language_tag(langtag: &mut LanguageTag, t: &str) -> Result<u8> {
    let mut position: u8 = 0;
    for subtag in t.split('-') {
        if subtag.len() > 8 {
            // All subtags have a maximum length of eight characters.
return Err(Error::SubtagTooLong); } if position == 6 { langtag.privateuse.push(subtag.to_owned()); } else if subtag.eq_ignore_ascii_case("x") { position = 6; } else if position == 0 { // Primary language if subtag.len() < 2 || !is_alphabetic(subtag) { return Err(Error::InvalidLanguage); } langtag.language = Some(subtag.to_owned()); if subtag.len() < 4 { // extlangs are only allowed for short language tags position = 1; } else { position = 2; } } else if position == 1 && subtag.len() == 3 && is_alphabetic(subtag) { // extlangs langtag.extlangs.push(subtag.to_owned()); } else if position <= 2 && subtag.len() == 4 && is_alphabetic(subtag) { // Script langtag.script = Some(subtag.to_owned()); position = 3; } else if position <= 3 && (subtag.len() == 2 && is_alphabetic(subtag) || subtag.len() == 3 && is_numeric(subtag)) { langtag.region = Some(subtag.to_owned()); position = 4; } else if position <= 4 && (subtag.len() >= 5 && is_alphabetic(&subtag[0..1]) || subtag.len() >= 4 && is_numeric(&subtag[0..1])) { // Variant langtag.variants.push(subtag.to_owned()); position = 4; } else if subtag.len() == 1 { position = subtag.as_bytes()[0] as u8; if langtag.extensions.contains_key(&position) { return Err(Error::DuplicateExtension); } langtag.extensions.insert(position, Vec::new()); } else if position > 6 { langtag.extensions .get_mut(&position) .expect("no entry found for key") .push(subtag.to_owned()); } else { return Err(Error::InvalidSubtag); } } Ok(position) } impl std::str::FromStr for LanguageTag { type Err = Error; fn from_str(s: &str) -> Result<Self> { let t = s.trim(); if !is_alphanumeric_or_dash(t) { return Err(Error::ForbiddenChar); } let mut langtag: LanguageTag = Default::default(); // Handle grandfathered tags if let Some(&(tag, _)) = GRANDFATHERED.iter().find(|&&(x, _)| x.eq_ignore_ascii_case(t)) { langtag.language = Some((*tag).to_owned()); return Ok(langtag); } let position = try!(parse_language_tag(&mut langtag, t)); if langtag.extensions.values().any(|x| x.is_empty()) { // Extensions and privateuse must not be empty if present return Err(Error::EmptyExtension); } if position == 6 && langtag.privateuse.is_empty() { return Err(Error::EmptyPrivateUse); } if langtag.extlangs.len() > 2 { // maximum 3 extlangs return Err(Error::TooManyExtlangs); } Ok(langtag) } } impl fmt::Display for LanguageTag { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn cmp_ignore_ascii_case(a: &u8, b: &u8) -> Ordering { fn byte_to_uppercase(x: u8) -> u8 { if x > 96 { x - 32 } else { x } } let x: u8 = byte_to_uppercase(*a); let y: u8 = byte_to_uppercase(*b); x.cmp(&y) } if let Some(ref x) = self.language { try!(Display::fmt(&x.to_ascii_lowercase()[..], f)) } for x in &self.extlangs { try!(write!(f, "-{}", x.to_ascii_lowercase())); } if let Some(ref x) = self.script { let y: String = x.chars() .enumerate() .map(|(i, c)| { if i == 0 { c.to_ascii_uppercase() } else { c.to_ascii_lowercase() } }) .collect(); try!(write!(f, "-{}", y)); } if let Some(ref x) = self.region { try!(write!(f, "-{}", x.to_ascii_uppercase())); } for x in &self.variants { try!(write!(f, "-{}", x.to_ascii_lowercase())); } let mut extensions: Vec<(&u8, &Vec<String>)> = self.extensions.iter().collect(); extensions.sort_by(|&(a, _), &(b, _)| cmp_ignore_ascii_case(a, b)); for (raw_key, values) in extensions { let mut key = String::new(); key.push(*raw_key as char); try!(write!(f, "-{}", key)); for value in values { try!(write!(f, "-{}", value)); } } if !self.privateuse.is_empty() { if self.language.is_none() { try!(f.write_str("x")); } else 
{
                try!(f.write_str("-x"));
            }
            for value in &self.privateuse {
                try!(write!(f, "-{}", value));
            }
        }
        Ok(())
    }
}

#[macro_export]
/// Utility for creating simple language tags.
///
/// The macro supports the language, extlang, script and region parts of language tags;
/// they are separated by semicolons, and omitted parts are denoted with multiple semicolons.
///
/// # Examples
/// * `it`: `langtag!(it)`
/// * `it-LY`: `langtag!(it;;;LY)`
/// * `it-Arab-LY`: `langtag!(it;;Arab;LY)`
/// * `ar-afb`: `langtag!(ar;afb)`
/// * `i-enochian`: `langtag!(i-enochian)`
macro_rules! langtag {
    ( $language:expr ) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: Vec::new(),
            script: None,
            region: None,
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;;;$region:expr ) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: Vec::new(),
            script: None,
            region: Some(stringify!($region).to_owned()),
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;;$script:expr ) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: Vec::new(),
            script: Some(stringify!($script).to_owned()),
            region: None,
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;;$script:expr;$region:expr ) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: Vec::new(),
            script: Some(stringify!($script).to_owned()),
            region: Some(stringify!($region).to_owned()),
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;$extlangs:expr) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: vec![stringify!($extlangs).to_owned()],
            script: None,
            region: None,
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;$extlangs:expr;$script:expr) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: vec![stringify!($extlangs).to_owned()],
            script: Some(stringify!($script).to_owned()),
            region: None,
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;$extlangs:expr;;$region:expr ) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: vec![stringify!($extlangs).to_owned()],
            script: None,
            region: Some(stringify!($region).to_owned()),
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
    ( $language:expr;$extlangs:expr;$script:expr;$region:expr ) => {
        $crate::LanguageTag {
            language: Some(stringify!($language).to_owned()),
            extlangs: vec![stringify!($extlangs).to_owned()],
            script: Some(stringify!($script).to_owned()),
            region: Some(stringify!($region).to_owned()),
            variants: Vec::new(),
            extensions: ::std::collections::BTreeMap::new(),
            privateuse: Vec::new(),
        }
    };
}
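// Added usage sketch (not in the original crate): round-trips a grandfathered tag through
// `canonicalize`, using only the public API defined above.
#[cfg(test)]
mod canonicalize_sketch {
    use super::LanguageTag;

    #[test]
    fn grandfathered_tag_is_replaced_by_its_preferred_value() {
        let tag: LanguageTag = "i-klingon".parse().unwrap();
        // "i-klingon" is listed in GRANDFATHERED with the preferred value "tlh".
        assert_eq!(format!("{}", tag.canonicalize()), "tlh");
    }
}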
46.211747
100
0.453126
c1c738e3bae5c34b7b3964a2ede8a33ed1ddd0be
1,223
use anyhow::Result; use log::info; use s3::bucket::Bucket; use s3::creds::Credentials; use s3::Region::Custom; pub struct Client { bucket: Bucket, } impl Client { pub fn new(endpoint: &str, access_key: &str, secret_key: &str) -> Result<Client> { let region = Custom { endpoint: endpoint.to_string(), region: "".to_string(), }; let credential = Credentials { access_key: Some(access_key.to_string()), secret_key: Some(secret_key.to_string()), security_token: None, session_token: None, }; let bucket = Bucket::new_with_path_style("default", region, credential)?; Ok(Client { bucket }) } pub fn get_object(&self, path: &str) -> Result<Vec<u8>> { info!("get object; path {}", path); let (result, code) = self.bucket.get_object_blocking(path)?; info!("get object; code {}", code); Ok(result) } pub fn put_object(&self, path: &str, data: &[u8]) -> Result<()> { info!("put object; path {}", path); let (_result, code) = self.bucket.put_object_blocking(path, data)?; info!("put object; code {}", code); Ok(()) } }
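// Added usage sketch (not in the original file): a put/get round trip. The endpoint and
// credentials are placeholders for a local S3-compatible service such as MinIO, which is
// why the test is ignored by default.
#[cfg(test)]
mod round_trip_sketch {
    use super::Client;

    #[test]
    #[ignore] // requires a live S3-compatible endpoint
    fn put_then_get_returns_the_same_bytes() {
        let client = Client::new("http://127.0.0.1:9000", "access", "secret").unwrap();
        client.put_object("demo/hello.txt", b"hello").unwrap();
        assert_eq!(client.get_object("demo/hello.txt").unwrap(), b"hello");
    }
}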
29.829268
86
0.565004
22818e611564aee0157604ccfe7d63856c384f6f
58,681
use crate::*; use std::cell::RefCell; static BALL_DATA: &'static [u8] = include_bytes!("../../test_rc/ball.bmp"); #[derive(Default)] #[allow(dead_code)] pub struct TestRun { window: bool, button: bool, check: bool, combo: bool, date: bool, font: bool, list: bool, menu: bool, radio: bool, text: bool, progress: bool, track: bool, tooltip: bool, status: bool, } #[derive(Default)] pub struct ControlsTest { // data runs: RefCell<TestRun>, // Resources window_icon: Icon, love_icon: Icon, love_small_icon: Icon, ferris: Bitmap, popcorn: Bitmap, popcorn_small: Bitmap, ball: Bitmap, arial_font: Font, segoe_font: Font, wait_cursor: Cursor, tabs_image_list: ImageList, // Dialogs open_file_dialog: FileDialog, open_directory_dialog: FileDialog, save_file_dialog: FileDialog, color_dialog: ColorDialog, font_dialog: FontDialog, // Layouts dialog_tab_layout: GridLayout, tree_tab_layout: GridLayout, list_view_tab_layout: GridLayout, panel_layout: GridLayout, tab_container_layout: FlexboxLayout, // Control window pub window: Window, tray_icon: TrayNotification, tray_icon_2: TrayNotification, status: StatusBar, // First Tab controls_holder: TabsContainer, basics_control_tab: Tab, basics_control_tab2: Tab, dialog_tab: Tab, tree_tab: Tab, list_view_tab: Tab, test_button: Button, test_checkbox1: CheckBox, test_checkbox2: CheckBox, test_combo: ComboBox<&'static str>, test_date: DatePicker, test_img_frame: ImageFrame, test_label: Label, test_list_box1: ListBox<&'static str>, test_list_box2: ListBox<&'static str>, test_radio1: RadioButton, test_radio2: RadioButton, test_radio3: RadioButton, test_radio4: RadioButton, test_text_input: TextInput, test_text_box: TextBox, test_progress1: ProgressBar, test_progress2: ProgressBar, test_track1: TrackBar, test_track2: TrackBar, // Second Tab test_image_button: Button, test_image_button2: Button, test_image_button3: Button, test_button_checkbox: CheckBox, test_number_select: NumberSelect, test_rich: RichTextBox, test_scroll_h: ScrollBar, test_scroll_v: ScrollBar, test_maximize: Button, test_minimize: Button, test_restore: Button, // Third Tab test_open_file_button: Button, test_open_directory_button: Button, test_save_file_button: Button, file_dialog_result: TextBox, test_select_color_button: Button, test_color_output: TextInput, test_select_font_button: Button, test_font_output: TextInput, // Fourth Tab test_tree: TreeView, test_tree_input: TextInput, test_tree_add: Button, test_tree_remove: Button, // Fifth Tab test_list_view: ListView, // Tooltip test_ttp1: Tooltip, test_ttp2: Tooltip, test_ttp3: Tooltip, // Menu window_menu: Menu, window_submenu1: Menu, window_menu_sep: MenuSeparator, window_menu_item1: MenuItem, window_menu_item2: MenuItem, window_menu_item3: MenuItem, pop_menu: Menu, pop_menu_item1: MenuItem, pop_menu_item2: MenuItem, // Control panel pub panel: Window, run_window_test: Button, run_button_test: Button, run_check_box_test: Button, run_combo_test: Button, run_date_test: Button, run_font_test: Button, run_list_test: Button, run_menu_test: Button, run_radio_test: Button, run_text_test: Button, run_progress_test: Button, run_track_test: Button, run_tooltip_test: Button, run_status_test: Button, run_tray_test: Button, } mod partial_controls_test_ui { use super::*; use crate::{PartialUi, NwgError, ControlHandle}; impl PartialUi for ControlsTest { fn build_partial<W: Into<ControlHandle>>(data: &mut ControlsTest, _parent: Option<W>) -> Result<(), NwgError> { // // Resources // Icon::builder() .source_file(Some("./test_rc/cog.ico")) .build(&mut 
data.window_icon)?; Icon::builder() .source_file(Some("./test_rc/love.ico")) .build(&mut data.love_icon)?; Icon::builder() .source_file(Some("./test_rc/love.ico")) .size(Some((25, 25))) .build(&mut data.love_small_icon)?; Bitmap::builder() .source_file(Some("./test_rc/ferris.bmp")) .build(&mut data.ferris)?; Bitmap::builder() .source_bin(Some(BALL_DATA)) .build(&mut data.ball)?; Bitmap::builder() .source_file(Some("./test_rc/popcorn.bmp")) .size(Some((80, 80))) .build(&mut data.popcorn)?; Bitmap::builder() .source_file(Some("./test_rc/popcorn.bmp")) .size(Some((25, 25))) .build(&mut data.popcorn_small)?; Cursor::builder() .source_system(Some(OemCursor::Wait)) .build(&mut data.wait_cursor)?; ImageList::builder() .size((16, 16)) .build(&mut data.tabs_image_list)?; data.tabs_image_list.add_bitmap_from_filename("./test_rc/list_0.png")?; data.tabs_image_list.add_bitmap_from_filename("./test_rc/list_1.png")?; data.tabs_image_list.add_bitmap_from_filename("./test_rc/list_2.png")?; data.tabs_image_list.add_bitmap_from_filename("./test_rc/list_3.png")?; let dir = ::std::env::current_dir().unwrap(); FileDialog::builder() .action(FileDialogAction::Open) .multiselect(true) .title("Open a file") .default_folder(dir.to_str().unwrap()) .build(&mut data.open_file_dialog)?; FileDialog::builder() .action(FileDialogAction::OpenDirectory) .title("Open a directory") .build(&mut data.open_directory_dialog)?; FileDialog::builder() .action(FileDialogAction::Save) .title("Save a file") .filename("abcde.txt") .filters("Text(*.txt)|Any(*.*)") .build(&mut data.save_file_dialog)?; ColorDialog::builder() .build(&mut data.color_dialog)?; FontDialog::builder() .build(&mut data.font_dialog)?; Font::builder() .size(20) .family("Arial") .build(&mut data.arial_font)?; Font::builder() .size(23) .family("Segoe UI") .build(&mut data.segoe_font)?; // // Controls holder // Window::builder() .flags(WindowFlags::MAIN_WINDOW) .size((480, 450)) .position((100, 100)) .title("Controls") .icon(Some(&data.window_icon)) .build(&mut data.window)?; TrayNotification::builder() .parent(&data.window) .icon(Some(&data.window_icon)) .tip(Some("Native Windows GUI tests")) .build(&mut data.tray_icon)?; StatusBar::builder() .text("Ready for tests ;)") .parent(&data.window) .build(&mut data.status)?; TabsContainer::builder() .parent(&data.window) .image_list(Some(&data.tabs_image_list)) .build(&mut data.controls_holder)?; Tab::builder() .text("Basic") .parent(&data.controls_holder) .image_index(Some(0)) .build(&mut data.basics_control_tab)?; Tab::builder() .text("Basic 2") .parent(&data.controls_holder) .image_index(Some(1)) .build(&mut data.basics_control_tab2)?; Tab::builder() .text("Dialog") .parent(&data.controls_holder) .image_index(Some(2)) .build(&mut data.dialog_tab)?; Tab::builder() .text("Tree view") .parent(&data.controls_holder) .image_index(Some(3)) .build(&mut data.tree_tab)?; Tab::builder() .text("List view") .parent(&data.controls_holder) .image_index(Some(3)) .build(&mut data.list_view_tab)?; Button::builder() .text("A simple button") .position((10, 10)) .size((130, 30)) .parent(&data.basics_control_tab) .build(&mut data.test_button)?; CheckBox::builder() .text("I like bacon") .position((10, 50)) .size((130, 30)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_checkbox1)?; CheckBox::builder() .flags(CheckBoxFlags::VISIBLE | CheckBoxFlags::TRISTATE) .text("Three state") .position((10, 80)) .size((130, 30)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) 
.build(&mut data.test_checkbox2)?; ComboBox::builder() .position((10, 120)) .size((130, 30)) .collection(vec!["Chocolate", "Strawberry", "Blueberry"]) .selected_index(Some(0)) .parent(&data.basics_control_tab) .build(&mut data.test_combo)?; DatePicker::builder() .position((10, 160)) .size((130, 30)) .parent(&data.basics_control_tab) .build(&mut data.test_date)?; Label::builder() .text("A label\r\nSecond line") .position((10, 200)) .size((130, 50)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_label)?; ListBox::builder() .position((10, 260)) .size((130, 100)) .parent(&data.basics_control_tab) .collection(vec!["Red", "White", "Green", "Yellow"]) .selected_index(Some(1)) .build(&mut data.test_list_box1)?; ListBox::builder() .flags(ListBoxFlags::VISIBLE | ListBoxFlags::MULTI_SELECT) .position((150, 10)) .size((130, 100)) .parent(&data.basics_control_tab) .collection(vec!["Cat", "Dog", "Parrot", "Horse", "Ogre"]) .multi_selection(vec![0, 2, 3]) .build(&mut data.test_list_box2)?; ImageFrame::builder() .position((150, 110)) .size((130, 99)) .parent(&data.basics_control_tab) .bitmap(Some(&data.ferris)) .background_color(Some([255,255,255])) .build(&mut data.test_img_frame)?; RadioButton::builder() .flags(RadioButtonFlags::GROUP | RadioButtonFlags::VISIBLE) .text("Cats") .position((150, 220)) .size((130, 25)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_radio1)?; RadioButton::builder() .text("Dogs") .position((150, 245)) .size((130, 25)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_radio2)?; RadioButton::builder() .flags(RadioButtonFlags::GROUP | RadioButtonFlags::VISIBLE) .text("Energy drink") .position((150, 280)) .size((130, 25)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_radio3)?; RadioButton::builder() .text("Chocolate") .position((150, 305)) .size((130, 25)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_radio4)?; TextInput::builder() .text("Hello World!") .position((290, 10)) .size((150, 25)) .parent(&data.basics_control_tab) .build(&mut data.test_text_input)?; TextBox::builder() .text("Multi\r\nLine\r\nText") .flags(TextBoxFlags::VISIBLE | TextBoxFlags::AUTOVSCROLL | TextBoxFlags::AUTOHSCROLL | TextBoxFlags::TAB_STOP) .position((290, 40)) .size((150, 100)) .parent(&data.basics_control_tab) .build(&mut data.test_text_box)?; ProgressBar::builder() .position((290, 150)) .size((150, 30)) .parent(&data.basics_control_tab) .build(&mut data.test_progress1)?; ProgressBar::builder() .flags(ProgressBarFlags::VISIBLE | ProgressBarFlags::VERTICAL | ProgressBarFlags::MARQUEE) .position((340, 220)) .size((30, 110)) .range(0..100) .pos(50) .marquee(true) .marquee_update(0) .parent(&data.basics_control_tab) .build(&mut data.test_progress2)?; TrackBar::builder() .position((290, 190)) .size((150, 20)) .parent(&data.basics_control_tab) .background_color(Some([255, 255, 255])) .build(&mut data.test_track1)?; TrackBar::builder() .flags(TrackBarFlags::VISIBLE | TrackBarFlags::RANGE | TrackBarFlags::VERTICAL | TrackBarFlags::AUTO_TICK) .position((290, 220)) .size((40, 110)) .background_color(Some([255, 255, 255])) .parent(&data.basics_control_tab) .build(&mut data.test_track2)?; // // Control tab 2 // Button::builder() .flags(ButtonFlags::VISIBLE | ButtonFlags::BITMAP) .position((10, 10)) .size((90, 90)) .bitmap(Some(&data.popcorn)) 
.parent(&data.basics_control_tab2) .build(&mut data.test_image_button)?; Button::builder() .position((110, 10)) .size((140, 40)) .icon(Some(&data.love_small_icon)) .parent(&data.basics_control_tab2) .build(&mut data.test_image_button2)?; Button::builder() .position((110, 55)) .size((140, 40)) .bitmap(Some(&data.ball)) .parent(&data.basics_control_tab2) .build(&mut data.test_image_button3)?; CheckBox::builder() .flags(CheckBoxFlags::VISIBLE | CheckBoxFlags::PUSHLIKE) .text("A check box button") .position((260, 10)) .size((140, 40)) .parent(&data.basics_control_tab2) .build(&mut data.test_button_checkbox)?; NumberSelect::builder() .value_float(10.50) .step_float(0.5) .decimals(2) .position((10, 110)) .size((140, 25)) .parent(&data.basics_control_tab2) .build(&mut data.test_number_select)?; RichTextBox::builder() .text("That's a rich text box!") .position((10, 140)) .size((200, 200)) .parent(&data.basics_control_tab2) .font(Some(&data.segoe_font)) .build(&mut data.test_rich)?; ScrollBar::builder() .position((220,140)) .size((20, 200)) .parent(&data.basics_control_tab2) .range(Some(0..100)) .pos(Some(30)) .build(&mut data.test_scroll_v)?; ScrollBar::builder() .position((160, 110)) .size((90, 20)) .range(Some(0..10)) .flags(ScrollBarFlags::VISIBLE | ScrollBarFlags::HORIZONTAL) .parent(&data.basics_control_tab2) .build(&mut data.test_scroll_h)?; Button::builder() .text("Maximize") .position((260, 55)) .size((140, 40)) .parent(&data.basics_control_tab2) .build(&mut data.test_maximize)?; Button::builder() .text("Minimize") .position((260, 100)) .size((140, 40)) .parent(&data.basics_control_tab2) .build(&mut data.test_minimize)?; Button::builder() .text("Restore") .position((260, 145)) .size((140, 40)) .parent(&data.basics_control_tab2) .build(&mut data.test_restore)?; // // Dialogs // Button::builder() .text("Open file") .parent(&data.dialog_tab) .enabled(cfg!(feature="file-dialog")) .build(&mut data.test_open_file_button)?; Button::builder() .text("Open directory") .parent(&data.dialog_tab) .enabled(cfg!(feature="file-dialog")) .build(&mut data.test_open_directory_button)?; Button::builder() .text("Save file") .parent(&data.dialog_tab) .enabled(cfg!(feature="file-dialog")) .build(&mut data.test_save_file_button)?; TextBox::builder() .parent(&data.dialog_tab) .flags(TextBoxFlags::VISIBLE | TextBoxFlags::AUTOVSCROLL | TextBoxFlags::AUTOHSCROLL | TextBoxFlags::TAB_STOP) .build(&mut data.file_dialog_result)?; Button::builder() .text("Select a color") .parent(&data.dialog_tab) .enabled(cfg!(feature="color-dialog")) .build(&mut data.test_select_color_button)?; TextInput::builder() .parent(&data.dialog_tab) .placeholder_text(Some("The color will go here")) .background_color(Some([255, 255, 255])) .build(&mut data.test_color_output)?; Button::builder() .text("Select a font") .parent(&data.dialog_tab) .enabled(cfg!(feature="font-dialog")) .build(&mut data.test_select_font_button)?; TextInput::builder() .parent(&data.dialog_tab) .placeholder_text(Some("The font will go here")) .background_color(Some([255, 255, 255])) .build(&mut data.test_font_output)?; // // Treeview // TreeView::builder() .parent(&data.tree_tab) .build(&mut data.test_tree)?; TextInput::builder() .text("New Item") .background_color(Some([255, 255, 255])) .parent(&data.tree_tab) .build(&mut data.test_tree_input)?; Button::builder() .text("Add item") .parent(&data.tree_tab) .build(&mut data.test_tree_add)?; Button::builder() .text("Remove item") .parent(&data.tree_tab) .build(&mut data.test_tree_remove)?; // // Listview // 
ListView::builder() .parent(&data.list_view_tab) .ex_flags(ListViewExFlags::GRID | ListViewExFlags::FULL_ROW_SELECT) .list_style(ListViewStyle::Detailed) .build(&mut data.test_list_view)?; // // Tooltip // Tooltip::builder() .register(&data.test_button, "A test button") .register(&data.test_date, "A test date picker") .register(&data.test_combo, "A test combobox") .register_callback(&data.window) .register_callback(&data.test_text_input) .build(&mut data.test_ttp1)?; Tooltip::builder() .decoration(Some("Tooltip title (fancy)"), Some(&data.window_icon)) .register(&data.test_img_frame, "Hello rust!") .build(&mut data.test_ttp2)?; Tooltip::builder() .default_decoration(Some("More info"), Some(TooltipIcon::InfoLarge)) .register(&data.test_list_box1, "Simple list") .register(&data.test_list_box2, "Multi select list") .build(&mut data.test_ttp3)?; // // Menu // Menu::builder() .text("&Test menu") .parent(&data.window) .build(&mut data.window_menu)?; Menu::builder() .text("Test &Submenu") .parent(&data.window_menu) .build(&mut data.window_submenu1)?; MenuSeparator::builder() .parent(&data.window_menu) .build(&mut data.window_menu_sep)?; MenuItem::builder() .text("Test item 1") .parent(&data.window_menu) .build(&mut data.window_menu_item1)?; MenuItem::builder() .text("Test item 2") .check(true) .parent(&data.window_submenu1) .build(&mut data.window_menu_item2)?; MenuItem::builder() .text("Test item 3") .parent(&data.window) .build(&mut data.window_menu_item3)?; Menu::builder() .popup(true) .parent(&data.window) .build(&mut data.pop_menu)?; MenuItem::builder() .text("Popup item 1\tCTRL+P") .parent(&data.pop_menu) .build(&mut data.pop_menu_item1)?; MenuItem::builder() .text("Popup item 2") .parent(&data.pop_menu) .build(&mut data.pop_menu_item2)?; // // Run tests // Window::builder() .flags(WindowFlags::WINDOW) .size((300, 360)) .position((650, 100)) .title("Action panel") .icon(Some(&data.window_icon)) .parent(Some(&data.window)) .build(&mut data.panel)?; TrayNotification::builder() .parent(&data.panel) .flags(TrayNotificationFlags::SILENT | TrayNotificationFlags::USER_ICON | TrayNotificationFlags::LARGE_ICON) .icon(Some(&data.love_icon)) .balloon_icon(Some(&data.love_icon)) .info(Some("Tray notification by NWG")) .info_title(Some("Native Windows GUI tests")) .tip(Some("Hello!")) .build(&mut data.tray_icon_2)?; Button::builder() .text("Run window test") .parent(&data.panel) .build(&mut data.run_window_test)?; Button::builder() .text("Run button test") .parent(&data.panel) .build(&mut data.run_button_test)?; Button::builder() .text("Run checkbox test") .parent(&data.panel) .build(&mut data.run_check_box_test)?; Button::builder() .text("Run combo test") .parent(&data.panel) .build(&mut data.run_combo_test)?; Button::builder() .text("Run date test") .parent(&data.panel) .build(&mut data.run_date_test)?; Button::builder() .text("Run font test") .parent(&data.panel) .build(&mut data.run_font_test)?; Button::builder() .text("Run list test") .parent(&data.panel) .build(&mut data.run_list_test)?; Button::builder() .text("Run menu test") .parent(&data.panel) .build(&mut data.run_menu_test)?; Button::builder() .text("Run radio test") .parent(&data.panel) .build(&mut data.run_radio_test)?; Button::builder() .text("Run text test") .parent(&data.panel) .build(&mut data.run_text_test)?; Button::builder() .text("Run progress test") .parent(&data.panel) .build(&mut data.run_progress_test)?; Button::builder() .text("Run track test") .parent(&data.panel) .build(&mut data.run_track_test)?; Button::builder() 
.text("Run tooltip test") .parent(&data.panel) .build(&mut data.run_tooltip_test)?; Button::builder() .text("Run status test") .parent(&data.panel) .build(&mut data.run_status_test)?; Button::builder() .text("Run tray test") .parent(&data.panel) .build(&mut data.run_tray_test)?; // // Layout // use stretch::style::Dimension as D; use stretch::geometry::Rect; FlexboxLayout::builder() .parent(&data.window) .border(Rect { start: D::Points(2.0), end: D::Points(2.0), top: D::Points(2.0), bottom: D::Points(20.0) } ) .child(&data.controls_holder) .build(&data.tab_container_layout)?; GridLayout::builder() .parent(&data.panel) .spacing(1) .max_row(Some(8)) .child(0, 0, &data.run_window_test) .child(1, 0, &data.run_button_test) .child(0, 1, &data.run_check_box_test) .child(1, 1, &data.run_combo_test) .child(0, 2, &data.run_date_test) .child(1, 2, &data.run_font_test) .child(0, 3, &data.run_list_test) .child(1, 3, &data.run_menu_test) .child(0, 4, &data.run_radio_test) .child(1, 4, &data.run_text_test) .child(0, 5, &data.run_progress_test) .child(1, 5, &data.run_track_test) .child(0, 6, &data.run_tooltip_test) .child(1, 6, &data.run_status_test) .child(0, 7, &data.run_tray_test) .build(&data.panel_layout)?; GridLayout::builder() .parent(&data.dialog_tab) .min_size([400, 150]) .max_size([u32::max_value(), 200]) .child(0, 0, &data.test_open_file_button) .child(1, 0, &data.test_open_directory_button) .child(2, 0, &data.test_save_file_button) .child_item(GridLayoutItem::new(&data.file_dialog_result, 0, 1, 3, 1)) .child(0, 2, &data.test_select_color_button) .child_item(GridLayoutItem::new(&data.test_color_output, 1, 2, 2, 1)) .child(0, 3, &data.test_select_font_button) .child_item(GridLayoutItem::new(&data.test_font_output, 1, 3, 2, 1)) .build(&data.dialog_tab_layout)?; GridLayout::builder() .parent(&data.tree_tab) .min_size([400, 220]) .child_item(GridLayoutItem::new(&data.test_tree, 0, 0, 1, 7)) .child(1, 0, &data.test_tree_input) .child(1, 1, &data.test_tree_add) .child(1, 2, &data.test_tree_remove) .build(&data.tree_tab_layout)?; GridLayout::builder() .parent(&data.list_view_tab) .min_size([400, 220]) .child_item(GridLayoutItem::new(&data.test_list_view, 0, 0, 1, 7)) .build(&data.list_view_tab_layout)?; Ok(()) } fn process_event<'a>(&self, evt: Event, _evt_data: &EventData, handle: ControlHandle) { use crate::Event as E; match evt { E::OnInit => if &handle == &self.window { init_tree(self); init_list_view(self); init_rich_text_box(self); }, E::OnWindowClose => if &handle == &self.window { self.panel.set_visible(false); }, E::OnButtonClick => if &handle == &self.run_window_test { run_window_tests(self, evt); } else if &handle == &self.run_button_test { run_button_tests(self, evt); } else if &handle == &self.run_check_box_test { run_check_box_tests(self, evt); } else if &handle == &self.run_combo_test { run_combo_tests(self, evt); } else if &handle == &self.run_date_test { run_date_tests(self, evt); } else if &handle == &self.run_font_test { run_font_tests(self, evt); } else if &handle == &self.run_list_test { run_list_tests(self, evt); } else if &handle == &self.run_menu_test { run_menu_tests(self, evt); } else if &handle == &self.run_radio_test { run_radio_tests(self, evt); } else if &handle == &self.run_text_test { run_text_tests(self, evt); } else if &handle == &self.run_progress_test { run_progress_tests(self, evt); } else if &handle == &self.run_track_test { run_track_tests(self, evt); } else if &handle == &self.run_tooltip_test { run_tooltip_tests(self, evt); } else if &handle == 
&self.run_status_test { run_status_tests(self, evt); } else if &handle == &self.test_open_file_button { open_file(self, evt); } else if &handle == &self.test_open_directory_button { open_directory(self, evt); } else if &handle == &self.test_save_file_button { save_file(self, evt); } else if &handle == &self.test_tree_add { tree_tests(self, &self.test_tree_add.handle); } else if &handle == &self.test_tree_remove { tree_tests(self, &self.test_tree_remove.handle); } else if &handle == &self.test_select_color_button { color_select(self); } else if &handle == &self.test_select_font_button { font_select(self); } else if &handle == &self.run_tray_test { run_tray_tests(self); } else if &handle == &self.test_maximize { self.window.maximize(); } else if &handle == &self.test_minimize { self.window.minimize(); } else if &handle == &self.test_restore { self.window.restore(); }, E::OnContextMenu => if &handle == &self.window { show_pop_menu(self, evt); } else if &handle == &self.basics_control_tab { show_pop_menu(self, evt); } else if &handle == &self.tray_icon_2 { show_pop_menu(self, evt); }, E::OnTooltipText => if &handle == &self.window { set_tooltip_dynamic(self, &self.window.handle, _evt_data.on_tooltip_text()); } else if &handle == &self.test_text_input { set_tooltip_dynamic(self, &self.test_text_input.handle, _evt_data.on_tooltip_text()); }, E::OnMenuItemSelected => if &handle == &self.window_menu_item1 { item_hello("menu item"); } else if &handle == &self.pop_menu_item1 { item_hello("popup menu item"); }, E::OnChar => { if &handle == &self.test_rich { print_char(_evt_data); } }, E::OnListViewColumnClick => { if &handle == &self.test_list_view { set_lv_sort(&self.test_list_view, _evt_data); } }, _ => {} } } fn handles(&self) -> Vec<&ControlHandle> { vec![&self.window.handle, &self.panel.handle] } } } fn item_hello(m: &'static str) { simple_message("Hello", &format!("Hello from {}!", m)); } fn init_rich_text_box(app: &ControlsTest) { let rich = &app.test_rich; rich.set_selection(0..1000); rich.set_char_format(&CharFormat { effects: Some(CharEffects::BOLD), height: Some(250), text_color: Some([200, 0, 0]), ..Default::default() }) } fn init_tree(app: &ControlsTest) { let tree = &app.test_tree; let item = tree.insert_item("Hello", None, TreeInsert::Root); let view = tree.insert_item("A tree View", Some(&item), TreeInsert::First); tree.insert_item("AHHHHHHH", Some(&view), TreeInsert::First); tree.insert_item("Items", Some(&item), TreeInsert::First); let other = tree.insert_item("Another root children", Some(&item), TreeInsert::Last); tree.insert_item("Banana", Some(&other), TreeInsert::First); tree.insert_item("Pineapple", Some(&other), TreeInsert::First); } fn init_list_view(app: &ControlsTest) { let list = &app.test_list_view; for &column in &["Name", "Price", "Quantity"] { list.insert_column(column); } list.set_headers_enabled(true); list.set_column_sort_arrow(1, Some(ListViewColumnSortArrow::Down)); let data: &[&[&str]] = &[ // &["Name", "Price (USD $)", "Quantity"], &["Banana", "10.0", "1000"], &["Apple", "2.0", "345"], &["Kiwi", "5.0", "194"], &["Oranges", "5.0", "15"], &["Lettuce", "1.0", "257"], ]; for d in data { list.insert_items_row(None, d); } } fn show_pop_menu(app: &ControlsTest, _evt: Event) { let (x, y) = GlobalCursor::position(); app.pop_menu.popup(x, y); } fn run_window_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().window { assert_eq!(&app.window.text(), "Controls"); app.window.set_text("Controls New title"); assert_eq!(&app.window.text(), "Controls New title");
assert_eq!(app.window.visible(), true); app.window.set_visible(false); assert_eq!(app.window.visible(), false); app.window.set_visible(true); assert_eq!(app.window.enabled(), true); app.window.set_enabled(false); assert_eq!(app.window.enabled(), false); app.window.set_enabled(true); app.window.set_position(100, 100); assert_eq!(app.window.position(), (100, 100)); app.window.set_size(500, 420); // The actual size returned here might be less because it does not take the menu bar into account // assert_eq!(app.window.size(), (500, 400)); app.runs.borrow_mut().window = true; } else { app.window.set_text("Controls"); app.runs.borrow_mut().window = false; } } fn run_button_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().button { assert_eq!(&app.test_button.text(), "A simple button"); app.test_button.set_text("New Text"); assert_eq!(&app.test_button.text(), "New Text"); assert_eq!(app.test_button.position(), (10, 10)); app.test_button.set_position(5, 5); assert_eq!(app.test_button.position(), (5, 5)); assert_eq!(app.test_button.size(), (130, 30)); app.test_button.set_size(120, 35); assert_eq!(app.test_button.size(), (120, 35)); if app.basics_control_tab.visible() { assert_eq!(app.test_button.visible(), true); app.test_button.set_visible(false); assert_eq!(app.test_button.visible(), false); app.test_button.set_visible(true); } app.test_button.set_focus(); assert_eq!(app.test_button.focus(), true); app.window.set_focus(); assert_eq!(app.test_button.focus(), false); assert_eq!(app.test_button.enabled(), true); app.test_button.set_enabled(false); assert_eq!(app.test_button.enabled(), false); let mut icon = None; let mut bitmap = None; app.test_image_button2.image(&mut bitmap, &mut icon); assert!(icon.is_some() && bitmap.is_none()); app.test_image_button2.set_bitmap(Some(&app.popcorn_small)); app.test_image_button2.image(&mut bitmap, &mut icon); assert!(icon.is_none() && bitmap.is_some()); app.test_image_button2.set_icon(None); app.test_image_button2.image(&mut bitmap, &mut icon); assert!(icon.is_none() && bitmap.is_none()); app.runs.borrow_mut().button = true; } else { app.test_button.set_text("A simple button"); app.test_button.set_position(10, 10); app.test_button.set_size(130, 30); app.test_button.set_enabled(true); app.test_image_button2.set_icon(Some(&app.love_small_icon)); app.runs.borrow_mut().button = false; } } fn run_check_box_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().check { assert_eq!(app.test_checkbox2.tristate(), true); assert_eq!(app.test_checkbox1.tristate(), false); app.test_checkbox1.set_tristate(true); assert_eq!(app.test_checkbox1.tristate(), true); app.test_checkbox1.set_check_state(CheckBoxState::Checked); assert_eq!(app.test_checkbox1.check_state(), CheckBoxState::Checked); app.test_checkbox1.set_check_state(CheckBoxState::Unchecked); assert_eq!(app.test_checkbox1.check_state(), CheckBoxState::Unchecked); app.test_checkbox1.set_check_state(CheckBoxState::Indeterminate); assert_eq!(app.test_checkbox1.check_state(), CheckBoxState::Indeterminate); app.runs.borrow_mut().check = true; } else { app.test_checkbox1.set_tristate(false); app.runs.borrow_mut().check = false; } } fn run_combo_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().combo { { let col = app.test_combo.collection(); assert_eq!(&col as &[&'static str], &["Chocolate", "Strawberry", "Blueberry"]); } { let mut col = app.test_combo.collection_mut(); col.push("Hello"); } app.test_combo.sync(); app.test_combo.push("World!"); assert_eq!(app.test_combo.len(), 5);
app.test_combo.set_selection(None); assert_eq!(app.test_combo.selection(), None); assert_eq!(app.test_combo.selection_string(), None); app.test_combo.set_selection(Some(2)); assert_eq!(app.test_combo.selection(), Some(2)); assert_eq!(app.test_combo.selection_string(), Some("Blueberry".to_string())); assert_eq!(app.test_combo.set_selection_string("hel"), Some(3)); assert_eq!(app.test_combo.selection(), Some(3)); assert_eq!(app.test_combo.selection_string(), Some("Hello".to_string())); app.test_combo.sort(); assert_eq!(app.test_combo.set_selection_string("Blue"), Some(0)); app.test_combo.insert(1, "BOO!"); app.test_combo.insert(std::usize::MAX, "Ahoy!!"); assert_eq!(app.test_combo.set_selection_string("BOO!"), Some(1)); assert_eq!(app.test_combo.set_selection_string("Ahoy!!"), Some(6)); app.test_combo.remove(0); app.test_combo.dropdown(true); app.runs.borrow_mut().combo = true; } else { app.test_combo.set_collection(vec!["Chocolate", "Strawberry", "Blueberry"]); app.runs.borrow_mut().combo = false; } } fn run_date_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().date { let v = DatePickerValue { year: 2000, month: 10, day: 5 }; app.test_date.set_value(Some(v)); assert_eq!(app.test_date.value(), Some(v)); assert_eq!(app.test_date.checked(), true); app.test_date.set_value(None); assert_eq!(app.test_date.value(), None); assert_eq!(app.test_date.checked(), false); app.test_date.set_format(Some("'YEAR: 'yyyy")); let up = DatePickerValue { year: 2000, month: 1, day: 1 }; let down = DatePickerValue { year: 2001, month: 1, day: 1 }; app.test_date.set_range(&[up, down]); assert_eq!(app.test_date.range(), [up, down]); app.runs.borrow_mut().date = true; } else { app.test_date.set_format(None); let up = DatePickerValue { year: 1950, month: 1, day: 1 }; let down = DatePickerValue { year: 2020, month: 12, day: 30 }; app.test_date.set_range(&[up, down]); app.runs.borrow_mut().date = false; } } fn run_font_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().font { app.test_label.set_font(Some(&app.arial_font)); app.test_button.set_font(Some(&app.arial_font)); app.test_checkbox1.set_font(Some(&app.arial_font)); app.test_checkbox2.set_font(Some(&app.arial_font)); app.test_combo.set_font(Some(&app.arial_font)); app.test_date.set_font(Some(&app.arial_font)); app.test_list_box1.set_font(Some(&app.arial_font)); app.test_list_box2.set_font(Some(&app.arial_font)); app.controls_holder.set_font(Some(&app.arial_font)); app.test_text_input.set_font(Some(&app.arial_font)); app.test_text_box.set_font(Some(&app.arial_font)); app.test_tree.set_font(Some(&app.arial_font)); assert_eq!(app.test_label.font().as_ref(), Some(&app.arial_font)); app.runs.borrow_mut().font = true; } else { app.test_label.set_font(None); app.test_button.set_font(None); app.test_checkbox1.set_font(None); app.test_checkbox2.set_font(None); app.test_combo.set_font(None); app.test_date.set_font(None); app.test_list_box1.set_font(None); app.test_list_box2.set_font(None); app.controls_holder.set_font(None); app.test_tree.set_font(None); app.test_list_box1.set_size(130, 100); app.test_list_box2.set_size(130, 100); app.runs.borrow_mut().font = false; } } fn run_list_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().list { app.test_list_box2.unselect_all(); { let col = app.test_list_box1.collection(); assert_eq!(&col as &[&'static str], &["Red", "White", "Green", "Yellow"]); } { let mut col = app.test_list_box1.collection_mut(); col.push("Blue"); } app.test_list_box1.sync();
app.test_list_box1.push("Hello!"); assert_eq!(app.test_list_box1.len(), 6); app.test_list_box1.set_selection(Some(0)); assert_eq!(app.test_list_box1.selected(0), true); app.test_list_box1.set_selection(None); assert_eq!(app.test_list_box1.selected(0), false); assert_eq!(app.test_list_box1.selection(), None); assert_eq!(app.test_list_box1.selection_string(), None); app.test_list_box1.set_selection(Some(2)); assert_eq!(app.test_list_box1.selection(), Some(2)); assert_eq!(app.test_list_box1.selection_string(), Some("Green".to_string())); app.test_list_box1.insert(1, "BOO!"); app.test_list_box1.insert(std::usize::MAX, "Ahoy!!"); assert_eq!(app.test_list_box1.set_selection_string("BOO!"), Some(1)); assert_eq!(app.test_list_box1.set_selection_string("Ahoy!!"), Some(7)); app.test_list_box1.remove(0); app.test_list_box2.multi_add_selection(0); app.test_list_box2.multi_add_selection(2); app.test_list_box2.multi_add_selection(3); assert_eq!(app.test_list_box2.multi_selection_len(), 3); assert_eq!(app.test_list_box2.multi_selection(), vec![0, 2, 3]); app.test_list_box2.multi_remove_selection(0); assert_eq!(app.test_list_box2.multi_selection_len(), 2); assert_eq!(app.test_list_box2.multi_selection(), vec![2, 3]); app.test_list_box2.select_all(); assert_eq!(app.test_list_box2.multi_selection_len(), 5); assert_eq!(app.test_list_box2.multi_selection(), vec![0, 1, 2, 3, 4]); app.test_list_box2.unselect_all(); assert_eq!(app.test_list_box2.multi_selection_len(), 0); assert_eq!(app.test_list_box2.multi_selection(), vec![]); app.test_list_box2.multi_select_range(0..2); assert_eq!(app.test_list_box2.multi_selection_len(), 3); assert_eq!(app.test_list_box2.multi_selection(), vec![0, 1, 2]); app.test_list_box2.multi_unselect_range(0..1); assert_eq!(app.test_list_box2.multi_selection_len(), 1); assert_eq!(app.test_list_box2.multi_selection(), vec![2]); app.runs.borrow_mut().list = true; } else { app.test_list_box2.unselect_all(); app.test_list_box1.set_collection(vec!["Red", "White", "Green", "Yellow"]); app.runs.borrow_mut().list = false; } } fn run_menu_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().menu { app.window_menu_item1.set_enabled(false); assert_eq!(app.window_menu_item1.enabled(), false); app.window_submenu1.set_enabled(false); assert_eq!(app.window_submenu1.enabled(), false); app.pop_menu_item1.set_enabled(false); assert_eq!(app.pop_menu_item1.enabled(), false); app.pop_menu.set_enabled(false); app.runs.borrow_mut().menu = true; } else { app.pop_menu_item1.set_enabled(true); app.window_submenu1.set_enabled(true); app.window_menu_item1.set_enabled(true); app.runs.borrow_mut().menu = false; } } fn run_radio_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().radio { app.test_radio1.set_check_state(RadioButtonState::Checked); assert_eq!(app.test_radio1.check_state(), RadioButtonState::Checked); app.test_radio2.set_check_state(RadioButtonState::Checked); assert_eq!(app.test_radio2.check_state(), RadioButtonState::Checked); app.test_radio2.set_check_state(RadioButtonState::Unchecked); assert_eq!(app.test_radio2.check_state(), RadioButtonState::Unchecked); app.runs.borrow_mut().radio = true; } else { app.runs.borrow_mut().radio = false; } } fn run_text_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().text { app.test_text_input.set_text("New Text"); assert_eq!(&app.test_text_input.text(), "New Text"); app.test_text_input.set_limit(32); assert_eq!(app.test_text_input.limit(), 32); assert_eq!(app.test_text_input.password_char(), None); 
app.test_text_input.set_password_char(Some('X')); assert_eq!(app.test_text_input.password_char(), Some('X')); app.test_text_input.set_modified(true); assert_eq!(app.test_text_input.modified(), true); app.test_text_input.set_selection(0..4); assert_eq!(app.test_text_input.selection(), 0..4); assert_eq!(app.test_text_input.len(), 8); assert_eq!(app.test_text_input.visible(), true); app.test_text_input.set_visible(false); assert_eq!(app.test_text_input.visible(), false); app.test_text_input.set_visible(true); app.test_text_input.set_focus(); assert_eq!(app.test_text_input.focus(), true); app.window.set_focus(); assert_eq!(app.test_text_input.focus(), false); assert_eq!(app.test_text_input.readonly(), false); app.test_text_input.set_readonly(true); assert_eq!(app.test_text_input.readonly(), true); assert_eq!(app.test_text_input.enabled(), true); app.test_text_input.set_enabled(false); assert_eq!(app.test_text_input.enabled(), false); app.test_text_input.set_placeholder_text(Some("Placeholder!")); app.runs.borrow_mut().text = true; } else { app.test_text_input.set_text("Hello World"); app.test_text_input.set_enabled(true); app.test_text_input.set_readonly(false); app.test_text_input.set_password_char(None); app.runs.borrow_mut().text = false; } } fn run_progress_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().progress { app.test_progress1.set_range(0..1000); let r = app.test_progress1.range(); assert!(r.start == 0 && r.end == 1000); app.test_progress1.set_pos(500); assert!(app.test_progress1.pos() == 500); app.test_progress1.set_step(100); assert!(app.test_progress1.step() == 100); app.test_progress1.set_state(ProgressBarState::Paused); assert!(app.test_progress1.state() == ProgressBarState::Paused); app.test_progress1.advance(); assert!(app.test_progress1.pos() == 600); app.test_progress1.advance_delta(50); assert!(app.test_progress1.pos() == 650); app.runs.borrow_mut().progress = true; } else { app.test_progress1.set_pos(0); app.test_progress1.set_state(ProgressBarState::Normal); app.runs.borrow_mut().progress = false; } } fn run_track_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().track { app.test_track1.set_range_min(0); app.test_track1.set_range_max(10); assert_eq!(app.test_track1.range_min(), 0); assert_eq!(app.test_track1.range_max(), 10); app.test_track1.set_pos(3); assert_eq!(app.test_track1.pos(), 3); app.test_track2.set_range_min(0); app.test_track2.set_range_max(5); app.test_track2.set_selection_range_pos(0..3); assert_eq!(app.test_track2.selection_range_pos(), 0..3); app.runs.borrow_mut().track = true; } else { app.runs.borrow_mut().track = false; } } fn run_tooltip_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().tooltip { app.test_ttp2.set_enabled(false); app.test_ttp1.set_delay_time(Some(100)); assert_eq!(app.test_ttp1.delay_time(), 100); app.test_ttp1.register(&app.test_checkbox1, "A simple checkbox"); app.test_ttp1.register(&app.test_checkbox2, "A checkbox with 3 states!"); app.test_ttp3.set_default_decoration("Changed!", TooltipIcon::None); app.test_ttp1.set_text(&app.test_button.handle, "New tool tip!"); assert_eq!(&app.test_ttp1.text(&app.test_button.handle, None), "New tool tip!"); app.test_ttp1.unregister(&app.test_button); app.runs.borrow_mut().tooltip = true; } else { app.test_ttp1.register(&app.test_button, "A button"); app.test_ttp2.set_enabled(true); app.runs.borrow_mut().tooltip = false; } } fn run_status_tests(app: &ControlsTest, _evt: Event) { if !app.runs.borrow().status { app.status.set_text(0, "Status changed!"); 
assert_eq!(&app.status.text(0), "Status changed!"); app.status.set_font(Some(&app.arial_font)); assert_eq!(app.status.font().as_ref(), Some(&app.arial_font)); app.status.set_min_height(55); app.runs.borrow_mut().status = true; } else { app.status.set_font(None); app.status.set_min_height(25); app.runs.borrow_mut().status = false; } } fn run_tray_tests(app: &ControlsTest) { app.tray_icon.set_visibility(false); app.tray_icon_2.set_icon(&app.window_icon); app.tray_icon_2.set_tip("Changed the tooltip and the icon!"); let icon = Some(&app.love_icon); let flags = Some(TrayNotificationFlags::USER_ICON | TrayNotificationFlags::SILENT | TrayNotificationFlags::LARGE_ICON); app.tray_icon_2.show("OH NO!", Some("Just a title"), flags, icon); app.tray_icon_2.show("I'm spamming the system tray popup!", Some("Just a title"), flags, icon); app.tray_icon_2.show("You can't stop me!!!!!", Some("Just a title (really)"), flags, Some(&app.window_icon)); } fn set_tooltip_dynamic<'a>(app: &ControlsTest, handle: &ControlHandle, data: &ToolTipTextData) { if &app.window == handle { data.set_text(&format!("Control text: \"{}\"", app.window.text())); } else if &app.test_text_input == handle { data.set_text(&format!("Control text: \"{}\"", app.test_text_input.text())); } } fn tree_tests(app: &ControlsTest, handle: &ControlHandle) { let add = &app.test_tree_add == handle; let remove = &app.test_tree_remove == handle; if add { let text = app.test_tree_input.text(); match app.test_tree.root() { Some(root) => match app.test_tree.selected_item() { None => { app.test_tree.insert_item(&text, Some(&root), TreeInsert::Last); }, Some(i) => { app.test_tree.insert_item(&text, Some(&i), TreeInsert::Last); }, }, None => { app.test_tree.insert_item(&text, None, TreeInsert::Root); }, } } if remove { match app.test_tree.selected_item() { Some(item) => { app.test_tree.remove_item(&item); }, None => {} } } app.test_tree.set_focus(); } #[cfg(feature = "file-dialog")] fn open_file(app: &ControlsTest, _evt: Event) { if app.open_file_dialog.run(Some(&app.window)) { app.file_dialog_result.clear(); if let Ok(file_names) = app.open_file_dialog.get_selected_items() { let mut names = String::new(); for name in file_names { names.push_str(&name.into_string().unwrap()); names.push_str("\r\n") } app.file_dialog_result.set_text(&names); } } } #[cfg(not(feature = "file-dialog"))] fn open_file(_app: &ControlsTest, _evt: Event) {} #[cfg(feature = "file-dialog")] fn open_directory(app: &ControlsTest, _evt: Event) { if app.open_directory_dialog.run(Some(&app.window)) { app.file_dialog_result.clear(); if let Ok(directory) = app.open_directory_dialog.get_selected_item() { app.file_dialog_result.set_text(&directory.into_string().unwrap()); } } } #[cfg(not(feature = "file-dialog"))] fn open_directory(_app: &ControlsTest, _evt: Event) {} #[cfg(feature = "file-dialog")] fn save_file(app: &ControlsTest, _evt: Event) { if app.save_file_dialog.run(Some(&app.window)) { app.file_dialog_result.clear(); if let Ok(file) = app.save_file_dialog.get_selected_item() { app.file_dialog_result.set_text(&file.into_string().unwrap()); } } } #[cfg(not(feature = "file-dialog"))] fn save_file(_app: &ControlsTest, _evt: Event) {} #[cfg(feature = "color-dialog")] fn color_select(app: &ControlsTest) { if app.color_dialog.run(Some(&app.window)) { app.test_color_output.set_text(&format!("{:?}", app.color_dialog.color())) } } #[cfg(not(feature = "color-dialog"))] fn color_select(_app: &ControlsTest) {} #[cfg(feature = "font-dialog")] fn font_select(app: &ControlsTest) { if
app.font_dialog.run(Some(&app.window)) { app.test_font_output.set_text(&format!("{:?}", app.font_dialog.font())) } } #[cfg(not(feature = "font-dialog"))] fn font_select(_app: &ControlsTest) {} fn print_char(data: &EventData) { match data { EventData::OnChar(c) => println!("{:?}", c), _=>{} } } fn set_lv_sort(lv: &ListView, data: &EventData) { match data { EventData::OnListViewItemIndex { row_index: _, column_index } => { for i in 0..lv.column_len() { if *column_index != i { lv.set_column_sort_arrow(i, None); } else { match lv.column_sort_arrow(i) { Some(ListViewColumnSortArrow::Up) | None => lv.set_column_sort_arrow(i, Some(ListViewColumnSortArrow::Down)), Some(ListViewColumnSortArrow::Down) => lv.set_column_sort_arrow(i, Some(ListViewColumnSortArrow::Up)), } } } }, _ => {} } }
35.414001
133
0.545696
035bb0fce17f99320a1975a4e654f00bbb73aca5
2,242
//! Wrapper for wlr_seat_client, a manager for handling seats to an individual
//! client.
//!
//! This struct is very unsafe, and probably will not be used directly by the
//! compositor author. Instead, this is used internally by various wlr seat
//! state structs (e.g. `wlr_seat_keyboard_state`, `wlr_seat_pointer_state`)

use std::marker::PhantomData;

use super::seat::Seat;
use wlroots_sys::{wl_client, wlr_seat_client, wlr_seat_client_for_wl_client};

/// Contains state for a single client's bound wl_seat resource.
/// It can be used to issue input events to the client.
///
/// The lifetime of this object is managed by `Seat`.
pub struct Client<'wlr_seat> {
    client: *mut wlr_seat_client,
    _phantom: PhantomData<&'wlr_seat Seat>
}

#[allow(dead_code)]
impl<'wlr_seat> Client<'wlr_seat> {
    /// Gets a seat::Client for the specified client,
    /// if there is one bound for that client.
    ///
    /// # Unsafety
    /// Since this is just a wrapper for checking if the wlr_seat pointer matches
    /// the provided wl_client pointer, this function is unsafe.
    ///
    /// Please only pass a valid pointer to a wl_client to this function.
    pub unsafe fn client_for_wl_client(seat: &'wlr_seat mut Seat,
                                       client: *mut wl_client)
                                       -> Option<Client<'wlr_seat>> {
        let client = wlr_seat_client_for_wl_client(seat.as_ptr(), client);
        if client.is_null() {
            None
        } else {
            Some(Client { client, _phantom: PhantomData })
        }
    }

    /// Recreates a `Client` from a raw `wlr_seat_client`.
    ///
    /// # Unsafety
    /// The pointer must point to a valid `wlr_seat_client`.
    ///
    /// Note also that the struct has an *unbounded lifetime*. You _must_ ensure
    /// this struct does not live longer than the `Seat` that manages it.
    pub(crate) unsafe fn from_ptr<'unbound_seat>(client: *mut wlr_seat_client)
                                                 -> Client<'unbound_seat> {
        Client { client, _phantom: PhantomData }
    }

    pub(crate) unsafe fn as_ptr(&self) -> *mut wlr_seat_client {
        self.client
    }
}
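// --- Illustrative usage sketch (not part of the original file) ---
// A minimal, hypothetical example of how a compositor might look up the seat
// client for a wayland client; `seat: Seat` and `raw_client: *mut wl_client`
// are assumed to come from elsewhere in the compositor:
//
//     unsafe {
//         // `raw_client` must be a valid pointer obtained from libwayland.
//         if let Some(seat_client) = Client::client_for_wl_client(&mut seat, raw_client) {
//             // The returned `Client` borrows `seat` and must not outlive it.
//             let _ = seat_client;
//         } else {
//             // No wl_seat resource is bound for this client.
//         }
//     }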
36.16129
81
0.621766
0a511559c3d548ff60b8682392f6b27b32a96278
18,429
//! flash.filter.GradientGlowFilter object use crate::avm1::activation::Activation; use crate::avm1::error::Error; use crate::avm1::function::{Executable, FunctionObject}; use crate::avm1::object::bevel_filter::BevelFilterType; use crate::avm1::object::gradient_glow_filter::GradientGlowFilterObject; use crate::avm1::property::Attribute; use crate::avm1::{AvmString, Object, ScriptObject, TObject, Value}; use gc_arena::MutationContext; pub fn constructor<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { set_distance(activation, this, args.get(0..1).unwrap_or_default())?; set_angle(activation, this, args.get(1..2).unwrap_or_default())?; set_colors(activation, this, args.get(2..3).unwrap_or_default())?; set_alphas(activation, this, args.get(3..4).unwrap_or_default())?; set_ratios(activation, this, args.get(4..5).unwrap_or_default())?; set_blur_x(activation, this, args.get(5..6).unwrap_or_default())?; set_blur_y(activation, this, args.get(6..7).unwrap_or_default())?; set_strength(activation, this, args.get(7..8).unwrap_or_default())?; set_quality(activation, this, args.get(8..9).unwrap_or_default())?; set_type(activation, this, args.get(9..10).unwrap_or_default())?; set_knockout(activation, this, args.get(10..11).unwrap_or_default())?; Ok(this.into()) } pub fn distance<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.distance().into()); } Ok(Value::Undefined) } pub fn set_distance<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let distance = args .get(0) .unwrap_or(&4.0.into()) .coerce_to_f64(activation)?; if let Some(object) = this.as_gradient_glow_filter_object() { object.set_distance(activation.context.gc_context, distance); } Ok(Value::Undefined) } pub fn angle<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.angle().into()); } Ok(Value::Undefined) } pub fn set_angle<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let angle = args .get(0) .unwrap_or(&44.9999999772279.into()) .coerce_to_f64(activation)?; let clamped_angle = if angle.is_sign_negative() { -(angle.abs() % 360.0) } else { angle % 360.0 }; if let Some(object) = this.as_gradient_glow_filter_object() { object.set_angle(activation.context.gc_context, clamped_angle); } Ok(Value::Undefined) } pub fn colors<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(filter) = this.as_gradient_glow_filter_object() { let array = ScriptObject::array( activation.context.gc_context, Some(activation.context.avm1.prototypes.array), ); let arr = filter.colors(); for (index, item) in arr.iter().copied().enumerate() { array.set_array_element(index, item.into(), activation.context.gc_context); } return Ok(array.into()); } Ok(Value::Undefined) } pub fn set_colors<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let colors = args.get(0).unwrap_or(&Value::Undefined); if let Value::Object(obj) = colors { if let Some(filter) = 
this.as_gradient_glow_filter_object() { let arr_len = obj.length(); let mut colors_arr = Vec::with_capacity(arr_len); let old_alphas = filter.alphas(); let mut alphas_arr = Vec::with_capacity(arr_len); for index in 0..arr_len { let col = obj.array_element(index).coerce_to_u32(activation)?; let alpha = if let Some(alpha) = old_alphas.get(index) { *alpha } else if col >> 24 == 0 { 0.0 } else { 255.0 / (col >> 24) as f64 }; colors_arr.push(col & 0xFFFFFF); alphas_arr.push(alpha); } filter.set_colors(activation.context.gc_context, colors_arr); filter.set_alphas(activation.context.gc_context, alphas_arr); let ratios = filter.ratios().into_iter().take(arr_len).collect(); filter.set_ratios(activation.context.gc_context, ratios); } } Ok(Value::Undefined) } pub fn alphas<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(filter) = this.as_gradient_glow_filter_object() { let array = ScriptObject::array( activation.context.gc_context, Some(activation.context.avm1.prototypes.array), ); let arr = filter.alphas(); for (index, item) in arr.iter().copied().enumerate() { array.set_array_element(index, item.into(), activation.context.gc_context); } return Ok(array.into()); } Ok(Value::Undefined) } pub fn set_alphas<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let alphas = args.get(0).unwrap_or(&Value::Undefined); if let Value::Object(obj) = alphas { if let Some(filter) = this.as_gradient_glow_filter_object() { let arr_len = obj.length().min(filter.colors().len()); let mut arr = Vec::with_capacity(arr_len); for index in 0..arr_len { arr.push( obj.array_element(index) .coerce_to_f64(activation)? .max(0.0) .min(1.0), ); } let colors = filter.colors().into_iter().take(arr_len).collect(); filter.set_colors(activation.context.gc_context, colors); let ratios = filter.ratios().into_iter().take(arr_len).collect(); filter.set_ratios(activation.context.gc_context, ratios); filter.set_alphas(activation.context.gc_context, arr); } } Ok(Value::Undefined) } pub fn ratios<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(filter) = this.as_gradient_glow_filter_object() { let array = ScriptObject::array( activation.context.gc_context, Some(activation.context.avm1.prototypes.array), ); let arr = filter.ratios(); for (index, item) in arr.iter().copied().enumerate() { array.set_array_element(index, item.into(), activation.context.gc_context); } return Ok(array.into()); } Ok(Value::Undefined) } pub fn set_ratios<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let ratios = args.get(0).unwrap_or(&Value::Undefined); if let Value::Object(obj) = ratios { if let Some(filter) = this.as_gradient_glow_filter_object() { let arr_len = obj.length().min(filter.colors().len()); let mut arr = Vec::with_capacity(arr_len); for index in 0..arr_len { arr.push( obj.array_element(index) .coerce_to_i32(activation)? 
.max(0) .min(255) as u8, ); } let colors = filter.colors().into_iter().take(arr_len).collect(); filter.set_colors(activation.context.gc_context, colors); let alphas = filter.alphas().into_iter().take(arr_len).collect(); filter.set_alphas(activation.context.gc_context, alphas); filter.set_ratios(activation.context.gc_context, arr); } } Ok(Value::Undefined) } pub fn blur_x<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.blur_x().into()); } Ok(Value::Undefined) } pub fn set_blur_x<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let blur_x = args .get(0) .unwrap_or(&4.0.into()) .coerce_to_f64(activation) .map(|x| x.max(0.0).min(255.0))?; if let Some(object) = this.as_gradient_glow_filter_object() { object.set_blur_x(activation.context.gc_context, blur_x); } Ok(Value::Undefined) } pub fn blur_y<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.blur_y().into()); } Ok(Value::Undefined) } pub fn set_blur_y<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let blur_y = args .get(0) .unwrap_or(&4.0.into()) .coerce_to_f64(activation) .map(|x| x.max(0.0).min(255.0))?; if let Some(object) = this.as_gradient_glow_filter_object() { object.set_blur_y(activation.context.gc_context, blur_y); } Ok(Value::Undefined) } pub fn strength<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.strength().into()); } Ok(Value::Undefined) } pub fn set_strength<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let strength = args .get(0) .unwrap_or(&1.0.into()) .coerce_to_f64(activation) .map(|x| x.max(0.0).min(255.0))?; if let Some(object) = this.as_gradient_glow_filter_object() { object.set_strength(activation.context.gc_context, strength); } Ok(Value::Undefined) } pub fn quality<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.quality().into()); } Ok(Value::Undefined) } pub fn set_quality<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let quality = args .get(0) .unwrap_or(&1.0.into()) .coerce_to_i32(activation) .map(|x| x.max(0).min(15))?; if let Some(object) = this.as_gradient_glow_filter_object() { object.set_quality(activation.context.gc_context, quality); } Ok(Value::Undefined) } pub fn get_type<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(filter) = this.as_gradient_glow_filter_object() { let type_: &str = filter.get_type().into(); return Ok(AvmString::new(activation.context.gc_context, type_.to_string()).into()); } Ok(Value::Undefined) } pub fn set_type<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let type_: 
BevelFilterType = args .get(0) .unwrap_or(&Value::String(AvmString::new( activation.context.gc_context, "inner".to_string(), ))) .coerce_to_string(activation) .map(|s| s.as_str().into())?; if let Some(filter) = this.as_gradient_glow_filter_object() { filter.set_type(activation.context.gc_context, type_); } Ok(Value::Undefined) } pub fn knockout<'gc>( _activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { if let Some(object) = this.as_gradient_glow_filter_object() { return Ok(object.knockout().into()); } Ok(Value::Undefined) } pub fn set_knockout<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let knockout = args .get(0) .unwrap_or(&false.into()) .as_bool(activation.swf_version()); if let Some(object) = this.as_gradient_glow_filter_object() { object.set_knockout(activation.context.gc_context, knockout); } Ok(Value::Undefined) } pub fn create_proto<'gc>( gc_context: MutationContext<'gc, '_>, proto: Object<'gc>, fn_proto: Object<'gc>, ) -> Object<'gc> { let color_matrix_filter = GradientGlowFilterObject::empty_object(gc_context, Some(proto)); let object = color_matrix_filter.as_script_object().unwrap(); object.add_property( gc_context, "distance", FunctionObject::function( gc_context, Executable::Native(distance), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_distance), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "angle", FunctionObject::function( gc_context, Executable::Native(angle), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_angle), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "colors", FunctionObject::function( gc_context, Executable::Native(colors), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_colors), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "alphas", FunctionObject::function( gc_context, Executable::Native(alphas), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_alphas), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "ratios", FunctionObject::function( gc_context, Executable::Native(ratios), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_ratios), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "blurX", FunctionObject::function( gc_context, Executable::Native(blur_x), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_blur_x), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "blurY", FunctionObject::function( gc_context, Executable::Native(blur_y), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_blur_y), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "strength", FunctionObject::function( gc_context, Executable::Native(strength), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_strength), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "quality", FunctionObject::function( gc_context, Executable::Native(quality), Some(fn_proto), fn_proto, ), 
Some(FunctionObject::function( gc_context, Executable::Native(set_quality), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "type", FunctionObject::function( gc_context, Executable::Native(get_type), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_type), Some(fn_proto), fn_proto, )), Attribute::empty(), ); object.add_property( gc_context, "knockout", FunctionObject::function( gc_context, Executable::Native(knockout), Some(fn_proto), fn_proto, ), Some(FunctionObject::function( gc_context, Executable::Native(set_knockout), Some(fn_proto), fn_proto, )), Attribute::empty(), ); color_matrix_filter.into() }
27.712782
94
0.561615
03c9da0be09797e440503f46c12b27c5c8356753
11,568
//! The pass that tries to make stack overflows deterministic, by introducing //! an upper bound of the stack size. //! //! This pass introduces a global mutable variable to track stack height, //! and instruments all calls with preamble and postamble. //! //! Stack height is increased prior the call. Otherwise, the check would //! be made after the stack frame is allocated. //! //! The preamble is inserted before the call. It increments //! the global stack height variable with statically determined "stack cost" //! of the callee. If after the increment the stack height exceeds //! the limit (specified by the `rules`) then execution traps. //! Otherwise, the call is executed. //! //! The postamble is inserted after the call. The purpose of the postamble is to decrease //! the stack height by the "stack cost" of the callee function. //! //! Note, that we can't instrument all possible ways to return from the function. The simplest //! example would be a trap issued by the host function. //! That means stack height global won't be equal to zero upon the next execution after such trap. //! //! # Thunks //! //! Because stack height is increased prior the call few problems arises: //! //! - Stack height isn't increased upon an entry to the first function, i.e. exported function. //! - Start function is executed externally (similar to exported functions). //! - It is statically unknown what function will be invoked in an indirect call. //! //! The solution for this problems is to generate a intermediate functions, called 'thunks', which //! will increase before and decrease the stack height after the call to original function, and //! then make exported function and table entries, start section to point to a corresponding thunks. //! //! # Stack cost //! //! Stack cost of the function is calculated as a sum of it's locals //! and the maximal height of the value stack. //! //! All values are treated equally, as they have the same size. //! //! The rationale is that this makes it possible to use the following very naive wasm executor: //! //! - values are implemented by a union, so each value takes a size equal to //! the size of the largest possible value type this union can hold. (In MVP it is 8 bytes) //! - each value from the value stack is placed on the native stack. //! - each local variable and function argument is placed on the native stack. //! - arguments pushed by the caller are copied into callee stack rather than shared //! between the frames. //! - upon entry into the function entire stack frame is allocated. use crate::std::{mem, string::String, vec::Vec}; use parity_wasm::{ builder, elements::{self, Instruction, Instructions, Type}, }; /// Macro to generate preamble and postamble. macro_rules! instrument_call { ($callee_idx: expr, $callee_stack_cost: expr, $stack_height_global_idx: expr, $stack_limit: expr) => {{ use $crate::parity_wasm::elements::Instruction::*; [ // stack_height += stack_cost(F) GetGlobal($stack_height_global_idx), I32Const($callee_stack_cost), I32Add, SetGlobal($stack_height_global_idx), // if stack_counter > LIMIT: unreachable GetGlobal($stack_height_global_idx), I32Const($stack_limit as i32), I32GtU, If(elements::BlockType::NoResult), Unreachable, End, // Original call Call($callee_idx), // stack_height -= stack_cost(F) GetGlobal($stack_height_global_idx), I32Const($callee_stack_cost), I32Sub, SetGlobal($stack_height_global_idx), ] }}; } mod max_height; mod thunk; /// Error that occured during processing the module. 
/// /// This means that the module is invalid. #[derive(Debug)] pub struct Error(String); pub(crate) struct Context { stack_height_global_idx: u32, func_stack_costs: Vec<u32>, stack_limit: u32, } impl Context { /// Returns index in a global index space of a stack_height global variable. fn stack_height_global_idx(&self) -> u32 { self.stack_height_global_idx } /// Returns `stack_cost` for `func_idx`. fn stack_cost(&self, func_idx: u32) -> Option<u32> { self.func_stack_costs.get(func_idx as usize).cloned() } /// Returns stack limit specified by the rules. fn stack_limit(&self) -> u32 { self.stack_limit } } /// Instrument a module with stack height limiter. /// /// See module-level documentation for more details. /// /// # Errors /// /// Returns `Err` if module is invalid and can't be pub fn inject_limiter( mut module: elements::Module, stack_limit: u32, ) -> Result<elements::Module, Error> { let mut ctx = Context { stack_height_global_idx: generate_stack_height_global(&mut module), func_stack_costs: compute_stack_costs(&module)?, stack_limit, }; instrument_functions(&mut ctx, &mut module)?; let module = thunk::generate_thunks(&mut ctx, module)?; Ok(module) } /// Generate a new global that will be used for tracking current stack height. fn generate_stack_height_global(module: &mut elements::Module) -> u32 { let global_entry = builder::global() .value_type() .i32() .mutable() .init_expr(Instruction::I32Const(0)) .build(); // Try to find an existing global section. for section in module.sections_mut() { if let elements::Section::Global(gs) = section { gs.entries_mut().push(global_entry); return (gs.entries().len() as u32) - 1 } } // Existing section not found, create one! module .sections_mut() .push(elements::Section::Global(elements::GlobalSection::with_entries(vec![global_entry]))); 0 } /// Calculate stack costs for all functions. /// /// Returns a vector with a stack cost for each function, including imports. fn compute_stack_costs(module: &elements::Module) -> Result<Vec<u32>, Error> { let func_imports = module.import_count(elements::ImportCountType::Function); // TODO: optimize! (0..module.functions_space()) .map(|func_idx| { if func_idx < func_imports { // We can't calculate stack_cost of the import functions. Ok(0) } else { compute_stack_cost(func_idx as u32, module) } }) .collect() } /// Stack cost of the given *defined* function is the sum of it's locals count (that is, /// number of arguments plus number of local variables) and the maximal stack /// height. fn compute_stack_cost(func_idx: u32, module: &elements::Module) -> Result<u32, Error> { // To calculate the cost of a function we need to convert index from // function index space to defined function spaces. 
let func_imports = module.import_count(elements::ImportCountType::Function) as u32; let defined_func_idx = func_idx .checked_sub(func_imports) .ok_or_else(|| Error("This should be a index of a defined function".into()))?; let code_section = module .code_section() .ok_or_else(|| Error("Due to validation code section should exists".into()))?; let body = &code_section .bodies() .get(defined_func_idx as usize) .ok_or_else(|| Error("Function body is out of bounds".into()))?; let mut locals_count: u32 = 0; for local_group in body.locals() { locals_count = locals_count .checked_add(local_group.count()) .ok_or_else(|| Error("Overflow in local count".into()))?; } let max_stack_height = max_height::compute(defined_func_idx, module)?; locals_count .checked_add(max_stack_height) .ok_or_else(|| Error("Overflow in adding locals_count and max_stack_height".into())) } fn instrument_functions(ctx: &mut Context, module: &mut elements::Module) -> Result<(), Error> { for section in module.sections_mut() { if let elements::Section::Code(code_section) = section { for func_body in code_section.bodies_mut() { let opcodes = func_body.code_mut(); instrument_function(ctx, opcodes)?; } } } Ok(()) } /// This function searches `call` instructions and wrap each call /// with preamble and postamble. /// /// Before: /// /// ```text /// get_local 0 /// get_local 1 /// call 228 /// drop /// ``` /// /// After: /// /// ```text /// get_local 0 /// get_local 1 /// /// < ... preamble ... > /// /// call 228 /// /// < .. postamble ... > /// /// drop /// ``` fn instrument_function(ctx: &mut Context, func: &mut Instructions) -> Result<(), Error> { use Instruction::*; struct InstrumentCall { offset: usize, callee: u32, cost: u32, } let calls: Vec<_> = func .elements() .iter() .enumerate() .filter_map(|(offset, instruction)| { if let Call(callee) = instruction { ctx.stack_cost(*callee).and_then(|cost| { if cost > 0 { Some(InstrumentCall { callee: *callee, offset, cost }) } else { None } }) } else { None } }) .collect(); // The `instrumented_call!` contains the call itself. This is why we need to subtract one. 
    let len = func.elements().len() + calls.len() * (instrument_call!(0, 0, 0, 0).len() - 1);
    let original_instrs = mem::replace(func.elements_mut(), Vec::with_capacity(len));
    let new_instrs = func.elements_mut();

    let mut calls = calls.into_iter().peekable();
    for (original_pos, instr) in original_instrs.into_iter().enumerate() {
        // Whether there is a call instruction at this position that needs to be instrumented.
        let did_instrument = if let Some(call) = calls.peek() {
            if call.offset == original_pos {
                let new_seq = instrument_call!(
                    call.callee,
                    call.cost as i32,
                    ctx.stack_height_global_idx(),
                    ctx.stack_limit()
                );
                new_instrs.extend(new_seq);
                true
            } else {
                false
            }
        } else {
            false
        };

        if did_instrument {
            calls.next();
        } else {
            new_instrs.push(instr);
        }
    }

    if calls.next().is_some() {
        return Err(Error("Not all calls were used".into()))
    }

    Ok(())
}

fn resolve_func_type(
    func_idx: u32,
    module: &elements::Module,
) -> Result<&elements::FunctionType, Error> {
    let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]);
    let functions = module.function_section().map(|fs| fs.entries()).unwrap_or(&[]);

    let func_imports = module.import_count(elements::ImportCountType::Function);
    let sig_idx = if func_idx < func_imports as u32 {
        module
            .import_section()
            .expect("function import count is not zero; import section must exist; qed")
            .entries()
            .iter()
            .filter_map(|entry| match entry.external() {
                elements::External::Function(idx) => Some(*idx),
                _ => None,
            })
            .nth(func_idx as usize)
            .expect(
                "func_idx is less than function imports count; nth function import must be `Some`; qed",
            )
    } else {
        functions
            .get(func_idx as usize - func_imports)
            .ok_or_else(|| Error(format!("Function at index {} is not defined", func_idx)))?
            .type_ref()
    };
    let Type::Function(ty) = types.get(sig_idx as usize).ok_or_else(|| {
        Error(format!("Signature {} (specified by func {}) isn't defined", sig_idx, func_idx))
    })?;
    Ok(ty)
}

#[cfg(test)]
mod tests {
    use super::*;
    use parity_wasm::elements;

    fn parse_wat(source: &str) -> elements::Module {
        elements::deserialize_buffer(&wabt::wat2wasm(source).expect("Failed to wat2wasm"))
            .expect("Failed to deserialize the module")
    }

    fn validate_module(module: elements::Module) {
        let binary = elements::serialize(module).expect("Failed to serialize");
        wabt::Module::read_binary(&binary, &Default::default())
            .expect("Wabt failed to read final binary")
            .validate()
            .expect("Invalid module");
    }

    #[test]
    fn test_with_params_and_result() {
        let module = parse_wat(
            r#"
(module
    (func (export "i32.add") (param i32 i32) (result i32)
        get_local 0
        get_local 1
        i32.add
    )
)
"#,
        );

        let module = inject_limiter(module, 1024).expect("Failed to inject stack counter");
        validate_module(module);
    }
}
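
// Sketch added for illustration (not part of the upstream crate): exercises the
// call-instrumentation path described above, where every `call` to a function
// with a non-zero stack cost gets wrapped in the preamble/postamble. The module
// name, the wat source, and the helper structure below are assumptions made for
// this example; it mirrors the wabt-based round-trip used in `mod tests`.
#[cfg(test)]
mod call_instrumentation_sketch {
    use super::*;

    #[test]
    fn instruments_calls_and_stays_valid() {
        // A module where the exported function calls another one, so
        // `instrument_function` has at least one `call` to wrap.
        let wasm = wabt::wat2wasm(
            r#"
(module
    (func $add (param i32 i32) (result i32)
        get_local 0
        get_local 1
        i32.add
    )
    (func (export "main") (result i32)
        i32.const 1
        i32.const 2
        call $add
    )
)
"#,
        )
        .expect("Failed to wat2wasm");
        let module: elements::Module =
            elements::deserialize_buffer(&wasm).expect("Failed to deserialize the module");

        let module = inject_limiter(module, 1024).expect("Failed to inject stack counter");

        // The instrumented module must still be valid wasm.
        let binary = elements::serialize(module).expect("Failed to serialize");
        wabt::Module::read_binary(&binary, &Default::default())
            .expect("Wabt failed to read final binary")
            .validate()
            .expect("Invalid module");
    }
}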
29.286076
104
0.693811
e5cb00767187fb994547eeb913e5fd1586220042
261,818
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn parse_http_generic_error( response: &http::Response<bytes::Bytes>, ) -> Result<aws_smithy_types::Error, aws_smithy_json::deserialize::Error> { crate::json_errors::parse_generic_error(response.body(), response.headers()) } pub fn deser_structure_crate_error_internal_failure_exception_json_err( value: &[u8], mut builder: crate::error::internal_failure_exception::Builder, ) -> Result<crate::error::internal_failure_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_invalid_request_exception_json_err( value: &[u8], mut builder: crate::error::invalid_request_exception::Builder, ) -> Result<crate::error::invalid_request_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_limit_exceeded_exception_json_err( value: &[u8], mut builder: crate::error::limit_exceeded_exception::Builder, ) -> Result<crate::error::limit_exceeded_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_already_exists_exception_json_err( value: &[u8], mut builder: crate::error::resource_already_exists_exception::Builder, ) -> Result< crate::error::resource_already_exists_exception::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "resourceId" => { builder = builder.set_resource_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "resourceArn" => { builder = builder.set_resource_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_in_use_exception_json_err( value: &[u8], mut builder: crate::error::resource_in_use_exception::Builder, ) -> Result<crate::error::resource_in_use_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_service_unavailable_exception_json_err( value: &[u8], mut builder: crate::error::service_unavailable_exception::Builder, ) -> Result<crate::error::service_unavailable_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_throttling_exception_json_err( value: &[u8], mut builder: crate::error::throttling_exception::Builder, ) -> Result<crate::error::throttling_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_create_alarm_model( value: &[u8], mut builder: crate::output::create_alarm_model_output::Builder, ) -> Result<crate::output::create_alarm_model_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. 
}) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "alarmModelArn" => { builder = builder.set_alarm_model_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelVersion" => { builder = builder.set_alarm_model_version( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::AlarmModelVersionStatus::from(u.as_ref()) }) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_create_detector_model( value: &[u8], mut builder: crate::output::create_detector_model_output::Builder, ) -> Result<crate::output::create_detector_model_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelConfiguration" => { builder = builder.set_detector_model_configuration( crate::json_deser::deser_structure_crate_model_detector_model_configuration(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_create_input( value: &[u8], mut builder: crate::output::create_input_output::Builder, ) -> Result<crate::output::create_input_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "inputConfiguration" => { builder = builder.set_input_configuration( crate::json_deser::deser_structure_crate_model_input_configuration( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_not_found_exception_json_err( value: &[u8], mut builder: crate::error::resource_not_found_exception::Builder, ) -> Result<crate::error::resource_not_found_exception::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_alarm_model( value: &[u8], mut builder: crate::output::describe_alarm_model_output::Builder, ) -> Result<crate::output::describe_alarm_model_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "alarmCapabilities" => { builder = builder.set_alarm_capabilities( crate::json_deser::deser_structure_crate_model_alarm_capabilities( tokens, )?, ); } "alarmEventActions" => { builder = builder.set_alarm_event_actions( crate::json_deser::deser_structure_crate_model_alarm_event_actions( tokens, )?, ); } "alarmModelArn" => { builder = builder.set_alarm_model_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelDescription" => { builder = builder.set_alarm_model_description( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelName" => { builder = builder.set_alarm_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelVersion" => { builder = builder.set_alarm_model_version( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmNotification" => { builder = builder.set_alarm_notification( crate::json_deser::deser_structure_crate_model_alarm_notification( tokens, )?, ); } "alarmRule" => { builder = builder.set_alarm_rule( crate::json_deser::deser_structure_crate_model_alarm_rule(tokens)?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "key" => { builder = builder.set_key( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "roleArn" => { builder = builder.set_role_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "severity" => { builder = builder.set_severity( aws_smithy_json::deserialize::token::expect_number_or_null( tokens.next(), )? .map(|v| v.to_i32()), ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::AlarmModelVersionStatus::from(u.as_ref()) }) }) .transpose()?, ); } "statusMessage" => { builder = builder.set_status_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_detector_model( value: &[u8], mut builder: crate::output::describe_detector_model_output::Builder, ) -> Result< crate::output::describe_detector_model_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "detectorModel" => { builder = builder.set_detector_model( crate::json_deser::deser_structure_crate_model_detector_model(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_detector_model_analysis( value: &[u8], mut builder: crate::output::describe_detector_model_analysis_output::Builder, ) -> Result< crate::output::describe_detector_model_analysis_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::AnalysisStatus::from(u.as_ref())) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_input( value: &[u8], mut builder: crate::output::describe_input_output::Builder, ) -> Result<crate::output::describe_input_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "input" => { builder = builder.set_input( crate::json_deser::deser_structure_crate_model_input(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_unsupported_operation_exception_json_err( value: &[u8], mut builder: crate::error::unsupported_operation_exception::Builder, ) -> Result< crate::error::unsupported_operation_exception::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_logging_options( value: &[u8], mut builder: crate::output::describe_logging_options_output::Builder, ) -> Result< crate::output::describe_logging_options_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "loggingOptions" => { builder = builder.set_logging_options( crate::json_deser::deser_structure_crate_model_logging_options(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_get_detector_model_analysis_results( value: &[u8], mut builder: crate::output::get_detector_model_analysis_results_output::Builder, ) -> Result< crate::output::get_detector_model_analysis_results_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "analysisResults" => { builder = builder.set_analysis_results( crate::json_deser::deser_list_com_amazonaws_iotevents_analysis_results( tokens, )?, ); } "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_alarm_models( value: &[u8], mut builder: crate::output::list_alarm_models_output::Builder, ) -> Result<crate::output::list_alarm_models_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "alarmModelSummaries" => { builder = builder.set_alarm_model_summaries( crate::json_deser::deser_list_com_amazonaws_iotevents_alarm_model_summaries(tokens)? ); } "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_alarm_model_versions( value: &[u8], mut builder: crate::output::list_alarm_model_versions_output::Builder, ) -> Result< crate::output::list_alarm_model_versions_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "alarmModelVersionSummaries" => { builder = builder.set_alarm_model_version_summaries( crate::json_deser::deser_list_com_amazonaws_iotevents_alarm_model_version_summaries(tokens)? ); } "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_detector_models( value: &[u8], mut builder: crate::output::list_detector_models_output::Builder, ) -> Result<crate::output::list_detector_models_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelSummaries" => { builder = builder.set_detector_model_summaries( crate::json_deser::deser_list_com_amazonaws_iotevents_detector_model_summaries(tokens)? ); } "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_detector_model_versions( value: &[u8], mut builder: crate::output::list_detector_model_versions_output::Builder, ) -> Result< crate::output::list_detector_model_versions_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelVersionSummaries" => { builder = builder.set_detector_model_version_summaries( crate::json_deser::deser_list_com_amazonaws_iotevents_detector_model_version_summaries(tokens)? ); } "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_input_routings( value: &[u8], mut builder: crate::output::list_input_routings_output::Builder, ) -> Result<crate::output::list_input_routings_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "routedResources" => { builder = builder.set_routed_resources( crate::json_deser::deser_list_com_amazonaws_iotevents_routed_resources( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_inputs( value: &[u8], mut builder: crate::output::list_inputs_output::Builder, ) -> Result<crate::output::list_inputs_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "inputSummaries" => { builder = builder.set_input_summaries( crate::json_deser::deser_list_com_amazonaws_iotevents_input_summaries( tokens, )?, ); } "nextToken" => { builder = builder.set_next_token( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_tags_for_resource( value: &[u8], mut builder: crate::output::list_tags_for_resource_output::Builder, ) -> Result< crate::output::list_tags_for_resource_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "tags" => { builder = builder.set_tags( crate::json_deser::deser_list_com_amazonaws_iotevents_tags(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_start_detector_model_analysis( value: &[u8], mut builder: crate::output::start_detector_model_analysis_output::Builder, ) -> Result< crate::output::start_detector_model_analysis_output::Builder, aws_smithy_json::deserialize::Error, > { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "analysisId" => { builder = builder.set_analysis_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_update_alarm_model( value: &[u8], mut builder: crate::output::update_alarm_model_output::Builder, ) -> Result<crate::output::update_alarm_model_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "alarmModelArn" => { builder = builder.set_alarm_model_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelVersion" => { builder = builder.set_alarm_model_version( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| { s.to_unescaped().map(|u| { crate::model::AlarmModelVersionStatus::from(u.as_ref()) }) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_update_detector_model( value: &[u8], mut builder: crate::output::update_detector_model_output::Builder, ) -> Result<crate::output::update_detector_model_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelConfiguration" => { builder = builder.set_detector_model_configuration( crate::json_deser::deser_structure_crate_model_detector_model_configuration(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_update_input( value: &[u8], mut builder: crate::output::update_input_output::Builder, ) -> Result<crate::output::update_input_output::Builder, aws_smithy_json::deserialize::Error> { let mut tokens_owned = aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value)) .peekable(); let tokens = &mut tokens_owned; aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "inputConfiguration" => { builder = builder.set_input_configuration( crate::json_deser::deser_structure_crate_model_input_configuration( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } if tokens.next().is_some() { return Err(aws_smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn or_empty_doc(data: &[u8]) -> &[u8] { if data.is_empty() { b"{}" } else { data } } pub fn deser_structure_crate_model_detector_model_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DetectorModelConfiguration>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. 
}) => { #[allow(unused_mut)] let mut builder = crate::model::DetectorModelConfiguration::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelName" => { builder = builder.set_detector_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "detectorModelVersion" => { builder = builder.set_detector_model_version( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "detectorModelDescription" => { builder = builder.set_detector_model_description( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "detectorModelArn" => { builder = builder.set_detector_model_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "roleArn" => { builder = builder.set_role_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::DetectorModelVersionStatus::from( u.as_ref(), ) }) }) .transpose()?, ); } "key" => { builder = builder.set_key( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "evaluationMethod" => { builder = builder.set_evaluation_method( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::EvaluationMethod::from(u.as_ref()) }) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_input_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::InputConfiguration>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::InputConfiguration::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "inputName" => { builder = builder.set_input_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "inputDescription" => { builder = builder.set_input_description( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "inputArn" => { builder = builder.set_input_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::InputStatus::from(u.as_ref())) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_capabilities<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmCapabilities>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmCapabilities::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "initializationConfiguration" => { builder = builder.set_initialization_configuration( crate::json_deser::deser_structure_crate_model_initialization_configuration(tokens)? ); } "acknowledgeFlow" => { builder = builder.set_acknowledge_flow( crate::json_deser::deser_structure_crate_model_acknowledge_flow(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_event_actions<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmEventActions>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. 
}) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmEventActions::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "alarmActions" => { builder = builder.set_alarm_actions( crate::json_deser::deser_list_com_amazonaws_iotevents_alarm_actions(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_notification<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmNotification>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmNotification::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "notificationActions" => { builder = builder.set_notification_actions( crate::json_deser::deser_list_com_amazonaws_iotevents_notification_actions(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_rule<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmRule>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmRule::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "simpleRule" => { builder = builder.set_simple_rule( crate::json_deser::deser_structure_crate_model_simple_rule( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_detector_model<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DetectorModel>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DetectorModel::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelDefinition" => { builder = builder.set_detector_model_definition( crate::json_deser::deser_structure_crate_model_detector_model_definition(tokens)? ); } "detectorModelConfiguration" => { builder = builder.set_detector_model_configuration( crate::json_deser::deser_structure_crate_model_detector_model_configuration(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_input<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::Input>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::Input::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "inputConfiguration" => { builder = builder.set_input_configuration( crate::json_deser::deser_structure_crate_model_input_configuration(tokens)? ); } "inputDefinition" => { builder = builder.set_input_definition( crate::json_deser::deser_structure_crate_model_input_definition(tokens)? 
); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_logging_options<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::LoggingOptions>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::LoggingOptions::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "roleArn" => { builder = builder.set_role_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "level" => { builder = builder.set_level( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::LoggingLevel::from(u.as_ref())) }) .transpose()?, ); } "enabled" => { builder = builder.set_enabled( aws_smithy_json::deserialize::token::expect_bool_or_null( tokens.next(), )?, ); } "detectorDebugOptions" => { builder = builder.set_detector_debug_options( crate::json_deser::deser_list_com_amazonaws_iotevents_detector_debug_options(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_analysis_results<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::AnalysisResult>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_analysis_result(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_alarm_model_summaries<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::AlarmModelSummary>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? 
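// List shapes mirror the structure pattern below: JSON null yields Ok(None),
// StartArray opens a peek-driven loop that collects elements until EndArray,
// and each element is again delegated to the matching `deser_structure_*`
// helper.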
{ Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_alarm_model_summary( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_alarm_model_version_summaries<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::AlarmModelVersionSummary>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_alarm_model_version_summary(tokens)? ; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_detector_model_summaries<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::DetectorModelSummary>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_detector_model_summary( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_detector_model_version_summaries<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::DetectorModelVersionSummary>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_detector_model_version_summary(tokens)? 
; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_routed_resources<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::RoutedResource>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_routed_resource(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_input_summaries<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::InputSummary>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_input_summary(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_tags<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::Tag>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_tag(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_initialization_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::InitializationConfiguration>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. 
}) => { #[allow(unused_mut)] let mut builder = crate::model::InitializationConfiguration::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "disabledOnInitialization" => { builder = builder.set_disabled_on_initialization( aws_smithy_json::deserialize::token::expect_bool_or_null( tokens.next(), )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_acknowledge_flow<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AcknowledgeFlow>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AcknowledgeFlow::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "enabled" => { builder = builder.set_enabled( aws_smithy_json::deserialize::token::expect_bool_or_null( tokens.next(), )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_alarm_actions<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::AlarmAction>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_alarm_action(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_notification_actions<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::NotificationAction>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. 
}) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_notification_action( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_simple_rule<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SimpleRule>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SimpleRule::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "inputProperty" => { builder = builder.set_input_property( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "comparisonOperator" => { builder = builder.set_comparison_operator( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::ComparisonOperator::from(u.as_ref()) }) }) .transpose()?, ); } "threshold" => { builder = builder.set_threshold( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_detector_model_definition<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DetectorModelDefinition>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DetectorModelDefinition::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "states" => { builder = builder.set_states( crate::json_deser::deser_list_com_amazonaws_iotevents_states( tokens, )?, ); } "initialStateName" => { builder = builder.set_initial_state_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
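// String members share one pipeline: `expect_string_or_null` reads the token,
// `to_unescaped` resolves JSON escapes (borrowing when there are none), and
// `into_owned` materialises the `String` handed to the builder.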
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_input_definition<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::InputDefinition>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::InputDefinition::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "attributes" => { builder = builder.set_attributes( crate::json_deser::deser_list_com_amazonaws_iotevents_attributes(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_detector_debug_options<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::DetectorDebugOption>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_detector_debug_option( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_analysis_result<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AnalysisResult>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AnalysisResult::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "type" => { builder = builder.set_type( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
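// `AnalysisResult` pairs this `type` string with a string-backed
// `AnalysisResultLevel` severity enum and an optional list of source
// locations, each handled by its own helper further below.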
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "level" => { builder = builder.set_level( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::AnalysisResultLevel::from(u.as_ref()) }) }) .transpose()?, ); } "message" => { builder = builder.set_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "locations" => { builder = builder.set_locations( crate::json_deser::deser_list_com_amazonaws_iotevents_analysis_result_locations(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_model_summary<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmModelSummary>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmModelSummary::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "alarmModelDescription" => { builder = builder.set_alarm_model_description( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelName" => { builder = builder.set_alarm_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_model_version_summary<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmModelVersionSummary>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmModelVersionSummary::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "alarmModelName" => { builder = builder.set_alarm_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelArn" => { builder = builder.set_alarm_model_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "alarmModelVersion" => { builder = builder.set_alarm_model_version( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "roleArn" => { builder = builder.set_role_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::AlarmModelVersionStatus::from(u.as_ref()) }) }) .transpose()?, ); } "statusMessage" => { builder = builder.set_status_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_detector_model_summary<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DetectorModelSummary>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DetectorModelSummary::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelName" => { builder = builder.set_detector_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "detectorModelDescription" => { builder = builder.set_detector_model_description( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
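// Timestamp members such as `creationTime` and `lastUpdateTime` are decoded
// with `expect_timestamp_or_null`, using the epoch-seconds wire format named
// by `aws_smithy_types::instant::Format::EpochSeconds`.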
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_detector_model_version_summary<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DetectorModelVersionSummary>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DetectorModelVersionSummary::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelName" => { builder = builder.set_detector_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "detectorModelVersion" => { builder = builder.set_detector_model_version( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "detectorModelArn" => { builder = builder.set_detector_model_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "roleArn" => { builder = builder.set_role_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::DetectorModelVersionStatus::from( u.as_ref(), ) }) }) .transpose()?, ); } "evaluationMethod" => { builder = builder.set_evaluation_method( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| { s.to_unescaped().map(|u| { crate::model::EvaluationMethod::from(u.as_ref()) }) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_routed_resource<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::RoutedResource>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::RoutedResource::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "name" => { builder = builder.set_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "arn" => { builder = builder.set_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_input_summary<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::InputSummary>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::InputSummary::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "inputName" => { builder = builder.set_input_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "inputDescription" => { builder = builder.set_input_description( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "inputArn" => { builder = builder.set_input_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "creationTime" => { builder = builder.set_creation_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "lastUpdateTime" => { builder = builder.set_last_update_time( aws_smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), aws_smithy_types::instant::Format::EpochSeconds, )?, ); } "status" => { builder = builder.set_status( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::InputStatus::from(u.as_ref())) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_tag<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::Tag>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::Tag::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "key" => { builder = builder.set_key( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "value" => { builder = builder.set_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_alarm_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AlarmAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AlarmAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "sns" => { builder = builder.set_sns( crate::json_deser::deser_structure_crate_model_sns_topic_publish_action(tokens)? ); } "iotTopicPublish" => { builder = builder.set_iot_topic_publish( crate::json_deser::deser_structure_crate_model_iot_topic_publish_action(tokens)? 
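// `AlarmAction` behaves like a one-of: each JSON key selects a different
// target service (SNS, IoT topic, Lambda, IoT Events, SQS, Firehose,
// DynamoDB, DynamoDBv2, IoT SiteWise) and fills exactly one optional member
// of the builder.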
); } "lambda" => { builder = builder.set_lambda( crate::json_deser::deser_structure_crate_model_lambda_action( tokens, )?, ); } "iotEvents" => { builder = builder.set_iot_events( crate::json_deser::deser_structure_crate_model_iot_events_action(tokens)? ); } "sqs" => { builder = builder.set_sqs( crate::json_deser::deser_structure_crate_model_sqs_action( tokens, )?, ); } "firehose" => { builder = builder.set_firehose( crate::json_deser::deser_structure_crate_model_firehose_action( tokens, )?, ); } "dynamoDB" => { builder = builder.set_dynamo_db( crate::json_deser::deser_structure_crate_model_dynamo_db_action(tokens)? ); } "dynamoDBv2" => { builder = builder.set_dynamo_d_bv2( crate::json_deser::deser_structure_crate_model_dynamo_d_bv2_action(tokens)? ); } "iotSiteWise" => { builder = builder.set_iot_site_wise( crate::json_deser::deser_structure_crate_model_iot_site_wise_action(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_notification_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::NotificationAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::NotificationAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "action" => { builder = builder.set_action( crate::json_deser::deser_structure_crate_model_notification_target_actions(tokens)? ); } "smsConfigurations" => { builder = builder.set_sms_configurations( crate::json_deser::deser_list_com_amazonaws_iotevents_sms_configurations(tokens)? ); } "emailConfigurations" => { builder = builder.set_email_configurations( crate::json_deser::deser_list_com_amazonaws_iotevents_email_configurations(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_states<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::State>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. 
})) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_state(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_attributes<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::Attribute>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_attribute(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_detector_debug_option<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DetectorDebugOption>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DetectorDebugOption::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "detectorModelName" => { builder = builder.set_detector_model_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "keyValue" => { builder = builder.set_key_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_analysis_result_locations<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::AnalysisResultLocation>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. 
})) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_analysis_result_location(tokens)? ; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_sns_topic_publish_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SnsTopicPublishAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SnsTopicPublishAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "targetArn" => { builder = builder.set_target_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_iot_topic_publish_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::IotTopicPublishAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::IotTopicPublishAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "mqttTopic" => { builder = builder.set_mqtt_topic( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_lambda_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::LambdaAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? 
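// A minimal sketch of driving one of these helpers by hand; the payload bytes
// are illustrative, and the surrounding function is assumed to return a
// Result so `?` applies. `json_token_iter` is the tokenizer exported by
// `aws_smithy_json::deserialize`:
//
//     let body = br#"{"functionArn":"arn:aws:lambda:us-east-1:123456789012:function:notify"}"#;
//     let mut tokens = aws_smithy_json::deserialize::json_token_iter(body).peekable();
//     let action = crate::json_deser::deser_structure_crate_model_lambda_action(&mut tokens)?;
//     assert!(action.is_some());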
{ Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::LambdaAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "functionArn" => { builder = builder.set_function_arn( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_iot_events_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::IotEventsAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::IotEventsAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "inputName" => { builder = builder.set_input_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_sqs_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SqsAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SqsAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "queueUrl" => { builder = builder.set_queue_url( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
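// Boolean members (`useBase64` below) go through `expect_bool_or_null`, which
// yields an Option<bool> directly and needs no unescaping step.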
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "useBase64" => { builder = builder.set_use_base64( aws_smithy_json::deserialize::token::expect_bool_or_null( tokens.next(), )?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_firehose_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::FirehoseAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::FirehoseAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "deliveryStreamName" => { builder = builder.set_delivery_stream_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "separator" => { builder = builder.set_separator( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_dynamo_db_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DynamoDbAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DynamoDbAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "hashKeyType" => { builder = builder.set_hash_key_type( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "hashKeyField" => { builder = builder.set_hash_key_field( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "hashKeyValue" => { builder = builder.set_hash_key_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "rangeKeyType" => { builder = builder.set_range_key_type( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "rangeKeyField" => { builder = builder.set_range_key_field( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "rangeKeyValue" => { builder = builder.set_range_key_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "operation" => { builder = builder.set_operation( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payloadField" => { builder = builder.set_payload_field( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "tableName" => { builder = builder.set_table_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_dynamo_d_bv2_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::DynamoDBv2Action>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::DynamoDBv2Action::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "tableName" => { builder = builder.set_table_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
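// Like the other action shapes above, `DynamoDBv2Action` closes with the
// shared optional `payload` member, delegated to
// `deser_structure_crate_model_payload`.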
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "payload" => { builder = builder.set_payload( crate::json_deser::deser_structure_crate_model_payload(tokens)?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_iot_site_wise_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::IotSiteWiseAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::IotSiteWiseAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "entryId" => { builder = builder.set_entry_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "assetId" => { builder = builder.set_asset_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "propertyId" => { builder = builder.set_property_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "propertyAlias" => { builder = builder.set_property_alias( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "propertyValue" => { builder = builder.set_property_value( crate::json_deser::deser_structure_crate_model_asset_property_value(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_notification_target_actions<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::NotificationTargetActions>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::NotificationTargetActions::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "lambdaAction" => { builder = builder.set_lambda_action( crate::json_deser::deser_structure_crate_model_lambda_action( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_sms_configurations<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::SmsConfiguration>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_sms_configuration( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_email_configurations<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::EmailConfiguration>>, aws_smithy_json::deserialize::Error, > where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_email_configuration( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_state<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::State>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::State::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "stateName" => { builder = builder.set_state_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "onInput" => { builder = builder.set_on_input( crate::json_deser::deser_structure_crate_model_on_input_lifecycle(tokens)? ); } "onEnter" => { builder = builder.set_on_enter( crate::json_deser::deser_structure_crate_model_on_enter_lifecycle(tokens)? ); } "onExit" => { builder = builder.set_on_exit( crate::json_deser::deser_structure_crate_model_on_exit_lifecycle(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_attribute<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::Attribute>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::Attribute::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "jsonPath" => { builder = builder.set_json_path( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_analysis_result_location<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AnalysisResultLocation>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AnalysisResultLocation::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "path" => { builder = builder.set_path( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_payload<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::Payload>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::Payload::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "contentExpression" => { builder = builder.set_content_expression( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "type" => { builder = builder.set_type( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::PayloadType::from(u.as_ref())) }) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_asset_property_value<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AssetPropertyValue>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AssetPropertyValue::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "value" => { builder = builder.set_value( crate::json_deser::deser_structure_crate_model_asset_property_variant(tokens)? ); } "timestamp" => { builder = builder.set_timestamp( crate::json_deser::deser_structure_crate_model_asset_property_timestamp(tokens)? ); } "quality" => { builder = builder.set_quality( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_sms_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SmsConfiguration>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SmsConfiguration::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "senderId" => { builder = builder.set_sender_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "additionalMessage" => { builder = builder.set_additional_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "recipients" => { builder = builder.set_recipients( crate::json_deser::deser_list_com_amazonaws_iotevents_recipient_details(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_email_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::EmailConfiguration>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::EmailConfiguration::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "from" => { builder = builder.set_from( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "content" => { builder = builder.set_content( crate::json_deser::deser_structure_crate_model_email_content( tokens, )?, ); } "recipients" => { builder = builder.set_recipients( crate::json_deser::deser_structure_crate_model_email_recipients(tokens)? 
); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_on_input_lifecycle<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::OnInputLifecycle>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::OnInputLifecycle::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "events" => { builder = builder.set_events( crate::json_deser::deser_list_com_amazonaws_iotevents_events( tokens, )?, ); } "transitionEvents" => { builder = builder.set_transition_events( crate::json_deser::deser_list_com_amazonaws_iotevents_transition_events(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_on_enter_lifecycle<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::OnEnterLifecycle>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::OnEnterLifecycle::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "events" => { builder = builder.set_events( crate::json_deser::deser_list_com_amazonaws_iotevents_events( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_on_exit_lifecycle<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::OnExitLifecycle>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. 
}) => { #[allow(unused_mut)] let mut builder = crate::model::OnExitLifecycle::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "events" => { builder = builder.set_events( crate::json_deser::deser_list_com_amazonaws_iotevents_events( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_asset_property_variant<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AssetPropertyVariant>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AssetPropertyVariant::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "stringValue" => { builder = builder.set_string_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "integerValue" => { builder = builder.set_integer_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "doubleValue" => { builder = builder.set_double_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "booleanValue" => { builder = builder.set_boolean_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_asset_property_timestamp<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::AssetPropertyTimestamp>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::AssetPropertyTimestamp::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "timeInSeconds" => { builder = builder.set_time_in_seconds( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "offsetInNanos" => { builder = builder.set_offset_in_nanos( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_recipient_details<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::RecipientDetail>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_recipient_detail( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_email_content<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::EmailContent>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::EmailContent::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "subject" => { builder = builder.set_subject( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "additionalMessage" => { builder = builder.set_additional_message( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_email_recipients<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::EmailRecipients>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::EmailRecipients::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "to" => { builder = builder.set_to( crate::json_deser::deser_list_com_amazonaws_iotevents_recipient_details(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_events<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::Event>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_event(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_transition_events<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::TransitionEvent>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. 
})) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_transition_event( tokens, )?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_recipient_detail<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::RecipientDetail>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::RecipientDetail::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "ssoIdentity" => { builder = builder.set_sso_identity( crate::json_deser::deser_structure_crate_model_sso_identity( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_event<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::Event>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::Event::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "eventName" => { builder = builder.set_event_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "condition" => { builder = builder.set_condition( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "actions" => { builder = builder.set_actions( crate::json_deser::deser_list_com_amazonaws_iotevents_actions( tokens, )?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_transition_event<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::TransitionEvent>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. 
}) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::TransitionEvent::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "eventName" => { builder = builder.set_event_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "condition" => { builder = builder.set_condition( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "actions" => { builder = builder.set_actions( crate::json_deser::deser_list_com_amazonaws_iotevents_actions( tokens, )?, ); } "nextState" => { builder = builder.set_next_state( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_sso_identity<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SsoIdentity>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SsoIdentity::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "identityStoreId" => { builder = builder.set_identity_store_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "userId" => { builder = builder.set_user_id( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_iotevents_actions<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::Action>>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartArray { .. 
}) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_action(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start array or null", )), } } pub fn deser_structure_crate_model_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::Action>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::Action::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "setVariable" => { builder = builder.set_set_variable( crate::json_deser::deser_structure_crate_model_set_variable_action(tokens)? ); } "sns" => { builder = builder.set_sns( crate::json_deser::deser_structure_crate_model_sns_topic_publish_action(tokens)? ); } "iotTopicPublish" => { builder = builder.set_iot_topic_publish( crate::json_deser::deser_structure_crate_model_iot_topic_publish_action(tokens)? ); } "setTimer" => { builder = builder.set_set_timer( crate::json_deser::deser_structure_crate_model_set_timer_action(tokens)? ); } "clearTimer" => { builder = builder.set_clear_timer( crate::json_deser::deser_structure_crate_model_clear_timer_action(tokens)? ); } "resetTimer" => { builder = builder.set_reset_timer( crate::json_deser::deser_structure_crate_model_reset_timer_action(tokens)? ); } "lambda" => { builder = builder.set_lambda( crate::json_deser::deser_structure_crate_model_lambda_action( tokens, )?, ); } "iotEvents" => { builder = builder.set_iot_events( crate::json_deser::deser_structure_crate_model_iot_events_action(tokens)? ); } "sqs" => { builder = builder.set_sqs( crate::json_deser::deser_structure_crate_model_sqs_action( tokens, )?, ); } "firehose" => { builder = builder.set_firehose( crate::json_deser::deser_structure_crate_model_firehose_action( tokens, )?, ); } "dynamoDB" => { builder = builder.set_dynamo_db( crate::json_deser::deser_structure_crate_model_dynamo_db_action(tokens)? ); } "dynamoDBv2" => { builder = builder.set_dynamo_d_bv2( crate::json_deser::deser_structure_crate_model_dynamo_d_bv2_action(tokens)? ); } "iotSiteWise" => { builder = builder.set_iot_site_wise( crate::json_deser::deser_structure_crate_model_iot_site_wise_action(tokens)? ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_set_variable_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SetVariableAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? 
{ Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SetVariableAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "variableName" => { builder = builder.set_variable_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "value" => { builder = builder.set_value( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_set_timer_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::SetTimerAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::SetTimerAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "timerName" => { builder = builder.set_timer_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "seconds" => { builder = builder.set_seconds( aws_smithy_json::deserialize::token::expect_number_or_null( tokens.next(), )? .map(|v| v.to_i32()), ); } "durationExpression" => { builder = builder.set_duration_expression( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_clear_timer_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::ClearTimerAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::ClearTimerAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. 
}) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "timerName" => { builder = builder.set_timer_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_reset_timer_action<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::ResetTimerAction>, aws_smithy_json::deserialize::Error> where I: Iterator< Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::ResetTimerAction::builder(); loop { match tokens.next().transpose()? { Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break, Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "timerName" => { builder = builder.set_timer_name( aws_smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => aws_smithy_json::deserialize::token::skip_value(tokens)?, } } other => { return Err(aws_smithy_json::deserialize::Error::custom(format!( "expected object key or end object, found: {:?}", other ))) } } } Ok(Some(builder.build())) } _ => Err(aws_smithy_json::deserialize::Error::custom( "expected start object or null", )), } }
45.001375
124
0.438992
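The generated deserializers in the file above all instantiate one pattern: pull tokens from a peekable iterator, branch on start-object versus null, feed known keys into a builder, and skip unknown keys for forward compatibility. Below is a minimal self-contained sketch of that pattern; the `Token` enum, `Payload` struct, and `deser_payload` function are simplified stand-ins invented for illustration, not the real `aws_smithy_json` API.

// Toy token stream and builder-style target, mirroring the shape of the
// generated `deser_structure_*` functions above.
#[derive(Debug)]
enum Token<'a> {
    StartObject,
    EndObject,
    ObjectKey(&'a str),
    ValueString(&'a str),
    ValueNull,
}

#[derive(Debug, Default)]
struct Payload {
    content_expression: Option<String>,
}

fn deser_payload<'a, I>(tokens: &mut std::iter::Peekable<I>) -> Result<Option<Payload>, String>
where
    I: Iterator<Item = Token<'a>>,
{
    match tokens.next() {
        // Null deserializes to None, exactly as in the generated code.
        Some(Token::ValueNull) => Ok(None),
        Some(Token::StartObject) => {
            let mut payload = Payload::default();
            loop {
                match tokens.next() {
                    Some(Token::EndObject) => break,
                    Some(Token::ObjectKey(key)) => match key {
                        "contentExpression" => {
                            if let Some(Token::ValueString(s)) = tokens.next() {
                                payload.content_expression = Some(s.to_string());
                            }
                        }
                        // Unknown keys: consume the value token so new server
                        // fields do not break old clients (a real skip_value
                        // would recurse into nested structures).
                        _ => {
                            tokens.next();
                        }
                    },
                    other => return Err(format!("unexpected token: {:?}", other)),
                }
            }
            Ok(Some(payload))
        }
        other => Err(format!("expected object or null, found: {:?}", other)),
    }
}

fn main() {
    let stream = vec![
        Token::StartObject,
        Token::ObjectKey("contentExpression"),
        Token::ValueString("$input.temperature"),
        Token::EndObject,
    ];
    let payload = deser_payload(&mut stream.into_iter().peekable()).unwrap();
    println!("{:?}", payload);
}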
e2436e7312ca5a4d2ebcf9ea8fc52815124bcad0
4,014
//! The `nets` module defines an abstract `Network`. The `Network` trait is a highly-abstracted
//! representation of the relationships between types in a UTXO network. Concrete implementations
//! for various Bitcoin networks are found in the `bitcoin` crate.

use crate::{
    builder::TxBuilder,
    enc::AddressEncoder,
    ser::ByteFormat,
    types::tx::{Input, Output, RecipientIdentifier, Transaction},
};

/// A Network describes a possible UTXO network. It is primarily a collection of types with
/// enforced relationships, but also provides convenient access to the transaction builder,
/// the address encoder, and other network-associated functionality.
///
/// Because we separate some commonly conflated functionality (e.g. output scripts and addresses)
/// we provide Networks to enforce relationships between them. This is why the `Network` trait's
/// associated types are complex. It exists to guarantee consistency of associated types across a
/// large number of disparate elements.
pub trait Network {
    /// A type handling the network's address semantics. This will typically represent some
    /// predicate on the transaction. It is used by both the `Encoder` and the `Builder`.
    type Address;
    /// A type representing the in-protocol recipient. This is usually different from the
    /// Address type.
    type RecipientIdentifier: RecipientIdentifier;
    /// An error type that will be used by the `Encoder`, and returned by the passthrough
    /// `encode_address` and `decode_address` functions.
    type Error;
    /// An `Encoder` that uses the `Address` and `Error` types above. This `Encoder` must
    /// implement `AddressEncoder`. It handles translating the `Address` type to the network's
    /// `RecipientIdentifier` type.
    type Encoder: AddressEncoder<
        Address = Self::Address,
        Error = Self::Error,
        RecipientIdentifier = Self::RecipientIdentifier,
    >;
    /// A transaction Input type. This type is used within the `Transaction` and specifies UTXOs
    /// being spent by the transaction.
    type TxIn: Input + ByteFormat;
    /// A transaction Output type. This type is used within the `Transaction` and specifies
    /// UTXOs being consumed by the transaction.
    type TxOut: Output<RecipientIdentifier = Self::RecipientIdentifier> + ByteFormat;
    /// A Transaction type that uses the `TxIn` and `TxOut`.
    type Tx: Transaction<TxIn = Self::TxIn, TxOut = Self::TxOut>;
    /// A transaction Builder that uses the `Encoder` and `Transaction` types defined earlier.
    /// The builder is returned by `Network::tx_builder()`, and provides a convenient interface
    /// for transaction construction.
    type Builder: TxBuilder<Encoder = Self::Encoder, Transaction = Self::Tx>;

    /// Returns a new instance of the associated transaction builder.
    fn tx_builder() -> Self::Builder {
        Self::Builder::new()
    }

    /// Instantiate a builder from a reference to a tx object.
    fn builder_from_tx_ref(tx: &Self::Tx) -> Self::Builder {
        Self::Builder::from_tx_ref(tx)
    }

    /// Instantiate a builder from a tx object, consuming it.
    fn builder_from_tx(tx: Self::Tx) -> Self::Builder {
        Self::Builder::from_tx(tx)
    }

    /// Instantiate a builder from a hex-serialized transaction.
    fn builder_from_hex(hex_tx: &str) -> Result<Self::Builder, <Self::Tx as Transaction>::TxError> {
        Self::Builder::from_hex_tx(hex_tx)
    }

    /// Encode an address using the network's `Address` semantics.
    fn encode_address(a: &Self::RecipientIdentifier) -> Result<Self::Address, Self::Error> {
        Self::Encoder::encode_address(a)
    }

    /// Decode an address using the network's `Address` semantics.
    fn decode_address(addr: &Self::Address) -> Self::RecipientIdentifier {
        Self::Encoder::decode_address(addr)
    }

    /// Attempt to convert a string into an `Address`.
    fn string_to_address(s: &str) -> Result<Self::Address, Self::Error> {
        Self::Encoder::string_to_address(s)
    }
}
43.630435
100
0.700548
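The `Network` trait in the file above is a bundle of associated types plus passthrough methods with default bodies, so a caller names a single network type and reaches every related type through it. The sketch below reproduces that shape in miniature; `Encoder`, `Net`, `MockEncoder`, and `MockNet` are hypothetical names invented for illustration, not types from the crate.

// A trait whose only job is to convert between representations.
trait Encoder {
    fn encode(raw: u32) -> String;
}

// A "network" trait that bundles an associated Encoder and forwards to it,
// like `Network::encode_address` forwarding to `Self::Encoder`.
trait Net {
    type Encoder: Encoder;

    // Passthrough with a default body: implementors get it for free.
    fn encode_address(raw: u32) -> String {
        Self::Encoder::encode(raw)
    }
}

struct MockEncoder;

impl Encoder for MockEncoder {
    fn encode(raw: u32) -> String {
        format!("addr:{:08x}", raw)
    }
}

struct MockNet;

impl Net for MockNet {
    // Wiring up the associated type is the entire implementation.
    type Encoder = MockEncoder;
}

fn main() {
    // Callers name only the network; the encoder is implied by it.
    assert_eq!(MockNet::encode_address(0xdeadbeef), "addr:deadbeef");
}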
26c7e40665c8b84150670c0c175ae309f5971b1c
14,812
use crate::geom::Cell;
use crate::geom::CellToNdIndex;
use crate::geom::Point;
use crate::geom::PointCloud;
use crate::map::ExpandableGridMap;
use crate::map::GridMap;
use crate::map::NoiseModel;
use crate::math;
use crate::ray_caster;

pub const DEFAULT_HIT_PROBABILITY: f32 = 0.7; // 0.85 in logodds
pub const DEFAULT_MISS_PROBABILITY: f32 = 0.4; // -0.4 in logodds
pub const DEFAULT_MIN_PROBABILITY: f32 = 0.1192; // -2.0 in logodds
pub const DEFAULT_MAX_PROBABILITY: f32 = 0.971; // 3.5 in logodds
pub const DEFAULT_OCCUPIED_THRESHOLD: f32 = 0.5; // 0.0 in logodds
pub const DEFAULT_MAX_RANGE: f32 = 60.0; // meters

/// An approximate noise model for lidar point clouds.
/// Values are stored in logodds to prevent semi-expensive log calculations for each cell update.
pub struct LidarNoiseModel {
    pub hit_probability_logodds: f32,
    pub miss_probability_logodds: f32,
    pub min_probability_logodds: f32,
    pub max_probability_logodds: f32,
    pub occupied_threshold_logodds: f32,
    pub max_range: f32,
}

impl LidarNoiseModel {
    pub fn new(
        hit_probability: f32,
        miss_probability: f32,
        min_probability: f32,
        max_probability: f32,
        occupied_threshold: f32,
        max_range: f32,
    ) -> Self {
        assert!(hit_probability >= 0.0 && hit_probability <= 1.0);
        assert!(miss_probability >= 0.0 && miss_probability <= 1.0);
        assert!(min_probability >= 0.0 && min_probability <= 1.0);
        assert!(max_probability >= 0.0 && max_probability <= 1.0);
        assert!(occupied_threshold >= 0.0 && occupied_threshold <= 1.0);
        LidarNoiseModel {
            hit_probability_logodds: math::logodds_from_probability(hit_probability),
            miss_probability_logodds: math::logodds_from_probability(miss_probability),
            min_probability_logodds: math::logodds_from_probability(min_probability),
            max_probability_logodds: math::logodds_from_probability(max_probability),
            occupied_threshold_logodds: math::logodds_from_probability(occupied_threshold),
            max_range,
        }
    }

    pub fn default() -> Self {
        LidarNoiseModel::new(
            DEFAULT_HIT_PROBABILITY,
            DEFAULT_MISS_PROBABILITY,
            DEFAULT_MIN_PROBABILITY,
            DEFAULT_MAX_PROBABILITY,
            DEFAULT_OCCUPIED_THRESHOLD,
            DEFAULT_MAX_RANGE,
        )
    }

    pub fn integrate_hit<NaD, NdD>(
        &self,
        grid_map: &mut impl ExpandableGridMap<f32, NaD, NdD>,
        cell: &Cell<NaD>,
    ) where
        NaD: na::DimName,
        NdD: nd::Dimension,
        Cell<NaD>: CellToNdIndex<NaD, NdD>,
        na::DefaultAllocator:
            na::allocator::Allocator<f32, NaD> + na::allocator::Allocator<isize, NaD>,
    {
        grid_map.set(
            &cell,
            (grid_map.get(&cell) + self.hit_probability_logodds).min(self.max_probability_logodds),
        );
    }

    pub fn integrate_miss<NaD, NdD>(
        &self,
        grid_map: &mut impl GridMap<f32, NaD, NdD>,
        cell: &Cell<NaD>,
    ) where
        NaD: na::DimName,
        NdD: nd::Dimension,
        Cell<NaD>: CellToNdIndex<NaD, NdD>,
        na::DefaultAllocator:
            na::allocator::Allocator<f32, NaD> + na::allocator::Allocator<isize, NaD>,
    {
        grid_map.set(
            &cell,
            (grid_map.get(&cell) + self.miss_probability_logodds).max(self.min_probability_logodds),
        );
    }
}

impl<NaD, NdD> NoiseModel<NaD, NdD> for LidarNoiseModel
where
    NaD: na::DimName + std::hash::Hash,
    NdD: nd::Dimension,
    Cell<NaD>: CellToNdIndex<NaD, NdD>,
    <na::DefaultAllocator as na::allocator::Allocator<isize, NaD>>::Buffer: std::hash::Hash,
    na::DefaultAllocator: na::allocator::Allocator<f32, NaD> + na::allocator::Allocator<isize, NaD>,
{
    fn default_noise_model() -> Self {
        LidarNoiseModel::default()
    }

    #[inline]
    fn default_cell_value(&self) -> f32 {
        0.0
    }

    fn integrate_point_cloud(
        &self,
        grid_map: &mut impl ExpandableGridMap<f32, NaD, NdD>,
        origin: &Point<NaD>,
        point_cloud: &PointCloud<NaD>,
    ) {
        grid_map.expand_bounds(&point_cloud.bounds(), self.default_cell_value());
        let (free_cells, occupied_cells) = ray_caster::cast_rays(
            origin,
            point_cloud,
            &grid_map.bounds(),
            grid_map.resolution(),
            self.max_range,
        );
        let track_changes = grid_map.track_changes();
        let mut changed_cells: Vec<Cell<NaD>> = vec![];
        if track_changes {
            changed_cells.reserve(free_cells.len() + occupied_cells.len());
        }
        for cell in &free_cells {
            self.integrate_miss(grid_map, cell);
            if track_changes {
                changed_cells.push(cell.clone());
            }
        }
        for cell in &occupied_cells {
            self.integrate_hit(grid_map, cell);
            if track_changes {
                changed_cells.push(cell.clone());
            }
        }
        if track_changes {
            grid_map.add_changed_cells(changed_cells);
        }
    }

    fn occupied(&self, grid_map: &impl GridMap<f32, NaD, NdD>, cell: &Cell<NaD>) -> bool {
        grid_map.get(cell) > self.occupied_threshold_logodds
    }
}

#[cfg(test)]
mod tests {
    use crate as kuba;
    use kuba::prelude::*;

    #[test]
    fn integrate_point_cloud2() {
        let origin = kuba::point2![0.0, 0.0];
        let point_cloud =
            kuba::PointCloud2::from_points(&[kuba::point2![0.1, 0.0], kuba::point2![0.0, 0.1]]);
        let mut grid_map = kuba::GridMap2f::new(
            0.1,
            kuba::Bounds2::empty(),
            kuba::grid_map::DEFAULT_TILE_SIZE,
            0.0,
        );
        let noise_model = kuba::map::LidarNoiseModel::default();
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![0, 0]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![1, 0]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![0, 1]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![1, 1]), false);
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 0]),
            noise_model.miss_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 1]),
            noise_model.hit_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![1, 0]),
            noise_model.hit_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell2![1, 1]), 0.0);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 0]),
            4.0 * noise_model.miss_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 1]),
            4.0 * noise_model.hit_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![1, 0]),
            4.0 * noise_model.hit_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell2![1, 1]), 0.0);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 0]),
            noise_model.min_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 1]),
            noise_model.max_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![1, 0]),
            noise_model.max_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell2![1, 1]), 0.0);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 0]),
            noise_model.min_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![0, 1]),
            noise_model.max_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell2![1, 0]),
            noise_model.max_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell2![1, 1]), 0.0);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![0, 0]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![1, 0]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![0, 1]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell2![1, 1]), false);
    }

    #[test]
    fn integrate_point_cloud3() {
        let origin = kuba::point3![0.0, 0.0, 0.0];
        let point_cloud = kuba::PointCloud3::from_points(&[
            kuba::point3![0.1, 0.0, 0.0],
            kuba::point3![0.0, 0.1, 0.0],
            kuba::point3![0.0, 0.0, 0.1],
        ]);
        let mut grid_map = kuba::GridMap3f::new(
            0.1,
            kuba::bounds3![[0.0, 0.0, 0.0], [0.2, 0.2, 0.2]],
            kuba::grid_map::DEFAULT_TILE_SIZE,
            0.0,
        );
        let noise_model = kuba::map::LidarNoiseModel::default();
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 0, 0]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 1, 0]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 0, 1]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 0, 0]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 1, 1]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 1, 0]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 0, 1]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 1, 1]), false);
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 0]),
            noise_model.miss_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![1, 0, 0]),
            noise_model.hit_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 1, 0]),
            noise_model.hit_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 1]),
            noise_model.hit_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 0]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 0, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![0, 1, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 1]), 0.0);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 0]),
            4.0 * noise_model.miss_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![1, 0, 0]),
            4.0 * noise_model.hit_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 1, 0]),
            4.0 * noise_model.hit_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 1]),
            4.0 * noise_model.hit_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 0]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 0, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![0, 1, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 1]), 0.0);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 0]),
            noise_model.min_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![1, 0, 0]),
            noise_model.max_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 1, 0]),
            noise_model.max_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 1]),
            noise_model.max_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 0]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 0, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![0, 1, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 1]), 0.0);
        noise_model.integrate_point_cloud(&mut grid_map, &origin, &point_cloud);
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 0]),
            noise_model.min_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![1, 0, 0]),
            noise_model.max_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 1, 0]),
            noise_model.max_probability_logodds
        );
        assert_eq!(
            grid_map.get(&kuba::cell3![0, 0, 1]),
            noise_model.max_probability_logodds
        );
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 0]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 0, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![0, 1, 1]), 0.0);
        assert_eq!(grid_map.get(&kuba::cell3![1, 1, 1]), 0.0);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 0, 0]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 1, 0]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 0, 1]), true);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 0, 0]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![0, 1, 1]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 1, 0]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 0, 1]), false);
        assert_eq!(noise_model.occupied(&grid_map, &kuba::cell3![1, 1, 1]), false);
    }
}
35.951456
100
0.565217
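// A minimal sketch (not part of the record above) of the log-odds bookkeeping the
// tests above exercise: each hit or miss adds a constant log-odds increment to a
// cell, clamped to [min, max]. The probabilities below are illustrative
// assumptions, not values taken from the kuba crate.
fn logodds(p: f32) -> f32 {
    (p / (1.0 - p)).ln()
}

fn probability(l: f32) -> f32 {
    1.0 - 1.0 / (1.0 + l.exp())
}

fn integrate(cell: f32, increment: f32, min: f32, max: f32) -> f32 {
    (cell + increment).clamp(min, max)
}

fn main() {
    let hit = logodds(0.7); // positive: evidence that the cell is occupied
    let mut cell = 0.0; // log-odds 0.0 == probability 0.5 (unknown)
    for _ in 0..10 {
        // repeated hits saturate at the max bound instead of growing forever
        cell = integrate(cell, hit, logodds(0.12), logodds(0.97));
    }
    assert!((probability(cell) - 0.97).abs() < 1e-3);
}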
f7a3400e0c604832ca8d3ba642591057cd9cb2f0
8,626
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::error::Error;

#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct ChitonDensityMap {
    map: Vec<usize>,
    width: usize,
    height: usize,
}

impl ChitonDensityMap {
    pub fn parse_string(content: String) -> Result<ChitonDensityMap, Box<dyn Error>> {
        let mut density_map = ChitonDensityMap::default();
        let lines = content.lines().collect::<Vec<_>>();
        density_map.width = lines[0].len();
        density_map.height = lines.len();
        density_map.map = lines
            .iter()
            .flat_map(|&line| line.chars().map(|c| (c as u8 - '0' as u8) as usize))
            .collect::<Vec<_>>();

        Ok(density_map)
    }

    pub fn shortest_path_score(&self) -> usize {
        let mut costs = vec![usize::MAX; self.width * self.height];
        costs[0] = 0;

        let mut queue = Vec::with_capacity(100);
        queue.push((0, 0));

        while queue.len() > 0 {
            let (x, y) = queue.pop().unwrap();
            let current_cost = costs[y * self.width + x];

            // Left
            if x > 0 {
                let target_cost = costs[y * self.width + x - 1];
                if target_cost == usize::MAX
                    || current_cost + self.map[y * self.width + x - 1] < target_cost
                {
                    costs[y * self.width + x - 1] =
                        current_cost + self.map[y * self.width + x - 1];
                    queue.push((x - 1, y));
                }
            }

            // Right
            if x < self.width - 1 {
                let target_cost = costs[y * self.width + x + 1];
                if target_cost == usize::MAX
                    || current_cost + self.map[y * self.width + x + 1] < target_cost
                {
                    costs[y * self.width + x + 1] =
                        current_cost + self.map[y * self.width + x + 1];
                    queue.push((x + 1, y));
                }
            }

            // Top
            if y > 0 {
                let target_cost = costs[(y - 1) * self.width + x];
                if target_cost == usize::MAX
                    || current_cost + self.map[(y - 1) * self.width + x] < target_cost
                {
                    costs[(y - 1) * self.width + x] =
                        current_cost + self.map[(y - 1) * self.width + x];
                    queue.push((x, y - 1));
                }
            }

            // Bottom
            if y < self.height - 1 {
                let target_cost = costs[(y + 1) * self.width + x];
                if target_cost == usize::MAX
                    || current_cost + self.map[(y + 1) * self.width + x] < target_cost
                {
                    costs[(y + 1) * self.width + x] =
                        current_cost + self.map[(y + 1) * self.width + x];
                    queue.push((x, y + 1));
                }
            }
        }

        costs[costs.len() - 1]
    }

    pub fn shortest_path_score_5x(&self) -> usize {
        let mut costs = vec![usize::MAX; (5 * self.width) * (5 * self.height)];
        costs[0] = 0;

        let mut queue = BinaryHeap::new();
        queue.push(State { x: 0, y: 0, cost: 0 });

        while queue.len() > 0 {
            let state = queue.pop().unwrap();
            let x = state.x;
            let y = state.y;
            let current_cost = costs[y * 5 * self.width + x];

            if x == 5 * self.width - 1 && y == 5 * self.height - 1 {
                break;
            }

            // Left
            if x > 0 {
                let target_cost = costs[y * 5 * self.width + x - 1];
                let cell_cost = self.get_5x_cost(x - 1, y);
                if target_cost == usize::MAX || current_cost + cell_cost < target_cost {
                    costs[y * 5 * self.width + x - 1] = current_cost + cell_cost;
                    queue.push(State { x: x - 1, y: y, cost: current_cost + cell_cost });
                }
            }

            // Right
            if x < 5 * self.width - 1 {
                let target_cost = costs[y * 5 * self.width + x + 1];
                let cell_cost = self.get_5x_cost(x + 1, y);
                if target_cost == usize::MAX || current_cost + cell_cost < target_cost {
                    costs[y * 5 * self.width + x + 1] = current_cost + cell_cost;
                    queue.push(State { x: x + 1, y: y, cost: current_cost + cell_cost });
                }
            }

            // Top
            if y > 0 {
                let target_cost = costs[(y - 1) * 5 * self.width + x];
                let cell_cost = self.get_5x_cost(x, y - 1);
                if target_cost == usize::MAX || current_cost + cell_cost < target_cost {
                    costs[(y - 1) * 5 * self.width + x] = current_cost + cell_cost;
                    queue.push(State { x: x, y: y - 1, cost: current_cost + cell_cost });
                }
            }

            // Bottom
            if y < 5 * self.height - 1 {
                let target_cost = costs[(y + 1) * 5 * self.width + x];
                let cell_cost = self.get_5x_cost(x, y + 1);
                if target_cost == usize::MAX || current_cost + cell_cost < target_cost {
                    costs[(y + 1) * 5 * self.width + x] = current_cost + cell_cost;
                    queue.push(State { x: x, y: y + 1, cost: current_cost + cell_cost });
                }
            }
        }

        costs[costs.len() - 1]
    }

    fn get_5x_cost(&self, x: usize, y: usize) -> usize {
        let (xq, xm) = (x / self.width, x % self.width);
        let (yq, ym) = (y / self.height, y % self.height);
        let cost = self.map[ym * self.width + xm] + xq + yq;
        if cost > 9 { cost % 10 + 1 } else { cost }
    }
}

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
struct State {
    x: usize,
    y: usize,
    cost: usize,
}

impl Ord for State {
    fn cmp(&self, other: &Self) -> Ordering {
        other.cost.cmp(&self.cost)
            .then_with(|| self.y.cmp(&other.y))
            .then_with(|| self.x.cmp(&other.x))
    }
}

impl PartialOrd for State {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

#[cfg(test)]
mod tests {
    use crate::ChitonDensityMap;

    #[test]
    fn parse_example_case() {
        let content = "1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581
"
        .to_string();
        let input = ChitonDensityMap::parse_string(content).unwrap();
        assert_eq!(input, ChitonDensityMap {
            map: vec![
                1,1,6,3,7,5,1,7,4,2,
                1,3,8,1,3,7,3,6,7,2,
                2,1,3,6,5,1,1,3,2,8,
                3,6,9,4,9,3,1,5,6,9,
                7,4,6,3,4,1,7,1,1,1,
                1,3,1,9,1,2,8,1,3,7,
                1,3,5,9,9,1,2,4,2,1,
                3,1,2,5,4,2,1,6,3,9,
                1,2,9,3,1,3,8,5,2,1,
                2,3,1,1,9,4,4,5,8,1,
            ],
            width: 10,
            height: 10
        });
    }

    #[test]
    fn part_1_example_case() {
        let input = ChitonDensityMap {
            map: vec![
                1,1,6,3,7,5,1,7,4,2,
                1,3,8,1,3,7,3,6,7,2,
                2,1,3,6,5,1,1,3,2,8,
                3,6,9,4,9,3,1,5,6,9,
                7,4,6,3,4,1,7,1,1,1,
                1,3,1,9,1,2,8,1,3,7,
                1,3,5,9,9,1,2,4,2,1,
                3,1,2,5,4,2,1,6,3,9,
                1,2,9,3,1,3,8,5,2,1,
                2,3,1,1,9,4,4,5,8,1,
            ],
            width: 10,
            height: 10
        };
        assert_eq!(input.shortest_path_score(), 40);
    }

    #[test]
    fn part_2_example_case() {
        let input = ChitonDensityMap {
            map: vec![
                1,1,6,3,7,5,1,7,4,2,
                1,3,8,1,3,7,3,6,7,2,
                2,1,3,6,5,1,1,3,2,8,
                3,6,9,4,9,3,1,5,6,9,
                7,4,6,3,4,1,7,1,1,1,
                1,3,1,9,1,2,8,1,3,7,
                1,3,5,9,9,1,2,4,2,1,
                3,1,2,5,4,2,1,6,3,9,
                1,2,9,3,1,3,8,5,2,1,
                2,3,1,1,9,4,4,5,8,1,
            ],
            width: 10,
            height: 10
        };
        assert_eq!(input.shortest_path_score_5x(), 315);
    }
}
30.697509
113
0.418966
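// Sketch of the heap trick in the record above: `Ord` for `State` compares
// `other.cost` with `self.cost`, which flips Rust's max-heap `BinaryHeap` into a
// min-heap so `pop()` returns the cheapest frontier cell first. The standard
// library offers `Reverse` for the same effect:
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::new();
    for cost in [7_usize, 1, 4] {
        heap.push(Reverse(cost));
    }
    assert_eq!(heap.pop(), Some(Reverse(1))); // smallest cost comes out first
}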
dd859475f9c8e06c58ecdeaf246e749e5269df70
229
// build-pass (FIXME(62277): could be check-pass?)
#![allow(dead_code)]
#![allow(non_upper_case_globals)]

// pretty-expanded FIXME #23616

static mut n_mut: usize = 0;

static n: &'static usize = unsafe{ &n_mut };

fn main() {}
19.083333
50
0.676856
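// Context for the record above: any direct use of a `static mut` is
// unsynchronized shared mutable state, so the compiler demands an `unsafe`
// block even for a plain read or write. A minimal sketch:
static mut COUNTER: usize = 0;

fn bump() -> usize {
    unsafe {
        COUNTER += 1;
        COUNTER
    }
}

fn main() {
    assert_eq!(bump(), 1);
}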
8711d5a04a4fad4a8e964bfa9e670b278bb1f52d
1,550
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_exception::ErrorCode;
use tokio::sync::broadcast;

/// A task that can be started and stopped.
#[async_trait::async_trait]
pub trait Stoppable {
    /// Start working without blocking the calling thread.
    /// When returned, it should have been successfully started.
    /// Otherwise an Err() should be returned.
    ///
    /// Calling `start()` on a started task should get an error.
    async fn start(&mut self) -> Result<(), ErrorCode>;

    /// Blocking stop. It should not return until everything is cleaned up.
    ///
    /// In case a graceful `stop()` has blocked for too long,
    /// the caller may submit a FORCE stop by sending a `()` to `force`.
    /// An impl should either close everything at once, or just ignore the `force` signal if it does not support force stop.
    ///
    /// Calling `stop()` twice should get an error.
    async fn stop(&mut self, mut force: Option<broadcast::Receiver<()>>) -> Result<(), ErrorCode>;
}
41.891892
124
0.696774
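// A sketch of how an implementor might honor the `force` channel described in
// the trait docs above: race the graceful path against the force signal with
// `tokio::select!`. `Echo` is hypothetical and not part of the crate; error
// construction is elided.
use common_exception::ErrorCode;
use tokio::sync::broadcast;

struct Echo {
    running: bool,
}

#[async_trait::async_trait]
impl Stoppable for Echo {
    async fn start(&mut self) -> Result<(), ErrorCode> {
        self.running = true; // spawn worker tasks here
        Ok(())
    }

    async fn stop(&mut self, force: Option<broadcast::Receiver<()>>) -> Result<(), ErrorCode> {
        let graceful = async {
            // drain in-flight work here
        };
        match force {
            Some(mut rx) => tokio::select! {
                _ = graceful => {}  // finished cleanly
                _ = rx.recv() => {} // forced: close everything at once
            },
            None => graceful.await,
        }
        self.running = false;
        Ok(())
    }
}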
d9d9e70e58f0c4f4bd2ab162264a744894e80fdb
2,578
// Copyright 2018 The proptest developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#[macro_use]
extern crate proptest_derive;

#[derive(Debug, Arbitrary)] //~ ERROR: 2 errors
//~| [proptest_derive, E0028]
//~| [proptest_derive, E0006]
enum NonFatal {
    #[proptest(skip, strategy = "(0..10).prop_map(NonFatal::V1)")]
    V1(u8),
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T0 {
    #[proptest(skip, strategy = "(0..10).prop_map(T0::V1)")]
    V1(u8),
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T1 {
    #[proptest(
        skip,
        strategy = "(0..10).prop_map(|field| T0::V1 { field })"
    )]
    V1 { field: u8 },
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T2 {
    #[proptest(skip)]
    V1(
        #[proptest(strategy = "0..10")]
        u8
    ),
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T3 {
    #[proptest(skip)]
    V1 {
        #[proptest(strategy = "0..10")]
        field: u8
    },
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T4 {
    #[proptest(skip, value = "T0::V1(1)")]
    V1(u8),
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T5 {
    #[proptest(skip, value = "T0::V1 { field: 3 }")]
    V1 { field: u8 },
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T6 {
    #[proptest(skip)]
    V1(
        #[proptest(value = "42")]
        u8
    ),
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T7 {
    #[proptest(skip)]
    V1 {
        #[proptest(value = "1337")]
        field: usize
    },
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T8 {
    #[proptest(skip)]
    V1 {
        #[proptest(value("1337"))]
        field: usize
    },
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T9 {
    #[proptest(skip)]
    V1 {
        #[proptest(value(1337))]
        field: usize
    },
    V2,
}

#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T10 {
    #[proptest(skip)]
    V1 {
        #[proptest(value = 1337)]
        field: usize
    },
    V2,
}
20.790323
68
0.554694
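// For contrast with the deliberate E0028 cases above, a sketch of the shape the
// derive is expected to accept: `skip` must stand alone on its variant, with no
// `strategy` or `value` modifier on the variant or its fields, and at least one
// other variant left to generate.
#[derive(Debug, Arbitrary)]
enum Accepted {
    #[proptest(skip)]
    V1(u8),
    V2,
}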
0a7e164ca5b985ad46ba04907a8dbb11a09be163
855
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

trait repeat<A> {
    fn get(&self) -> A;
}

impl<A:Clone + 'static> repeat<A> for ~A {
    fn get(&self) -> A {
        (**self).clone()
    }
}

fn repeater<A:Clone + 'static>(v: ~A) -> ~repeat:<A> {
    // Note: owned kind is not necessary as A appears in the trait type
    ~v as ~repeat:<A> // No
}

pub fn main() {
    let x = 3;
    let y = repeater(~x);
    assert_eq!(x, y.get());
}
29.482759
71
0.644444
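// The record above predates Rust 1.0: `~A` is today's `Box<A>` and
// `~repeat:<A>` is roughly `Box<dyn Repeat<A>>`. A sketch of the modern
// equivalent, using only the standard library:
trait Repeat<A> {
    fn get(&self) -> A;
}

impl<A: Clone + 'static> Repeat<A> for Box<A> {
    fn get(&self) -> A {
        (**self).clone()
    }
}

fn repeater<A: Clone + 'static>(v: Box<A>) -> Box<dyn Repeat<A>> {
    Box::new(v) as Box<dyn Repeat<A>>
}

fn main() {
    let x = 3;
    let y = repeater(Box::new(x));
    assert_eq!(x, y.get());
}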
f85fd524a5d9bd7f35eafaaccb4d42d0fd62b717
37,916
use rustc_data_structures::fx::FxHashMap; use syntax_pos::Span; use crate::hir::def_id::DefId; use crate::hir; use crate::hir::Node; use crate::infer::{self, InferCtxt, InferOk, TypeVariableOrigin}; use crate::infer::outlives::free_region_map::FreeRegionRelations; use crate::traits::{self, PredicateObligation}; use crate::ty::{self, Ty, TyCtxt, GenericParamDefKind}; use crate::ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder, TypeVisitor}; use crate::ty::subst::{Kind, InternalSubsts, SubstsRef, UnpackedKind}; use crate::util::nodemap::DefIdMap; pub type OpaqueTypeMap<'tcx> = DefIdMap<OpaqueTypeDecl<'tcx>>; /// Information about the opaque, abstract types whose values we /// are inferring in this function (these are the `impl Trait` that /// appear in the return type). #[derive(Copy, Clone, Debug)] pub struct OpaqueTypeDecl<'tcx> { /// The substitutions that we apply to the abstract that this /// `impl Trait` desugars to. e.g., if: /// /// fn foo<'a, 'b, T>() -> impl Trait<'a> /// /// winds up desugared to: /// /// abstract type Foo<'x, X>: Trait<'x> /// fn foo<'a, 'b, T>() -> Foo<'a, T> /// /// then `substs` would be `['a, T]`. pub substs: SubstsRef<'tcx>, /// The type variable that represents the value of the abstract type /// that we require. In other words, after we compile this function, /// we will be created a constraint like: /// /// Foo<'a, T> = ?C /// /// where `?C` is the value of this type variable. =) It may /// naturally refer to the type and lifetime parameters in scope /// in this function, though ultimately it should only reference /// those that are arguments to `Foo` in the constraint above. (In /// other words, `?C` should not include `'b`, even though it's a /// lifetime parameter on `foo`.) pub concrete_ty: Ty<'tcx>, /// Returns `true` if the `impl Trait` bounds include region bounds. /// For example, this would be true for: /// /// fn foo<'a, 'b, 'c>() -> impl Trait<'c> + 'a + 'b /// /// but false for: /// /// fn foo<'c>() -> impl Trait<'c> /// /// unless `Trait` was declared like: /// /// trait Trait<'c>: 'c /// /// in which case it would be true. /// /// This is used during regionck to decide whether we need to /// impose any additional constraints to ensure that region /// variables in `concrete_ty` wind up being constrained to /// something from `substs` (or, at minimum, things that outlive /// the fn body). (Ultimately, writeback is responsible for this /// check.) pub has_required_region_bounds: bool, /// The origin of the existential type pub origin: hir::ExistTyOrigin, } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Replaces all opaque types in `value` with fresh inference variables /// and creates appropriate obligations. For example, given the input: /// /// impl Iterator<Item = impl Debug> /// /// this method would create two type variables, `?0` and `?1`. It would /// return the type `?0` but also the obligations: /// /// ?0: Iterator<Item = ?1> /// ?1: Debug /// /// Moreover, it returns a `OpaqueTypeMap` that would map `?0` to /// info about the `impl Iterator<..>` type and `?1` to info about /// the `impl Debug` type. 
/// /// # Parameters /// /// - `parent_def_id` -- the `DefId` of the function in which the opaque type /// is defined /// - `body_id` -- the body-id with which the resulting obligations should /// be associated /// - `param_env` -- the in-scope parameter environment to be used for /// obligations /// - `value` -- the value within which we are instantiating opaque types pub fn instantiate_opaque_types<T: TypeFoldable<'tcx>>( &self, parent_def_id: DefId, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, value: &T, ) -> InferOk<'tcx, (T, OpaqueTypeMap<'tcx>)> { debug!("instantiate_opaque_types(value={:?}, parent_def_id={:?}, body_id={:?}, \ param_env={:?})", value, parent_def_id, body_id, param_env, ); let mut instantiator = Instantiator { infcx: self, parent_def_id, body_id, param_env, opaque_types: Default::default(), obligations: vec![], }; let value = instantiator.instantiate_opaque_types_in_map(value); InferOk { value: (value, instantiator.opaque_types), obligations: instantiator.obligations, } } /// Given the map `opaque_types` containing the existential `impl /// Trait` types whose underlying, hidden types are being /// inferred, this method adds constraints to the regions /// appearing in those underlying hidden types to ensure that they /// at least do not refer to random scopes within the current /// function. These constraints are not (quite) sufficient to /// guarantee that the regions are actually legal values; that /// final condition is imposed after region inference is done. /// /// # The Problem /// /// Let's work through an example to explain how it works. Assume /// the current function is as follows: /// /// ```text /// fn foo<'a, 'b>(..) -> (impl Bar<'a>, impl Bar<'b>) /// ``` /// /// Here, we have two `impl Trait` types whose values are being /// inferred (the `impl Bar<'a>` and the `impl /// Bar<'b>`). Conceptually, this is sugar for a setup where we /// define underlying abstract types (`Foo1`, `Foo2`) and then, in /// the return type of `foo`, we *reference* those definitions: /// /// ```text /// abstract type Foo1<'x>: Bar<'x>; /// abstract type Foo2<'x>: Bar<'x>; /// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. } /// // ^^^^ ^^ /// // | | /// // | substs /// // def_id /// ``` /// /// As indicating in the comments above, each of those references /// is (in the compiler) basically a substitution (`substs`) /// applied to the type of a suitable `def_id` (which identifies /// `Foo1` or `Foo2`). /// /// Now, at this point in compilation, what we have done is to /// replace each of the references (`Foo1<'a>`, `Foo2<'b>`) with /// fresh inference variables C1 and C2. We wish to use the values /// of these variables to infer the underlying types of `Foo1` and /// `Foo2`. That is, this gives rise to higher-order (pattern) unification /// constraints like: /// /// ```text /// for<'a> (Foo1<'a> = C1) /// for<'b> (Foo1<'b> = C2) /// ``` /// /// For these equation to be satisfiable, the types `C1` and `C2` /// can only refer to a limited set of regions. For example, `C1` /// can only refer to `'static` and `'a`, and `C2` can only refer /// to `'static` and `'b`. The job of this function is to impose that /// constraint. /// /// Up to this point, C1 and C2 are basically just random type /// inference variables, and hence they may contain arbitrary /// regions. In fact, it is fairly likely that they do! 
Consider /// this possible definition of `foo`: /// /// ```text /// fn foo<'a, 'b>(x: &'a i32, y: &'b i32) -> (impl Bar<'a>, impl Bar<'b>) { /// (&*x, &*y) /// } /// ``` /// /// Here, the values for the concrete types of the two impl /// traits will include inference variables: /// /// ```text /// &'0 i32 /// &'1 i32 /// ``` /// /// Ordinarily, the subtyping rules would ensure that these are /// sufficiently large. But since `impl Bar<'a>` isn't a specific /// type per se, we don't get such constraints by default. This /// is where this function comes into play. It adds extra /// constraints to ensure that all the regions which appear in the /// inferred type are regions that could validly appear. /// /// This is actually a bit of a tricky constraint in general. We /// want to say that each variable (e.g., `'0`) can only take on /// values that were supplied as arguments to the abstract type /// (e.g., `'a` for `Foo1<'a>`) or `'static`, which is always in /// scope. We don't have a constraint quite of this kind in the current /// region checker. /// /// # The Solution /// /// We make use of the constraint that we *do* have in the `<=` /// relation. To do that, we find the "minimum" of all the /// arguments that appear in the substs: that is, some region /// which is less than all the others. In the case of `Foo1<'a>`, /// that would be `'a` (it's the only choice, after all). Then we /// apply that as a least bound to the variables (e.g., `'a <= /// '0`). /// /// In some cases, there is no minimum. Consider this example: /// /// ```text /// fn baz<'a, 'b>() -> impl Trait<'a, 'b> { ... } /// ``` /// /// Here we would report an error, because `'a` and `'b` have no /// relation to one another. /// /// # The `free_region_relations` parameter /// /// The `free_region_relations` argument is used to find the /// "minimum" of the regions supplied to a given abstract type. /// It must be a relation that can answer whether `'a <= 'b`, /// where `'a` and `'b` are regions that appear in the "substs" /// for the abstract type references (the `<'a>` in `Foo1<'a>`). /// /// Note that we do not impose the constraints based on the /// generic regions from the `Foo1` definition (e.g., `'x`). This /// is because the constraints we are imposing here is basically /// the concern of the one generating the constraining type C1, /// which is the current function. It also means that we can /// take "implied bounds" into account in some cases: /// /// ```text /// trait SomeTrait<'a, 'b> { } /// fn foo<'a, 'b>(_: &'a &'b u32) -> impl SomeTrait<'a, 'b> { .. } /// ``` /// /// Here, the fact that `'b: 'a` is known only because of the /// implied bounds from the `&'a &'b u32` parameter, and is not /// "inherent" to the abstract type definition. /// /// # Parameters /// /// - `opaque_types` -- the map produced by `instantiate_opaque_types` /// - `free_region_relations` -- something that can be used to relate /// the free regions (`'a`) that appear in the impl trait. 
pub fn constrain_opaque_types<FRR: FreeRegionRelations<'tcx>>( &self, opaque_types: &OpaqueTypeMap<'tcx>, free_region_relations: &FRR, ) { debug!("constrain_opaque_types()"); for (&def_id, opaque_defn) in opaque_types { self.constrain_opaque_type(def_id, opaque_defn, free_region_relations); } } pub fn constrain_opaque_type<FRR: FreeRegionRelations<'tcx>>( &self, def_id: DefId, opaque_defn: &OpaqueTypeDecl<'tcx>, free_region_relations: &FRR, ) { debug!("constrain_opaque_type()"); debug!("constrain_opaque_type: def_id={:?}", def_id); debug!("constrain_opaque_type: opaque_defn={:#?}", opaque_defn); let concrete_ty = self.resolve_type_vars_if_possible(&opaque_defn.concrete_ty); debug!("constrain_opaque_type: concrete_ty={:?}", concrete_ty); let abstract_type_generics = self.tcx.generics_of(def_id); let span = self.tcx.def_span(def_id); // If there are required region bounds, we can just skip // ahead. There will already be a registered region // obligation related `concrete_ty` to those regions. if opaque_defn.has_required_region_bounds { return; } // There were no `required_region_bounds`, // so we have to search for a `least_region`. // Go through all the regions used as arguments to the // abstract type. These are the parameters to the abstract // type; so in our example above, `substs` would contain // `['a]` for the first impl trait and `'b` for the // second. let mut least_region = None; for param in &abstract_type_generics.params { match param.kind { GenericParamDefKind::Lifetime => {} _ => continue } // Get the value supplied for this region from the substs. let subst_arg = opaque_defn.substs.region_at(param.index as usize); // Compute the least upper bound of it with the other regions. debug!("constrain_opaque_types: least_region={:?}", least_region); debug!("constrain_opaque_types: subst_arg={:?}", subst_arg); match least_region { None => least_region = Some(subst_arg), Some(lr) => { if free_region_relations.sub_free_regions(lr, subst_arg) { // keep the current least region } else if free_region_relations.sub_free_regions(subst_arg, lr) { // switch to `subst_arg` least_region = Some(subst_arg); } else { // There are two regions (`lr` and // `subst_arg`) which are not relatable. We can't // find a best choice. 
let context_name = match opaque_defn.origin { hir::ExistTyOrigin::ExistentialType => "existential type", hir::ExistTyOrigin::ReturnImplTrait => "impl Trait", hir::ExistTyOrigin::AsyncFn => "async fn", }; let msg = format!("ambiguous lifetime bound in `{}`", context_name); let mut err = self.tcx .sess .struct_span_err(span, &msg); let lr_name = lr.to_string(); let subst_arg_name = subst_arg.to_string(); let label_owned; let label = match (&*lr_name, &*subst_arg_name) { ("'_", "'_") => "the elided lifetimes here do not outlive one another", _ => { label_owned = format!( "neither `{}` nor `{}` outlives the other", lr_name, subst_arg_name, ); &label_owned } }; err.span_label(span, label); if let hir::ExistTyOrigin::AsyncFn = opaque_defn.origin { err.note("multiple unrelated lifetimes are not allowed in \ `async fn`."); err.note("if you're using argument-position elided lifetimes, consider \ switching to a single named lifetime."); } err.emit(); least_region = Some(self.tcx.mk_region(ty::ReEmpty)); break; } } } } let least_region = least_region.unwrap_or(self.tcx.lifetimes.re_static); debug!("constrain_opaque_types: least_region={:?}", least_region); concrete_ty.visit_with(&mut OpaqueTypeOutlivesVisitor { infcx: self, least_region, span, }); } /// Given the fully resolved, instantiated type for an opaque /// type, i.e., the value of an inference variable like C1 or C2 /// (*), computes the "definition type" for an abstract type /// definition -- that is, the inferred value of `Foo1<'x>` or /// `Foo2<'x>` that we would conceptually use in its definition: /// /// abstract type Foo1<'x>: Bar<'x> = AAA; <-- this type AAA /// abstract type Foo2<'x>: Bar<'x> = BBB; <-- or this type BBB /// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. } /// /// Note that these values are defined in terms of a distinct set of /// generic parameters (`'x` instead of `'a`) from C1 or C2. The main /// purpose of this function is to do that translation. /// /// (*) C1 and C2 were introduced in the comments on /// `constrain_opaque_types`. Read that comment for more context. /// /// # Parameters /// /// - `def_id`, the `impl Trait` type /// - `opaque_defn`, the opaque definition created in `instantiate_opaque_types` /// - `instantiated_ty`, the inferred type C1 -- fully resolved, lifted version of /// `opaque_defn.concrete_ty` pub fn infer_opaque_definition_from_instantiation( &self, def_id: DefId, opaque_defn: &OpaqueTypeDecl<'tcx>, instantiated_ty: Ty<'gcx>, ) -> Ty<'gcx> { debug!( "infer_opaque_definition_from_instantiation(def_id={:?}, instantiated_ty={:?})", def_id, instantiated_ty ); let gcx = self.tcx.global_tcx(); // Use substs to build up a reverse map from regions to their // identity mappings. This is necessary because of `impl // Trait` lifetimes are computed by replacing existing // lifetimes with 'static and remapping only those used in the // `impl Trait` return type, resulting in the parameters // shifting. let id_substs = InternalSubsts::identity_for_item(gcx, def_id); let map: FxHashMap<Kind<'tcx>, Kind<'gcx>> = opaque_defn .substs .iter() .enumerate() .map(|(index, subst)| (*subst, id_substs[index])) .collect(); // Convert the type from the function into a type valid outside // the function, by replacing invalid regions with 'static, // after producing an error for each of them. 
let definition_ty = instantiated_ty.fold_with(&mut ReverseMapper::new( self.tcx, self.is_tainted_by_errors(), def_id, map, instantiated_ty, )); debug!( "infer_opaque_definition_from_instantiation: definition_ty={:?}", definition_ty ); // We can unwrap here because our reverse mapper always // produces things with 'gcx lifetime, though the type folder // obscures that. let definition_ty = gcx.lift(&definition_ty).unwrap(); definition_ty } } // Visitor that requires that (almost) all regions in the type visited outlive // `least_region`. We cannot use `push_outlives_components` because regions in // closure signatures are not included in their outlives components. We need to // ensure all regions outlive the given bound so that we don't end up with, // say, `ReScope` appearing in a return type and causing ICEs when other // functions end up with region constraints involving regions from other // functions. // // We also cannot use `for_each_free_region` because for closures it includes // the regions parameters from the enclosing item. // // We ignore any type parameters because impl trait values are assumed to // capture all the in-scope type parameters. struct OpaqueTypeOutlivesVisitor<'a, 'gcx, 'tcx> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, least_region: ty::Region<'tcx>, span: Span, } impl<'tcx> TypeVisitor<'tcx> for OpaqueTypeOutlivesVisitor<'_, '_, 'tcx> { fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> bool { t.skip_binder().visit_with(self); false // keep visiting } fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { match *r { // ignore bound regions, keep visiting ty::ReLateBound(_, _) => false, _ => { self.infcx.sub_regions(infer::CallReturn(self.span), self.least_region, r); false } } } fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { // We're only interested in types involving regions if !ty.flags.intersects(ty::TypeFlags::HAS_FREE_REGIONS) { return false; // keep visiting } match ty.sty { ty::Closure(def_id, ref substs) => { // Skip lifetime parameters of the enclosing item(s) for upvar_ty in substs.upvar_tys(def_id, self.infcx.tcx) { upvar_ty.visit_with(self); } substs.closure_sig_ty(def_id, self.infcx.tcx).visit_with(self); } ty::Generator(def_id, ref substs, _) => { // Skip lifetime parameters of the enclosing item(s) // Also skip the witness type, because that has no free regions. for upvar_ty in substs.upvar_tys(def_id, self.infcx.tcx) { upvar_ty.visit_with(self); } substs.return_ty(def_id, self.infcx.tcx).visit_with(self); substs.yield_ty(def_id, self.infcx.tcx).visit_with(self); } _ => { ty.super_visit_with(self); } } false } } struct ReverseMapper<'cx, 'gcx: 'tcx, 'tcx: 'cx> { tcx: TyCtxt<'cx, 'gcx, 'tcx>, /// If errors have already been reported in this fn, we suppress /// our own errors because they are sometimes derivative. 
tainted_by_errors: bool, opaque_type_def_id: DefId, map: FxHashMap<Kind<'tcx>, Kind<'gcx>>, map_missing_regions_to_empty: bool, /// initially `Some`, set to `None` once error has been reported hidden_ty: Option<Ty<'tcx>>, } impl<'cx, 'gcx, 'tcx> ReverseMapper<'cx, 'gcx, 'tcx> { fn new( tcx: TyCtxt<'cx, 'gcx, 'tcx>, tainted_by_errors: bool, opaque_type_def_id: DefId, map: FxHashMap<Kind<'tcx>, Kind<'gcx>>, hidden_ty: Ty<'tcx>, ) -> Self { Self { tcx, tainted_by_errors, opaque_type_def_id, map, map_missing_regions_to_empty: false, hidden_ty: Some(hidden_ty), } } fn fold_kind_mapping_missing_regions_to_empty(&mut self, kind: Kind<'tcx>) -> Kind<'tcx> { assert!(!self.map_missing_regions_to_empty); self.map_missing_regions_to_empty = true; let kind = kind.fold_with(self); self.map_missing_regions_to_empty = false; kind } fn fold_kind_normally(&mut self, kind: Kind<'tcx>) -> Kind<'tcx> { assert!(!self.map_missing_regions_to_empty); kind.fold_with(self) } } impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for ReverseMapper<'cx, 'gcx, 'tcx> { fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx> { self.tcx } fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { match r { // ignore bound regions that appear in the type (e.g., this // would ignore `'r` in a type like `for<'r> fn(&'r u32)`. ty::ReLateBound(..) | // ignore `'static`, as that can appear anywhere ty::ReStatic | // ignore `ReScope`, which may appear in impl Trait in bindings. ty::ReScope(..) => return r, _ => { } } match self.map.get(&r.into()).map(|k| k.unpack()) { Some(UnpackedKind::Lifetime(r1)) => r1, Some(u) => panic!("region mapped to unexpected kind: {:?}", u), None => { if !self.map_missing_regions_to_empty && !self.tainted_by_errors { if let Some(hidden_ty) = self.hidden_ty.take() { let span = self.tcx.def_span(self.opaque_type_def_id); let mut err = struct_span_err!( self.tcx.sess, span, E0700, "hidden type for `impl Trait` captures lifetime that \ does not appear in bounds", ); // Assuming regionck succeeded, then we must // be capturing *some* region from the fn // header, and hence it must be free, so it's // ok to invoke this fn (which doesn't accept // all regions, and would ICE if an // inappropriate region is given). We check // `is_tainted_by_errors` by errors above, so // we don't get in here unless regionck // succeeded. (Note also that if regionck // failed, then the regions we are attempting // to map here may well be giving errors // *because* the constraints were not // satisfiable.) self.tcx.note_and_explain_free_region( &mut err, &format!("hidden type `{}` captures ", hidden_ty), r, "" ); err.emit(); } } self.tcx.lifetimes.re_empty }, } } fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { match ty.sty { ty::Closure(def_id, substs) => { // I am a horrible monster and I pray for death. When // we encounter a closure here, it is always a closure // from within the function that we are currently // type-checking -- one that is now being encapsulated // in an existential abstract type. Ideally, we would // go through the types/lifetimes that it references // and treat them just like we would any other type, // which means we would error out if we find any // reference to a type/region that is not in the // "reverse map". // // **However,** in the case of closures, there is a // somewhat subtle (read: hacky) consideration. The // problem is that our closure types currently include // all the lifetime parameters declared on the // enclosing function, even if they are unused by the // closure itself. 
We can't readily filter them out, // so here we replace those values with `'empty`. This // can't really make a difference to the rest of the // compiler; those regions are ignored for the // outlives relation, and hence don't affect trait // selection or auto traits, and they are erased // during codegen. let generics = self.tcx.generics_of(def_id); let substs = self.tcx.mk_substs(substs.substs.iter().enumerate().map( |(index, &kind)| { if index < generics.parent_count { // Accommodate missing regions in the parent kinds... self.fold_kind_mapping_missing_regions_to_empty(kind) } else { // ...but not elsewhere. self.fold_kind_normally(kind) } }, )); self.tcx.mk_closure(def_id, ty::ClosureSubsts { substs }) } _ => ty.super_fold_with(self), } } } struct Instantiator<'a, 'gcx: 'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, parent_def_id: DefId, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, opaque_types: OpaqueTypeMap<'tcx>, obligations: Vec<PredicateObligation<'tcx>>, } impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { fn instantiate_opaque_types_in_map<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T { debug!("instantiate_opaque_types_in_map(value={:?})", value); let tcx = self.infcx.tcx; value.fold_with(&mut BottomUpFolder { tcx, ty_op: |ty| { if let ty::Opaque(def_id, substs) = ty.sty { // Check that this is `impl Trait` type is // declared by `parent_def_id` -- i.e., one whose // value we are inferring. At present, this is // always true during the first phase of // type-check, but not always true later on during // NLL. Once we support named abstract types more fully, // this same scenario will be able to arise during all phases. // // Here is an example using `abstract type` that indicates // the distinction we are checking for: // // ```rust // mod a { // pub abstract type Foo: Iterator; // pub fn make_foo() -> Foo { .. } // } // // mod b { // fn foo() -> a::Foo { a::make_foo() } // } // ``` // // Here, the return type of `foo` references a // `Opaque` indeed, but not one whose value is // presently being inferred. You can get into a // similar situation with closure return types // today: // // ```rust // fn foo() -> impl Iterator { .. } // fn bar() { // let x = || foo(); // returns the Opaque assoc with `foo` // } // ``` if let Some(opaque_hir_id) = tcx.hir().as_local_hir_id(def_id) { let parent_def_id = self.parent_def_id; let def_scope_default = || { let opaque_parent_hir_id = tcx.hir().get_parent_item(opaque_hir_id); parent_def_id == tcx.hir() .local_def_id_from_hir_id(opaque_parent_hir_id) }; let (in_definition_scope, origin) = match tcx.hir().find_by_hir_id(opaque_hir_id) { Some(Node::Item(item)) => match item.node { // impl trait hir::ItemKind::Existential(hir::ExistTy { impl_trait_fn: Some(parent), origin, .. }) => (parent == self.parent_def_id, origin), // named existential types hir::ItemKind::Existential(hir::ExistTy { impl_trait_fn: None, origin, .. 
}) => ( may_define_existential_type( tcx, self.parent_def_id, opaque_hir_id, ), origin, ), _ => (def_scope_default(), hir::ExistTyOrigin::ExistentialType), }, Some(Node::ImplItem(item)) => match item.node { hir::ImplItemKind::Existential(_) => ( may_define_existential_type( tcx, self.parent_def_id, opaque_hir_id, ), hir::ExistTyOrigin::ExistentialType, ), _ => (def_scope_default(), hir::ExistTyOrigin::ExistentialType), }, _ => bug!( "expected (impl) item, found {}", tcx.hir().hir_to_string(opaque_hir_id), ), }; if in_definition_scope { return self.fold_opaque_ty(ty, def_id, substs, origin); } debug!( "instantiate_opaque_types_in_map: \ encountered opaque outside its definition scope \ def_id={:?}", def_id, ); } } ty }, lt_op: |lt| lt, ct_op: |ct| ct, }) } fn fold_opaque_ty( &mut self, ty: Ty<'tcx>, def_id: DefId, substs: SubstsRef<'tcx>, origin: hir::ExistTyOrigin, ) -> Ty<'tcx> { let infcx = self.infcx; let tcx = infcx.tcx; debug!( "instantiate_opaque_types: Opaque(def_id={:?}, substs={:?})", def_id, substs ); // Use the same type variable if the exact same Opaque appears more // than once in the return type (e.g., if it's passed to a type alias). if let Some(opaque_defn) = self.opaque_types.get(&def_id) { return opaque_defn.concrete_ty; } let span = tcx.def_span(def_id); let ty_var = infcx.next_ty_var(TypeVariableOrigin::TypeInference(span)); let predicates_of = tcx.predicates_of(def_id); debug!( "instantiate_opaque_types: predicates: {:#?}", predicates_of, ); let bounds = predicates_of.instantiate(tcx, substs); debug!("instantiate_opaque_types: bounds={:?}", bounds); let required_region_bounds = tcx.required_region_bounds(ty, bounds.predicates.clone()); debug!( "instantiate_opaque_types: required_region_bounds={:?}", required_region_bounds ); // make sure that we are in fact defining the *entire* type // e.g., `existential type Foo<T: Bound>: Bar;` needs to be // defined by a function like `fn foo<T: Bound>() -> Foo<T>`. debug!( "instantiate_opaque_types: param_env: {:#?}", self.param_env, ); debug!( "instantiate_opaque_types: generics: {:#?}", tcx.generics_of(def_id), ); self.opaque_types.insert( def_id, OpaqueTypeDecl { substs, concrete_ty: ty_var, has_required_region_bounds: !required_region_bounds.is_empty(), origin, }, ); debug!("instantiate_opaque_types: ty_var={:?}", ty_var); self.obligations.reserve(bounds.predicates.len()); for predicate in bounds.predicates { // Change the predicate to refer to the type variable, // which will be the concrete type instead of the opaque type. // This also instantiates nested instances of `impl Trait`. let predicate = self.instantiate_opaque_types_in_map(&predicate); let cause = traits::ObligationCause::new(span, self.body_id, traits::SizedReturnType); // Require that the predicate holds for the concrete type. debug!("instantiate_opaque_types: predicate={:?}", predicate); self.obligations .push(traits::Obligation::new(cause, self.param_env, predicate)); } ty_var } } /// Returns `true` if `opaque_node_id` is a sibling or a child of a sibling of `def_id`. /// /// ```rust /// pub mod foo { /// pub mod bar { /// pub existential type Baz; /// /// fn f1() -> Baz { .. } /// } /// /// fn f2() -> bar::Baz { .. } /// } /// ``` /// /// Here, `def_id` is the `DefId` of the existential type `Baz` and `opaque_node_id` is the /// `NodeId` of the reference to `Baz` (i.e., the return type of both `f1` and `f2`). /// We return `true` if the reference is within the same module as the existential type /// (i.e., `true` for `f1`, `false` for `f2`). 
pub fn may_define_existential_type( tcx: TyCtxt<'_, '_, '_>, def_id: DefId, opaque_hir_id: hir::HirId, ) -> bool { let mut hir_id = tcx .hir() .as_local_hir_id(def_id) .unwrap(); // named existential types can be defined by any siblings or // children of siblings let mod_id = tcx.hir().get_parent_item(opaque_hir_id); // so we walk up the node tree until we hit the root or the parent // of the opaque type while hir_id != mod_id && hir_id != hir::CRATE_HIR_ID { hir_id = tcx.hir().get_parent_item(hir_id); } // syntactically we are allowed to define the concrete type hir_id == mod_id }
40.901834
100
0.527403
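// A sketch of the situation `ReverseMapper` diagnoses in the record above
// (error E0700 under the pre-2024 lifetime-capture rules this code targets):
// the hidden type behind an `impl Trait` captures a lifetime that does not
// appear in its bounds.
fn captures<'a>(x: &'a u32) -> impl Fn() -> u32 {
    move || *x // ERROR: hidden type (the closure) captures `'a`, not in the bounds
}

// The accepted spelling names the lifetime in the bounds:
fn captures_ok<'a>(x: &'a u32) -> impl Fn() -> u32 + 'a {
    move || *x
}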
3abe13bbc38ad7e2773dc8bb8dbd2b45f4dda9be
912
//! Checks for regressions in the CLI interface code
//!
//! To skip unit tests, and only run integration tests, execute:
//!
//! ```sh
//! cargo test --test integration
//! ```
//!
//! By default, the main binary is used. But you can specify another
//! binary by setting the `COMPILER_BINARY` environment flag. For
//! example:
//!
//! ```sh
//! COMPILER_BINARY="./run" cargo test --test integration
//! ```

use ::optimization::Level;
use integration_test_codegen::*;
use runner_integration_tests::*;
use std::path::PathBuf;

gen_lexer_integration_tests!();
gen_parser_integration_tests!();
gen_ast_reference_integration_tests!();
gen_ast_idempotence_integration_tests!();
gen_semantic_integration_tests!();
gen_lints_integration_tests!();
gen_ast_inspector_tests!();
gen_assembly_integration_tests!();
gen_binary_integration_tests!();
gen_timeout_integration_tests!();
gen_optimization_integration_tests!();
28.5
68
0.754386
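// Sketch of how a runner behind these macros might honor the `COMPILER_BINARY`
// override documented above; the fallback path stands in for "the main binary"
// and is an assumption, not taken from the crate.
use std::path::PathBuf;

fn compiler_binary() -> PathBuf {
    std::env::var_os("COMPILER_BINARY")
        .map(PathBuf::from)
        .unwrap_or_else(|| PathBuf::from("./run"))
}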
6af3383208174c40463730f838c6b41d53949ada
22,857
//! A client for the ledger state, from the perspective of an arbitrary validator. //! //! Use start_tcp_client() to create a client and then import BanksClientExt to //! access its methods. Additional "*_with_context" methods are also available, //! but they are undocumented, may change over time, and are generally more //! cumbersome to use. pub use { crate::error::BanksClientError, solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus}, }; use { borsh::BorshDeserialize, futures::{future::join_all, Future, FutureExt, TryFutureExt}, solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation}, solana_program::{ clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey, rent::Rent, sysvar::Sysvar, }, solana_sdk::{ account::{from_account, Account}, commitment_config::CommitmentLevel, message::Message, signature::Signature, transaction::{self, Transaction}, }, tarpc::{ client::{self, NewClient, RequestDispatch}, context::{self, Context}, serde_transport::tcp, ClientMessage, Response, Transport, }, tokio::{net::ToSocketAddrs, time::Duration}, tokio_serde::formats::Bincode, }; mod error; // This exists only for backward compatibility pub trait BanksClientExt {} #[derive(Clone)] pub struct BanksClient { inner: TarpcClient, } impl BanksClient { #[allow(clippy::new_ret_no_self)] pub fn new<C>( config: client::Config, transport: C, ) -> NewClient<TarpcClient, RequestDispatch<BanksRequest, BanksResponse, C>> where C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>>, { TarpcClient::new(config, transport) } pub fn send_transaction_with_context( &mut self, ctx: Context, transaction: Transaction, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { self.inner .send_transaction_with_context(ctx, transaction) .map_err(Into::into) } #[deprecated( since = "1.9.0", note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead" )] pub fn get_fees_with_commitment_and_context( &mut self, ctx: Context, commitment: CommitmentLevel, ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ { #[allow(deprecated)] self.inner .get_fees_with_commitment_and_context(ctx, commitment) .map_err(Into::into) } pub fn get_transaction_status_with_context( &mut self, ctx: Context, signature: Signature, ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ { self.inner .get_transaction_status_with_context(ctx, signature) .map_err(Into::into) } pub fn get_slot_with_context( &mut self, ctx: Context, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ { self.inner .get_slot_with_context(ctx, commitment) .map_err(Into::into) } pub fn get_block_height_with_context( &mut self, ctx: Context, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ { self.inner .get_block_height_with_context(ctx, commitment) .map_err(Into::into) } pub fn process_transaction_with_commitment_and_context( &mut self, ctx: Context, transaction: Transaction, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Option<transaction::Result<()>>, BanksClientError>> + '_ { self.inner .process_transaction_with_commitment_and_context(ctx, transaction, commitment) .map_err(Into::into) } pub fn process_transaction_with_preflight_and_commitment_and_context( &mut self, ctx: Context, transaction: Transaction, commitment: CommitmentLevel, ) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, 
BanksClientError>> + '_ { self.inner .process_transaction_with_preflight_and_commitment_and_context( ctx, transaction, commitment, ) .map_err(Into::into) } pub fn get_account_with_commitment_and_context( &mut self, ctx: Context, address: Pubkey, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ { self.inner .get_account_with_commitment_and_context(ctx, address, commitment) .map_err(Into::into) } /// Send a transaction and return immediately. The server will resend the /// transaction until either it is accepted by the cluster or the transaction's /// blockhash expires. pub fn send_transaction( &mut self, transaction: Transaction, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { self.send_transaction_with_context(context::current(), transaction) } /// Return the fee parameters associated with a recent, rooted blockhash. The cluster /// will use the transaction's blockhash to look up these same fee parameters and /// use them to calculate the transaction fee. #[deprecated( since = "1.9.0", note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead" )] pub fn get_fees( &mut self, ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ { #[allow(deprecated)] self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default()) } /// Return the cluster Sysvar pub fn get_sysvar<T: Sysvar>( &mut self, ) -> impl Future<Output = Result<T, BanksClientError>> + '_ { self.get_account(T::id()).map(|result| { let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?; from_account::<T, _>(&sysvar).ok_or(BanksClientError::ClientError( "Failed to deserialize sysvar", )) }) } /// Return the cluster rent pub fn get_rent(&mut self) -> impl Future<Output = Result<Rent, BanksClientError>> + '_ { self.get_sysvar::<Rent>() } /// Return a recent, rooted blockhash from the server. The cluster will only accept /// transactions with a blockhash that has not yet expired. Use the `get_fees` /// method to get both a blockhash and the blockhash's last valid slot. #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")] pub fn get_recent_blockhash( &mut self, ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ { #[allow(deprecated)] self.get_fees().map(|result| Ok(result?.1)) } /// Send a transaction and return after the transaction has been rejected or /// reached the given level of commitment. pub fn process_transaction_with_commitment( &mut self, transaction: Transaction, commitment: CommitmentLevel, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { let mut ctx = context::current(); ctx.deadline += Duration::from_secs(50); self.process_transaction_with_commitment_and_context(ctx, transaction, commitment) .map(|result| match result? { None => Err(BanksClientError::ClientError( "invalid blockhash or fee-payer", )), Some(transaction_result) => Ok(transaction_result?), }) } /// Send a transaction and return any preflight (sanitization or simulation) errors, or return /// after the transaction has been rejected or reached the given level of commitment. 
pub fn process_transaction_with_preflight_and_commitment( &mut self, transaction: Transaction, commitment: CommitmentLevel, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { let mut ctx = context::current(); ctx.deadline += Duration::from_secs(50); self.process_transaction_with_preflight_and_commitment_and_context( ctx, transaction, commitment, ) .map(|result| match result? { BanksTransactionResultWithSimulation { result: None, simulation_details: _, } => Err(BanksClientError::ClientError( "invalid blockhash or fee-payer", )), BanksTransactionResultWithSimulation { result: Some(Err(err)), simulation_details: Some(simulation_details), } => Err(BanksClientError::SimulationError { err, logs: simulation_details.logs, units_consumed: simulation_details.units_consumed, return_data: simulation_details.return_data, }), BanksTransactionResultWithSimulation { result: Some(result), simulation_details: _, } => result.map_err(Into::into), }) } /// Send a transaction and return any preflight (sanitization or simulation) errors, or return /// after the transaction has been finalized or rejected. pub fn process_transaction_with_preflight( &mut self, transaction: Transaction, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { self.process_transaction_with_preflight_and_commitment( transaction, CommitmentLevel::default(), ) } /// Send a transaction and return until the transaction has been finalized or rejected. pub fn process_transaction( &mut self, transaction: Transaction, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { self.process_transaction_with_commitment(transaction, CommitmentLevel::default()) } pub async fn process_transactions_with_commitment( &mut self, transactions: Vec<Transaction>, commitment: CommitmentLevel, ) -> Result<(), BanksClientError> { let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect(); let futures = clients .iter_mut() .zip(transactions) .map(|(client, transaction)| { client.process_transaction_with_commitment(transaction, commitment) }); let statuses = join_all(futures).await; statuses.into_iter().collect() // Convert Vec<Result<_, _>> to Result<Vec<_>> } /// Send transactions and return until the transaction has been finalized or rejected. pub fn process_transactions( &mut self, transactions: Vec<Transaction>, ) -> impl Future<Output = Result<(), BanksClientError>> + '_ { self.process_transactions_with_commitment(transactions, CommitmentLevel::default()) } /// Return the most recent rooted slot. All transactions at or below this slot /// are said to be finalized. The cluster will not fork to a higher slot. pub fn get_root_slot(&mut self) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ { self.get_slot_with_context(context::current(), CommitmentLevel::default()) } /// Return the most recent rooted block height. All transactions at or below this height /// are said to be finalized. The cluster will not fork to a higher block height. pub fn get_root_block_height( &mut self, ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ { self.get_block_height_with_context(context::current(), CommitmentLevel::default()) } /// Return the account at the given address at the slot corresponding to the given /// commitment level. If the account is not found, None is returned. 
pub fn get_account_with_commitment( &mut self, address: Pubkey, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ { self.get_account_with_commitment_and_context(context::current(), address, commitment) } /// Return the account at the given address at the time of the most recent root slot. /// If the account is not found, None is returned. pub fn get_account( &mut self, address: Pubkey, ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ { self.get_account_with_commitment(address, CommitmentLevel::default()) } /// Return the unpacked account data at the given address /// If the account is not found, an error is returned pub fn get_packed_account_data<T: Pack>( &mut self, address: Pubkey, ) -> impl Future<Output = Result<T, BanksClientError>> + '_ { self.get_account(address).map(|result| { let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?; T::unpack_from_slice(&account.data) .map_err(|_| BanksClientError::ClientError("Failed to deserialize account")) }) } /// Return the unpacked account data at the given address /// If the account is not found, an error is returned pub fn get_account_data_with_borsh<T: BorshDeserialize>( &mut self, address: Pubkey, ) -> impl Future<Output = Result<T, BanksClientError>> + '_ { self.get_account(address).map(|result| { let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?; T::try_from_slice(&account.data).map_err(Into::into) }) } /// Return the balance in lamports of an account at the given address at the slot /// corresponding to the given commitment level. pub fn get_balance_with_commitment( &mut self, address: Pubkey, commitment: CommitmentLevel, ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ { self.get_account_with_commitment_and_context(context::current(), address, commitment) .map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0))) } /// Return the balance in lamports of an account at the given address at the time /// of the most recent root slot. pub fn get_balance( &mut self, address: Pubkey, ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ { self.get_balance_with_commitment(address, CommitmentLevel::default()) } /// Return the status of a transaction with a signature matching the transaction's first /// signature. Return None if the transaction is not found, which may be because the /// blockhash was expired or the fee-paying account had insufficient funds to pay the /// transaction fee. Note that servers rarely store the full transaction history. This /// method may return None if the transaction status has been discarded. pub fn get_transaction_status( &mut self, signature: Signature, ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ { self.get_transaction_status_with_context(context::current(), signature) } /// Same as get_transaction_status, but for multiple transactions. 
pub async fn get_transaction_statuses( &mut self, signatures: Vec<Signature>, ) -> Result<Vec<Option<TransactionStatus>>, BanksClientError> { // tarpc futures oddly hold a mutable reference back to the client so clone the client upfront let mut clients_and_signatures: Vec<_> = signatures .into_iter() .map(|signature| (self.clone(), signature)) .collect(); let futs = clients_and_signatures .iter_mut() .map(|(client, signature)| client.get_transaction_status(*signature)); let statuses = join_all(futs).await; // Convert Vec<Result<_, _>> to Result<Vec<_>> statuses.into_iter().collect() } pub fn get_latest_blockhash( &mut self, ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ { self.get_latest_blockhash_with_commitment(CommitmentLevel::default()) .map(|result| { result? .map(|x| x.0) .ok_or(BanksClientError::ClientError("valid blockhash not found")) .map_err(Into::into) }) } pub fn get_latest_blockhash_with_commitment( &mut self, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ { self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment) } pub fn get_latest_blockhash_with_commitment_and_context( &mut self, ctx: Context, commitment: CommitmentLevel, ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ { self.inner .get_latest_blockhash_with_commitment_and_context(ctx, commitment) .map_err(Into::into) } pub fn get_fee_for_message_with_commitment_and_context( &mut self, ctx: Context, commitment: CommitmentLevel, message: Message, ) -> impl Future<Output = Result<Option<u64>, BanksClientError>> + '_ { self.inner .get_fee_for_message_with_commitment_and_context(ctx, commitment, message) .map_err(Into::into) } } pub async fn start_client<C>(transport: C) -> Result<BanksClient, BanksClientError> where C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static, { Ok(BanksClient { inner: TarpcClient::new(client::Config::default(), transport).spawn(), }) } pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> Result<BanksClient, BanksClientError> { let transport = tcp::connect(addr, Bincode::default).await?; Ok(BanksClient { inner: TarpcClient::new(client::Config::default(), transport).spawn(), }) } #[cfg(test)] mod tests { use { super::*, solana_banks_server::banks_server::start_local_server, solana_runtime::{ bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache, genesis_utils::create_genesis_config, }, solana_sdk::{message::Message, signature::Signer, system_instruction}, std::sync::{Arc, RwLock}, tarpc::transport, tokio::{runtime::Runtime, time::sleep}, }; #[test] fn test_banks_client_new() { let (client_transport, _server_transport) = transport::channel::unbounded(); BanksClient::new(client::Config::default(), client_transport); } #[test] fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> { // This test shows the preferred way to interact with BanksServer. // It creates a runtime explicitly (no globals via tokio macros) and calls // `runtime.block_on()` just once, to run all the async code. 
let genesis = create_genesis_config(10); let bank = Bank::new_for_tests(&genesis.genesis_config); let slot = bank.slot(); let block_commitment_cache = Arc::new(RwLock::new( BlockCommitmentCache::new_for_tests_with_slots(slot, slot), )); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bob_pubkey = solana_sdk::pubkey::new_rand(); let mint_pubkey = genesis.mint_keypair.pubkey(); let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1); let message = Message::new(&[instruction], Some(&mint_pubkey)); Runtime::new()?.block_on(async { let client_transport = start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1)) .await; let mut banks_client = start_client(client_transport).await?; let recent_blockhash = banks_client.get_latest_blockhash().await?; let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash); banks_client.process_transaction(transaction).await.unwrap(); assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1); Ok(()) }) } #[test] fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> { // The caller may not want to hold the connection open until the transaction // is processed (or blockhash expires). In this test, we verify the // server-side functionality is available to the client. let genesis = create_genesis_config(10); let bank = Bank::new_for_tests(&genesis.genesis_config); let slot = bank.slot(); let block_commitment_cache = Arc::new(RwLock::new( BlockCommitmentCache::new_for_tests_with_slots(slot, slot), )); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let mint_pubkey = &genesis.mint_keypair.pubkey(); let bob_pubkey = solana_sdk::pubkey::new_rand(); let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1); let message = Message::new(&[instruction], Some(mint_pubkey)); Runtime::new()?.block_on(async { let client_transport = start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1)) .await; let mut banks_client = start_client(client_transport).await?; let (recent_blockhash, last_valid_block_height) = banks_client .get_latest_blockhash_with_commitment(CommitmentLevel::default()) .await? .unwrap(); let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash); let signature = transaction.signatures[0]; banks_client.send_transaction(transaction).await?; let mut status = banks_client.get_transaction_status(signature).await?; while status.is_none() { let root_block_height = banks_client.get_root_block_height().await?; if root_block_height > last_valid_block_height { break; } sleep(Duration::from_millis(100)).await; status = banks_client.get_transaction_status(signature).await?; } assert!(status.unwrap().err.is_none()); assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1); Ok(()) }) } }
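// Hedged usage sketch (editorial addition, not part of the original file): shows how the
// typed account readers above might be called. `MyState` and `read_counter` are
// hypothetical names introduced for illustration; the `BorshDeserialize` derive is assumed
// to be available from the `borsh` crate, which this module already depends on.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use solana_sdk::pubkey::Pubkey;

    #[derive(borsh::BorshDeserialize)]
    struct MyState {
        counter: u64,
    }

    // Reads a hypothetical state account and deserializes it with Borsh; errors if the
    // account is missing or its data cannot be deserialized.
    #[allow(dead_code)]
    async fn read_counter(
        client: &mut BanksClient,
        state_address: Pubkey,
    ) -> Result<u64, BanksClientError> {
        let state: MyState = client.get_account_data_with_borsh(state_address).await?;
        Ok(state.counter)
    }
}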
39.890052
102
0.639148
18f8bc5f053159a29bb8ab311e6cc259ad827dcb
2,279
use homectl_types::{ device::{Device, DevicesState}, group::{FlattenedGroupConfig, FlattenedGroupsConfig, GroupDeviceLink, GroupId, GroupsConfig}, }; use super::devices::find_device; #[derive(Clone)] pub struct Groups { config: GroupsConfig, } impl Groups { pub fn new(config: GroupsConfig) -> Self { Groups { config } } pub fn get_flattened_groups(&self, devices: &DevicesState) -> FlattenedGroupsConfig { self.config .iter() .map(|(group_id, group)| { ( group_id.clone(), FlattenedGroupConfig { name: group.name.clone(), device_ids: self .find_group_devices(devices, group_id) .into_iter() .map(|device| device.get_device_key()) .collect(), hidden: group.hidden, }, ) }) .collect() } /// Returns all GroupDeviceLinks that belong to given group pub fn find_group_device_links(&self, group_id: &GroupId) -> Vec<GroupDeviceLink> { let group = self.config.get(group_id); let results = group.map(|group| { let mut results = vec![]; for device_link in group.devices.clone().unwrap_or_default() { results.push(device_link); } for group_link in group.groups.clone().unwrap_or_default() { let mut device_links = self.find_group_device_links(&group_link.group_id); results.append(device_links.as_mut()); } results }); results.unwrap_or_default() } pub fn find_group_devices(&self, devices: &DevicesState, group_id: &GroupId) -> Vec<Device> { let group_device_links = self.find_group_device_links(group_id); group_device_links .iter() .filter_map(|gdl| { find_device( devices, &gdl.integration_id, gdl.device_id.as_ref(), gdl.name.as_ref(), ) }) .collect() } }
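// Hedged usage sketch (editorial addition, not part of the original file). `list_group_members`
// is a hypothetical helper showing how the `Groups` API above resolves a group, including any
// recursively linked subgroups, into concrete devices; only the `Groups` methods themselves
// come from the code above.
#[allow(dead_code)]
fn list_group_members(config: GroupsConfig, devices: &DevicesState, group_id: &GroupId) {
    let groups = Groups::new(config);
    // Direct device links and links inherited from nested groups are flattened
    // into a single device list
    let members = groups.find_group_devices(devices, group_id);
    println!("group has {} device(s)", members.len());
}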
30.386667
97
0.512505
75209c3a727ae1b6443765a9346b61560d8e508f
184
cfg_if::cfg_if! { if #[cfg(unix)] { mod unix; pub use self::unix::*; } else if #[cfg(windows)] { mod windows; pub use self::windows::*; } }
18.4
33
0.461957
efde2e26f14ca7b67a7418ffc99b2ed4307b0c8f
6,202
use fmterr::fmt_err;
use fs_extra::dir::{copy as copy_dir, CopyOptions};
use perseus::{
    internal::{build::build_app, export::export_app, get_path_prefix_server},
    PluginAction, SsrNode,
};
use perseus_engine::app::{
    get_app_root, get_immutable_store, get_locales, get_mutable_store, get_plugins,
    get_static_aliases, get_templates_map, get_translations_manager,
};
use std::fs;
use std::path::PathBuf;

#[tokio::main]
async fn main() {
    let exit_code = real_main().await;
    std::process::exit(exit_code)
}

async fn real_main() -> i32 {
    // We want to be working in the root of `.perseus/`
    std::env::set_current_dir("../").unwrap();
    let plugins = get_plugins::<SsrNode>();
    // Building and exporting must be sequential, but they can be done in parallel with static directory/alias copying
    let exit_code = build_and_export().await;
    if exit_code != 0 {
        return exit_code;
    }
    // After that's done, we can do the two copy operations in parallel
    let copy_dir_handle = tokio::task::spawn_blocking(copy_static_dir);
    let copy_aliases_handle = tokio::task::spawn_blocking(copy_static_aliases);
    // `.unwrap()` propagates any panics in those threads up to the main thread; the wrapped
    // values are the exit codes of the copy operations, which must be checked as well
    let exit_code_1 = copy_dir_handle.await.unwrap();
    let exit_code_2 = copy_aliases_handle.await.unwrap();
    if exit_code_1 != 0 {
        return exit_code_1;
    }
    if exit_code_2 != 0 {
        return exit_code_2;
    }

    plugins
        .functional_actions
        .export_actions
        .after_successful_export
        .run((), plugins.get_plugin_data());
    println!("Static exporting successfully completed!");

    0
}

async fn build_and_export() -> i32 {
    let plugins = get_plugins::<SsrNode>();

    plugins
        .functional_actions
        .build_actions
        .before_build
        .run((), plugins.get_plugin_data());

    let immutable_store = get_immutable_store(&plugins);
    // We don't need this in exporting, but the build process does
    let mutable_store = get_mutable_store();
    let translations_manager = get_translations_manager().await;
    let locales = get_locales(&plugins);

    // Build the site for all the common locales (done in parallel), denying any non-exportable features
    // We need to build and generate those artifacts before we can proceed on to exporting
    let templates_map = get_templates_map::<SsrNode>(&plugins);
    let build_res = build_app(
        &templates_map,
        &locales,
        (&immutable_store, &mutable_store),
        &translations_manager,
        // We use another binary to handle normal building
        true,
    )
    .await;
    if let Err(err) = build_res {
        let err_msg = fmt_err(&err);
        plugins
            .functional_actions
            .export_actions
            .after_failed_build
            .run(err, plugins.get_plugin_data());
        eprintln!("{}", err_msg);
        return 1;
    }
    plugins
        .functional_actions
        .export_actions
        .after_successful_build
        .run((), plugins.get_plugin_data());
    // Turn the build artifacts into self-contained static files
    let app_root = get_app_root(&plugins);
    let export_res = export_app(
        &templates_map,
        // Perseus always uses one HTML file, and there's no point in letting a plugin change that
        "../index.html",
        &locales,
        &app_root,
        &immutable_store,
        &translations_manager,
        get_path_prefix_server(),
    )
    .await;
    if let Err(err) = export_res {
        let err_msg = fmt_err(&err);
        plugins
            .functional_actions
            .export_actions
            .after_failed_export
            .run(err, plugins.get_plugin_data());
        eprintln!("{}", err_msg);
        return 1;
    }

    0
}

fn copy_static_aliases() -> i32 {
    let plugins = get_plugins::<SsrNode>();
    // Loop through any static aliases and copy them in too
    // Unlike with the server, these could override pages!
    // We'll copy from the alias to the path (it could be a directory or a file)
    // Remember: `alias` has a leading `/`!
    for (alias, path) in get_static_aliases(&plugins) {
        let from = PathBuf::from(path);
        let to = format!("dist/exported{}", alias);

        if from.is_dir() {
            if let Err(err) = copy_dir(&from, &to, &CopyOptions::new()) {
                let err_msg = format!(
                    "couldn't copy static alias directory from '{}' to '{}': '{}'",
                    from.to_str().map(|s| s.to_string()).unwrap(),
                    to,
                    fmt_err(&err)
                );
                plugins
                    .functional_actions
                    .export_actions
                    .after_failed_static_alias_dir_copy
                    .run(err.to_string(), plugins.get_plugin_data());
                eprintln!("{}", err_msg);
                return 1;
            }
        } else if let Err(err) = fs::copy(&from, &to) {
            let err_msg = format!(
                "couldn't copy static alias file from '{}' to '{}': '{}'",
                from.to_str().map(|s| s.to_string()).unwrap(),
                to,
                fmt_err(&err)
            );
            plugins
                .functional_actions
                .export_actions
                .after_failed_static_alias_file_copy
                .run(err, plugins.get_plugin_data());
            eprintln!("{}", err_msg);
            return 1;
        }
    }

    0
}

fn copy_static_dir() -> i32 {
    let plugins = get_plugins::<SsrNode>();
    // Copy the `static` directory into the export package if it exists
    // If the user wants extra, they can use static aliases, plugins are unnecessary here
    let static_dir = PathBuf::from("../static");
    if static_dir.exists() {
        if let Err(err) = copy_dir(&static_dir, "dist/exported/.perseus/", &CopyOptions::new()) {
            let err_msg = format!("couldn't copy static directory: '{}'", fmt_err(&err));
            plugins
                .functional_actions
                .export_actions
                .after_failed_static_copy
                .run(err.to_string(), plugins.get_plugin_data());
            eprintln!("{}", err_msg);
            return 1;
        }
    }

    0
}
34.076923
129
0.593679
d98201fa8d64923027d1fd6aff25940d46158909
2,498
use super::FontRef; /// Uniquely generated value for identifying and caching fonts. #[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Debug)] pub struct CacheKey(pub(crate) u64); impl CacheKey { /// Generates a new cache key. pub fn new() -> Self { use core::sync::atomic::{AtomicU64, Ordering}; static KEY: AtomicU64 = AtomicU64::new(1); Self(KEY.fetch_add(1, Ordering::Relaxed)) } /// Returns the underlying value of the key. pub fn value(self) -> u64 { self.0 } } impl Default for CacheKey { fn default() -> Self { Self::new() } } pub struct FontCache<T> { entries: Vec<Entry<T>>, max_entries: usize, epoch: u64, } impl<T> FontCache<T> { pub fn new(max_entries: usize) -> Self { Self { entries: Vec::new(), epoch: 0, max_entries, } } pub fn get<'a>(&'a mut self, font: &FontRef, mut f: impl FnMut(&FontRef) -> T) -> (u64, &'a T) { let (found, index) = self.find(font); if found { let entry = &mut self.entries[index]; entry.epoch = self.epoch; (entry.id, &entry.data) } else { self.epoch += 1; let data = f(font); let id = font.key.value(); if index == self.entries.len() { self.entries.push(Entry { epoch: self.epoch, id, data, }); let entry = self.entries.last().unwrap(); (id, &entry.data) } else { let entry = &mut self.entries[index]; entry.epoch = self.epoch; entry.id = id; entry.data = data; (id, &entry.data) } } } fn find(&self, font: &FontRef) -> (bool, usize) { let mut lowest = 0; let mut lowest_epoch = self.epoch; let id = font.key.value(); for (i, entry) in self.entries.iter().enumerate() { if entry.id == id { return (true, i); } if entry.epoch < lowest_epoch { lowest_epoch = entry.epoch; lowest = i; } } if self.entries.len() < self.max_entries { (false, self.entries.len()) } else { (false, lowest) } } } struct Entry<T> { epoch: u64, id: u64, data: T, }
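// Hedged usage sketch (editorial addition, not part of the original file). `CacheKey::new`
// hands out process-unique, monotonically increasing keys from a shared atomic counter, which
// is what lets `FontCache::get` identify fonts without hashing their contents (and evict the
// entry with the lowest epoch, i.e. the least recently used, once `max_entries` is reached).
#[cfg(test)]
mod cache_key_sketch {
    use super::CacheKey;

    #[test]
    fn keys_are_unique_and_increasing() {
        let a = CacheKey::new();
        let b = CacheKey::new();
        // Each call bumps the shared counter, so sequential keys never collide
        assert!(b.value() > a.value());
        assert_ne!(a, b);
    }
}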
26.020833
100
0.473979
d527ed123f9e8dcdf0ba3933b5774b3c35bab73f
737
pub struct CryptTypeStr<'a>(pub(crate) &'a str);

impl<'a> CryptTypeStr<'a> {
    pub fn as_str(&self) -> &str {
        self.0
    }
}

#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)]
pub enum CryptType {
    Luks1,
    Luks2,
}

impl<'a> From<CryptTypeStr<'a>> for CryptType {
    fn from(string: CryptTypeStr) -> Self {
        match string.as_str() {
            "LUKS1" => CryptType::Luks1,
            "LUKS2" => CryptType::Luks2,
            string => panic!("unknown type string: {}", string),
        }
    }
}

impl From<CryptType> for CryptTypeStr<'static> {
    fn from(t: CryptType) -> Self {
        // The variants must be fully qualified here: bare `Luks1`/`Luks2` would be
        // catch-all binding patterns rather than enum variants, making the first
        // arm match everything.
        let string = match t {
            CryptType::Luks1 => "LUKS1",
            CryptType::Luks2 => "LUKS2",
        };
        CryptTypeStr(string)
    }
}
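// Hedged round-trip sketch (editorial addition, not part of the original file): exercises only
// the conversions defined above, checking that each variant survives a trip through its
// canonical type string.
#[cfg(test)]
mod crypt_type_sketch {
    use super::{CryptType, CryptTypeStr};

    #[test]
    fn type_string_round_trip() {
        for ty in [CryptType::Luks1, CryptType::Luks2] {
            let s: CryptTypeStr = ty.into();
            // Parsing the canonical string yields the original variant back
            assert_eq!(CryptType::from(CryptTypeStr(s.as_str())), ty);
        }
    }
}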
22.333333
64
0.540027
b9acda0eddbdd46c73e32e40b14f1b38f637b9f0
1,079
extern crate dmi; #[test] fn table_checksum_is_valid() { let s = dmi::Smbios { anchor: *b"_SM_", checksum: 0xc2, length: 0x1f, major_version: 0x02, minor_version: 0x07, max_structure_size: 0xb8, revision: 0x00, formatted: [0x00, 0x00, 0x00, 0x00, 0x00], inter_anchor: *b"_DMI_", inter_checksum: 0x3e, table_length: 0x0c15, table_address: 0x000e92f0, structure_count: 0x0052, bcd_revision: 27, }; assert!(s.is_valid()); } #[test] fn table_checksum_is_invalid() { let s = dmi::Smbios { anchor: *b"_SM_", checksum: 0x00, length: 0x1f, major_version: 0x02, minor_version: 0x07, max_structure_size: 0xb8, revision: 0x00, formatted: [0x00, 0x00, 0x00, 0x00, 0x00], inter_anchor: *b"_DMI_", inter_checksum: 0x3e, table_length: 0x0c15, table_address: 0x000e92f0, structure_count: 0x0052, bcd_revision: 27, }; assert!(!s.is_valid()); }
23.456522
50
0.569045
ed0ba18fd57a6ec3e3d7870c2b3efd7f3c772457
253
use collisions::shapes::Shape; pub trait Intersection<T: Shape> { type Output; fn intersection(&self, other: &T) -> Option<Self::Output>; fn fast_intersection(&self, other: &T) -> bool { self.intersection(other).is_some() } }
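// Hedged implementation sketch (editorial addition, not part of the original file), kept as a
// comment because the `Aabb` type and its fields are hypothetical. It illustrates the intended
// shape of an `Intersection` impl: return the overlap region when one exists, and let the
// default `fast_intersection` fall out for free. It assumes `Aabb` implements the `Shape`
// trait from `collisions::shapes`.
//
// impl Intersection<Aabb> for Aabb {
//     // The overlap of two axis-aligned boxes is itself a box
//     type Output = Aabb;
//
//     fn intersection(&self, other: &Aabb) -> Option<Aabb> {
//         let min = self.min.max(other.min);
//         let max = self.max.min(other.max);
//         // No overlap on some axis means no intersection
//         if min.x < max.x && min.y < max.y {
//             Some(Aabb { min, max })
//         } else {
//             None
//         }
//     }
// }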
21.083333
62
0.636364
6af0ab11f6eb5481ce4537c553046ddd0a02725b
6,750
use std::iter::{self, FromIterator}; use std::{slice, vec}; use super::container::Container; use crate::{NonSortedIntegers, RoaringBitmap}; /// An iterator for `RoaringBitmap`. pub struct Iter<'a> { inner: iter::Flatten<slice::Iter<'a, Container>>, size_hint: u64, } /// An iterator for `RoaringBitmap`. pub struct IntoIter { inner: iter::Flatten<vec::IntoIter<Container>>, size_hint: u64, } impl Iter<'_> { fn new(containers: &[Container]) -> Iter { let size_hint = containers.iter().map(|c| c.len()).sum(); Iter { inner: containers.iter().flatten(), size_hint } } } impl IntoIter { fn new(containers: Vec<Container>) -> IntoIter { let size_hint = containers.iter().map(|c| c.len()).sum(); IntoIter { inner: containers.into_iter().flatten(), size_hint } } } impl Iterator for Iter<'_> { type Item = u32; fn next(&mut self) -> Option<u32> { self.size_hint = self.size_hint.saturating_sub(1); self.inner.next() } fn size_hint(&self) -> (usize, Option<usize>) { if self.size_hint < usize::MAX as u64 { (self.size_hint as usize, Some(self.size_hint as usize)) } else { (usize::MAX, None) } } } impl DoubleEndedIterator for Iter<'_> { fn next_back(&mut self) -> Option<Self::Item> { self.size_hint = self.size_hint.saturating_sub(1); self.inner.next_back() } } #[cfg(target_pointer_width = "64")] impl ExactSizeIterator for Iter<'_> { fn len(&self) -> usize { self.size_hint as usize } } impl Iterator for IntoIter { type Item = u32; fn next(&mut self) -> Option<u32> { self.size_hint = self.size_hint.saturating_sub(1); self.inner.next() } fn size_hint(&self) -> (usize, Option<usize>) { if self.size_hint < usize::MAX as u64 { (self.size_hint as usize, Some(self.size_hint as usize)) } else { (usize::MAX, None) } } } impl DoubleEndedIterator for IntoIter { fn next_back(&mut self) -> Option<Self::Item> { self.size_hint = self.size_hint.saturating_sub(1); self.inner.next_back() } } #[cfg(target_pointer_width = "64")] impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.size_hint as usize } } impl RoaringBitmap { /// Iterator over each value stored in the RoaringBitmap, guarantees values are ordered by value. /// /// # Examples /// /// ```rust /// use roaring::RoaringBitmap; /// use std::iter::FromIterator; /// /// let bitmap = (1..3).collect::<RoaringBitmap>(); /// let mut iter = bitmap.iter(); /// /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), None); /// ``` pub fn iter(&self) -> Iter { Iter::new(&self.containers) } } impl<'a> IntoIterator for &'a RoaringBitmap { type Item = u32; type IntoIter = Iter<'a>; fn into_iter(self) -> Iter<'a> { self.iter() } } impl IntoIterator for RoaringBitmap { type Item = u32; type IntoIter = IntoIter; fn into_iter(self) -> IntoIter { IntoIter::new(self.containers) } } impl FromIterator<u32> for RoaringBitmap { fn from_iter<I: IntoIterator<Item = u32>>(iterator: I) -> RoaringBitmap { let mut rb = RoaringBitmap::new(); rb.extend(iterator); rb } } impl Extend<u32> for RoaringBitmap { fn extend<I: IntoIterator<Item = u32>>(&mut self, iterator: I) { for value in iterator { self.insert(value); } } } impl RoaringBitmap { /// Create the set from a sorted iterator. Values must be sorted and deduplicated. /// /// The values of the iterator must be ordered and strictly greater than the greatest value /// in the set. If a value in the iterator doesn't satisfy this requirement, it is not added /// and the append operation is stopped. 
/// /// Returns `Ok` with the requested `RoaringBitmap`, `Err` with the number of elements /// that were correctly appended before failure. /// /// # Example: Create a set from an ordered list of integers. /// /// ```rust /// use roaring::RoaringBitmap; /// /// let mut rb = RoaringBitmap::from_sorted_iter(0..10).unwrap(); /// /// assert!(rb.iter().eq(0..10)); /// ``` /// /// # Example: Try to create a set from a non-ordered list of integers. /// /// ```rust /// use roaring::RoaringBitmap; /// /// let integers = 0..10u32; /// let error = RoaringBitmap::from_sorted_iter(integers.rev()).unwrap_err(); /// /// assert_eq!(error.valid_until(), 1); /// ``` pub fn from_sorted_iter<I: IntoIterator<Item = u32>>( iterator: I, ) -> Result<RoaringBitmap, NonSortedIntegers> { let mut rb = RoaringBitmap::new(); rb.append(iterator).map(|_| rb) } /// Extend the set with a sorted iterator. /// /// The values of the iterator must be ordered and strictly greater than the greatest value /// in the set. If a value in the iterator doesn't satisfy this requirement, it is not added /// and the append operation is stopped. /// /// Returns `Ok` with the number of elements appended to the set, `Err` with /// the number of elements we effectively appended before an error occurred. /// /// # Examples /// /// ```rust /// use roaring::RoaringBitmap; /// /// let mut rb = RoaringBitmap::new(); /// assert_eq!(rb.append(0..10), Ok(10)); /// /// assert!(rb.iter().eq(0..10)); /// ``` pub fn append<I: IntoIterator<Item = u32>>( &mut self, iterator: I, ) -> Result<u64, NonSortedIntegers> { // Name shadowed to prevent accidentally referencing the param let mut iterator = iterator.into_iter(); let mut prev = match (iterator.next(), self.max()) { (None, _) => return Ok(0), (Some(first), Some(max)) if first <= max => { return Err(NonSortedIntegers { valid_until: 0 }) } (Some(first), _) => first, }; // It is now guaranteed that so long as the values of the iterator are // monotonically increasing they must also be the greatest in the set. self.push_unchecked(prev); let mut count = 1; for value in iterator { if value <= prev { return Err(NonSortedIntegers { valid_until: count }); } else { self.push_unchecked(value); prev = value; count += 1; } } Ok(count) } }
28.008299
101
0.580296
1db7e9925420852bf15ac3a40a283e80527a8436
9,223
/// Macro to create several `Parameter`s at once. /// /// # Example /// ``` /// use ics::parameters; /// use ics::components::Property; /// use ics::properties::DtStart; /// /// # fn main() { /// let mut date = DtStart::new("20180906"); /// date.append(parameters!("TZID" => "America/New_York"; "VALUE" => "DATE")); /// assert_eq!( /// Property::from(date).to_string(), /// "DTSTART;TZID=America/New_York;VALUE=DATE:20180906\r\n" /// ); /// # } /// ``` #[macro_export] macro_rules! parameters { ($($key:expr => $value:expr);*) => { { use std::collections::BTreeMap; use $crate::components::Parameters; let mut parameters: Parameters = BTreeMap::new(); $( parameters.insert($key.into(), $value.into()); )* parameters } }; } #[cfg(test)] mod test { use crate::components::Parameters; use std::collections::BTreeMap; #[test] fn parameters() { let mut b_map: Parameters = BTreeMap::new(); b_map.insert("VALUE".into(), "BOOLEAN".into()); b_map.insert("CUTYPE".into(), "GROUP".into()); let param = parameters!("VALUE" => "BOOLEAN"; "CUTYPE" => "GROUP"); assert_eq!(b_map, param); } } // Creation and conversion from builder types to Property macro_rules! property { ($(#[$outer:meta])* $type:ident, $name:expr) => { #[doc = "`"]#[doc=$name]#[doc = "` Property"] /// $(#[$outer])* #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct $type<'a> { value: Cow<'a, str>, parameters: Parameters<'a> } impl<'a> $type<'a> { #[doc = "Creates a new `"]#[doc=$name]#[doc = "` Property with the given value."] pub fn new<S>(value: S) -> Self where S: Into<Cow<'a, str>> { Self { value: value.into(), parameters: BTreeMap::new() } } /// Adds a parameter to the property. pub fn add<P>(&mut self, parameter: P) where P: Into<Parameter<'a>> { let param = parameter.into(); self.parameters.insert(param.key, param.value); } /// Adds several parameters at once to the property. For creating /// several parameters at once, consult the documentation of /// the [`parameters!`] macro. pub fn append(&mut self, mut parameters: Parameters<'a>) { self.parameters.append(&mut parameters); } } impl_from_prop!($type, $name); }; } macro_rules! property_with_constructor { ( $(#[$outer:meta])* $type:ident, $name:expr, $($(#[$inner:meta])* fn $const_ident:ident() { $value:expr });* ) => { #[doc = "`"]#[doc=$name]#[doc = "` Property"] /// $(#[$outer])* #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct $type<'a> { value: Cow<'a, str>, parameters: Parameters<'a> } impl<'a> $type<'a> { #[doc = "Creates a new `"]#[doc=$name]#[doc = "` Property with the given value."] pub fn new<S>(value: S) -> Self where S: Into<Cow<'a, str>> { Self { value: value.into(), parameters: BTreeMap::new() } } $( $(#[$inner])* /// #[doc = "Property Value: "]#[doc = $value] pub fn $const_ident() -> Self { Self::new($value) } )* /// Adds a parameter to the property. pub fn add<P>(&mut self, parameter: P) where P: Into<Parameter<'a>> { let param = parameter.into(); self.parameters.insert(param.key, param.value); } /// Adds several parameters at once to the property. For creating /// several parameters at once, consult the documentation of /// the [`parameters!`] macro. pub fn append(&mut self, mut parameters: Parameters<'a>) { self.parameters.append(&mut parameters); } } impl_from_prop!($type, $name); }; } // Creation and conversion from builder types to Property with default value // types as parameter // This matters right now only for the newer properties from RFC7986. #[cfg(feature = "rfc7986")] macro_rules! 
property_with_parameter { ($type:ident, $name:expr, $value:expr) => { #[doc = "`"]#[doc=$name]#[doc = "` Property"] /// /// Newer properties that have a different value type than `TEXT` have to include the `VALUE` parameter. #[doc = "This property already contains `VALUE:"]#[doc=$value]#[doc="`. Do not add this parameter manually."] #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct $type<'a> { value: Cow<'a, str>, parameters: Parameters<'a> } impl<'a> $type<'a> { #[doc = "Creates a new `"]#[doc=$name]#[doc = "` Property with the given value."] pub fn new<S>(value: S) -> Self where S: Into<Cow<'a, str>> { Self { value: value.into(), parameters: parameters!("VALUE" => $value) } } /// Adds a parameter to the property. pub fn add<P>(&mut self, parameter: P) where P: Into<Parameter<'a>> { let param = parameter.into(); self.parameters.insert(param.key, param.value); } /// Adds several parameters at once to the property. For creating /// several parameters at once, consult the documentation of /// the [`parameters!`] macro. pub fn append(&mut self, mut parameters: Parameters<'a>) { self.parameters.append(&mut parameters); } } impl_from_prop!($type, $name); }; } // Creation and conversion from builder types to Parameter macro_rules! parameter { ($(#[$outer:meta])* $type:ident, $name:expr) => { #[doc = "`"]#[doc=$name]#[doc = "` Parameter"] /// $(#[$outer])* #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct $type<'a> { value: Cow<'a, str> } impl<'a> $type<'a> { #[doc = "Creates a new `"]#[doc=$name]#[doc = "` Parameter with the given value."] pub fn new<S>(value: S) -> Self where S: Into<Cow<'a, str>> { Self { value: value.into() } } } impl_from_param!($type, $name); }; } macro_rules! parameter_with_const { ( $(#[$outer:meta])* $type:ident, $name:expr, $($(#[$inner:meta])* const $const_ident:ident = $value:expr);* ) => { #[doc = "`"]#[doc=$name]#[doc = "` Parameter"] /// $(#[$outer])* #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct $type<'a> { value: Cow<'a, str> } impl<'a> $type<'a> { #[doc = "Creates a new `"]#[doc=$name]#[doc = "` Parameter with the given value."] pub fn new<S>(value: S) -> Self where S: Into<Cow<'a, str>> { Self { value: value.into() } } $( $(#[$inner])* /// #[doc = "Parameter Value: "]#[doc = $value] pub const $const_ident: Self = Self { value: Cow::Borrowed($value) }; )* } impl_from_param!($type, $name); }; } macro_rules! impl_default_prop { ($type:ident, $default:expr) => { impl<'a> Default for $type<'a> { fn default() -> Self { Self { value: $default.into(), parameters: BTreeMap::new() } } } }; } macro_rules! impl_from_prop { ($type:ident, $name:expr) => { impl<'a> From<$type<'a>> for Property<'a> { fn from(builder: $type<'a>) -> Self { Property { key: $name.into(), value: builder.value, parameters: builder.parameters } } } }; } macro_rules! impl_from_param { ($type:ident, $name:expr) => { impl<'a> From<$type<'a>> for Parameter<'a> { fn from(builder: $type<'a>) -> Self { Parameter { key: $name.into(), value: builder.value } } } }; }
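// Hedged usage sketch (editorial addition, not part of the original file), kept as a comment
// because it relies on an in-crate `property!` invocation. `Description` is shown purely to
// illustrate the expansion; the generated `new`/`add`/`append` API and the serialized form
// follow the macro bodies and the `parameters!` doc example above.
//
// property!(Description, "DESCRIPTION");
//
// let mut desc = Description::new("Team meeting");
// desc.append(parameters!("LANGUAGE" => "en"));
// // Converting into a `Property` serializes as:
// // "DESCRIPTION;LANGUAGE=en:Team meeting\r\n"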
30.539735
117
0.459287
64155d620111559dddcb367332029224135d2bb2
3,639
#[doc = "Reader of register MB2_64B_WORD2"] pub type R = crate::R<u32, super::MB2_64B_WORD2>; #[doc = "Writer for register MB2_64B_WORD2"] pub type W = crate::W<u32, super::MB2_64B_WORD2>; #[doc = "Register MB2_64B_WORD2 `reset()`'s with value 0"] impl crate::ResetValue for super::MB2_64B_WORD2 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `DATA_BYTE_11`"] pub type DATA_BYTE_11_R = crate::R<u8, u8>; #[doc = "Write proxy for field `DATA_BYTE_11`"] pub struct DATA_BYTE_11_W<'a> { w: &'a mut W, } impl<'a> DATA_BYTE_11_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff); self.w } } #[doc = "Reader of field `DATA_BYTE_10`"] pub type DATA_BYTE_10_R = crate::R<u8, u8>; #[doc = "Write proxy for field `DATA_BYTE_10`"] pub struct DATA_BYTE_10_W<'a> { w: &'a mut W, } impl<'a> DATA_BYTE_10_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8); self.w } } #[doc = "Reader of field `DATA_BYTE_9`"] pub type DATA_BYTE_9_R = crate::R<u8, u8>; #[doc = "Write proxy for field `DATA_BYTE_9`"] pub struct DATA_BYTE_9_W<'a> { w: &'a mut W, } impl<'a> DATA_BYTE_9_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 16)) | (((value as u32) & 0xff) << 16); self.w } } #[doc = "Reader of field `DATA_BYTE_8`"] pub type DATA_BYTE_8_R = crate::R<u8, u8>; #[doc = "Write proxy for field `DATA_BYTE_8`"] pub struct DATA_BYTE_8_W<'a> { w: &'a mut W, } impl<'a> DATA_BYTE_8_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 24)) | (((value as u32) & 0xff) << 24); self.w } } impl R { #[doc = "Bits 0:7 - Data byte 0 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_11(&self) -> DATA_BYTE_11_R { DATA_BYTE_11_R::new((self.bits & 0xff) as u8) } #[doc = "Bits 8:15 - Data byte 1 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_10(&self) -> DATA_BYTE_10_R { DATA_BYTE_10_R::new(((self.bits >> 8) & 0xff) as u8) } #[doc = "Bits 16:23 - Data byte 2 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_9(&self) -> DATA_BYTE_9_R { DATA_BYTE_9_R::new(((self.bits >> 16) & 0xff) as u8) } #[doc = "Bits 24:31 - Data byte 3 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_8(&self) -> DATA_BYTE_8_R { DATA_BYTE_8_R::new(((self.bits >> 24) & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - Data byte 0 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_11(&mut self) -> DATA_BYTE_11_W { DATA_BYTE_11_W { w: self } } #[doc = "Bits 8:15 - Data byte 1 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_10(&mut self) -> DATA_BYTE_10_W { DATA_BYTE_10_W { w: self } } #[doc = "Bits 16:23 - Data byte 2 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_9(&mut self) -> DATA_BYTE_9_W { DATA_BYTE_9_W { w: self } } #[doc = "Bits 24:31 - Data byte 3 of Rx/Tx frame."] #[inline(always)] pub fn data_byte_8(&mut self) -> DATA_BYTE_8_W { DATA_BYTE_8_W { w: self } } }
32.20354
86
0.580654
e2b498834314e5a0ae9b06aeae2bc010740400c6
523
use crate::pixiv::helper_structs::comment::Comment;
use serde::{Deserialize, Serialize};

/// A paginated collection of comments on an illustration.
#[derive(Serialize, Deserialize, Debug)]
pub struct IllustrationComment {
    comments: Vec<Comment>,
    next_url: String,
    total_comments: u32,
}

impl IntoIterator for IllustrationComment {
    type Item = Comment;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    /// Consume the struct, yielding an iterator over its comments.
    fn into_iter(self) -> Self::IntoIter {
        self.comments.into_iter()
    }
}
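// Hedged usage sketch (editorial addition, not part of the original file), kept as a comment
// because the JSON body is a hypothetical placeholder (its exact shape depends on the
// `Comment` type) and `serde_json` is assumed to be available.
//
// fn walk_comments(body: &str) -> serde_json::Result<()> {
//     let page: IllustrationComment = serde_json::from_str(body)?;
//     // `IntoIterator` consumes the page, yielding each `Comment` in order
//     for comment in page {
//         let _ = comment;
//     }
//     Ok(())
// }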
24.904762
51
0.697897
bff15785e82ce867f0b9d16c78f2951632393a3b
138,515
use { crate::{ block_error::BlockError, blockstore::Blockstore, blockstore_db::BlockstoreError, blockstore_meta::SlotMeta, leader_schedule_cache::LeaderScheduleCache, }, chrono_humanize::{Accuracy, HumanTime, Tense}, crossbeam_channel::Sender, itertools::Itertools, log::*, rand::{seq::SliceRandom, thread_rng}, rayon::{prelude::*, ThreadPool}, solana_entry::entry::{ self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers, }, solana_measure::measure::Measure, solana_metrics::{datapoint_error, inc_new_counter_debug}, solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings}, solana_rayon_threadlimit::get_thread_count, solana_runtime::{ accounts_db::{AccountShrinkThreshold, AccountsDbConfig}, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, bank::{ Bank, RentDebits, TransactionBalancesSet, TransactionExecutionResult, TransactionResults, }, bank_forks::BankForks, bank_utils, block_cost_limits::*, commitment::VOTE_THRESHOLD_SIZE, cost_model::CostModel, snapshot_config::SnapshotConfig, snapshot_package::{AccountsPackageSender, SnapshotType}, snapshot_utils, transaction_batch::TransactionBatch, transaction_cost_metrics_sender::TransactionCostMetricsSender, vote_account::VoteAccount, vote_sender_types::ReplayVoteSender, }, solana_sdk::{ clock::{Slot, MAX_PROCESSING_AGE}, feature_set, genesis_config::GenesisConfig, hash::Hash, instruction::InstructionError, pubkey::Pubkey, signature::{Keypair, Signature}, timing, transaction::{ Result, SanitizedTransaction, TransactionError, TransactionVerificationMode, VersionedTransaction, }, }, solana_transaction_status::token_balances::{ collect_token_balances, TransactionTokenBalancesSet, }, std::{ borrow::Cow, cell::RefCell, collections::{HashMap, HashSet}, path::PathBuf, result, sync::{Arc, RwLock}, time::{Duration, Instant}, }, thiserror::Error, }; // it tracks the block cost available capacity - number of compute-units allowed // by max block cost limit #[derive(Debug)] pub struct BlockCostCapacityMeter { pub capacity: u64, pub accumulated_cost: u64, } impl Default for BlockCostCapacityMeter { fn default() -> Self { BlockCostCapacityMeter::new(MAX_BLOCK_UNITS) } } impl BlockCostCapacityMeter { pub fn new(capacity_limit: u64) -> Self { Self { capacity: capacity_limit, accumulated_cost: 0_u64, } } // return the remaining capacity pub fn accumulate(&mut self, cost: u64) -> u64 { self.accumulated_cost += cost; self.capacity.saturating_sub(self.accumulated_cost) } } pub type BlockstoreProcessorInner = (BankForks, LeaderScheduleCache, Option<Slot>); pub type BlockstoreProcessorResult = result::Result<BlockstoreProcessorInner, BlockstoreProcessorError>; thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) .thread_name(|ix| format!("blockstore_processor_{}", ix)) .build() .unwrap()) ); fn first_err(results: &[Result<()>]) -> Result<()> { for r in results { if r.is_err() { return r.clone(); } } Ok(()) } // Includes transaction signature for unit-testing fn get_first_error( batch: &TransactionBatch, fee_collection_results: Vec<Result<()>>, ) -> Option<(Result<()>, Signature)> { let mut first_err = None; for (result, transaction) in fee_collection_results .iter() .zip(batch.sanitized_transactions()) { if let Err(ref err) = result { if first_err.is_none() { first_err = Some((result.clone(), *transaction.signature())); } warn!( "Unexpected validator error: {:?}, transaction: {:?}", err, 
transaction ); datapoint_error!( "validator_process_entry_error", ( "error", format!("error: {:?}, transaction: {:?}", err, transaction), String ) ); } } first_err } fn aggregate_total_execution_units(execute_timings: &ExecuteTimings) -> u64 { let mut execute_cost_units: u64 = 0; for (program_id, timing) in &execute_timings.details.per_program_timings { if timing.count < 1 { continue; } execute_cost_units = execute_cost_units.saturating_add(timing.accumulated_units / timing.count as u64); trace!("aggregated execution cost of {:?} {:?}", program_id, timing); } execute_cost_units } fn execute_batch( batch: &TransactionBatch, bank: &Arc<Bank>, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timings: &mut ExecuteTimings, cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>, ) -> Result<()> { let record_token_balances = transaction_status_sender.is_some(); let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new(); let pre_token_balances = if record_token_balances { collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; let pre_process_units: u64 = aggregate_total_execution_units(timings); let (tx_results, balances) = batch.bank().load_execute_and_commit_transactions( batch, MAX_PROCESSING_AGE, transaction_status_sender.is_some(), transaction_status_sender.is_some(), transaction_status_sender.is_some(), timings, ); if bank .feature_set .is_active(&feature_set::gate_large_block::id()) { let execution_cost_units = aggregate_total_execution_units(timings) - pre_process_units; let remaining_block_cost_cap = cost_capacity_meter .write() .unwrap() .accumulate(execution_cost_units); debug!( "bank {} executed a batch, number of transactions {}, total execute cu {}, remaining block cost cap {}", bank.slot(), batch.sanitized_transactions().len(), execution_cost_units, remaining_block_cost_cap, ); if remaining_block_cost_cap == 0_u64 { return Err(TransactionError::WouldExceedMaxBlockCostLimit); } } bank_utils::find_and_send_votes( batch.sanitized_transactions(), &tx_results, replay_vote_sender, ); let TransactionResults { fee_collection_results, execution_results, rent_debits, .. 
} = tx_results; if bank .feature_set .is_active(&feature_set::cap_accounts_data_len::id()) { check_accounts_data_size(&execution_results)?; } if let Some(transaction_status_sender) = transaction_status_sender { let transactions = batch.sanitized_transactions().to_vec(); let post_token_balances = if record_token_balances { collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; let token_balances = TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances); transaction_status_sender.send_transaction_status_batch( bank.clone(), transactions, execution_results, balances, token_balances, rent_debits, ); } let first_err = get_first_error(batch, fee_collection_results); first_err.map(|(result, _)| result).unwrap_or(Ok(())) } fn execute_batches_internal( bank: &Arc<Bank>, batches: &[TransactionBatch], entry_callback: Option<&ProcessCallback>, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timings: &mut ExecuteTimings, cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>, ) -> Result<()> { inc_new_counter_debug!("bank-par_execute_entries-count", batches.len()); let (results, new_timings): (Vec<Result<()>>, Vec<ExecuteTimings>) = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { batches .into_par_iter() .map(|batch| { let mut timings = ExecuteTimings::default(); let result = execute_batch( batch, bank, transaction_status_sender, replay_vote_sender, &mut timings, cost_capacity_meter.clone(), ); if let Some(entry_callback) = entry_callback { entry_callback(bank); } (result, timings) }) .unzip() }) }); timings.saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, batches.len() as u64); timings.saturating_add_in_place(ExecuteTimingType::NumExecuteBatches, 1); for timing in new_timings { timings.accumulate(&timing); } first_err(&results) } fn execute_batches( bank: &Arc<Bank>, batches: &[TransactionBatch], entry_callback: Option<&ProcessCallback>, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timings: &mut ExecuteTimings, cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>, ) -> Result<()> { let lock_results = batches .iter() .flat_map(|batch| batch.lock_results().clone()) .collect::<Vec<_>>(); let sanitized_txs = batches .iter() .flat_map(|batch| batch.sanitized_transactions().to_vec()) .collect::<Vec<_>>(); let cost_model = CostModel::new(); let mut minimal_tx_cost = u64::MAX; let mut total_cost: u64 = 0; // Allowing collect here, since it also computes the minimal tx cost, and aggregate cost. // These two values are later used for checking if the tx_costs vector needs to be iterated over. 
#[allow(clippy::needless_collect)] let tx_costs = sanitized_txs .iter() .map(|tx| { let cost = cost_model.calculate_cost(tx).sum(); minimal_tx_cost = std::cmp::min(minimal_tx_cost, cost); total_cost = total_cost.saturating_add(cost); cost }) .collect::<Vec<_>>(); let target_batch_count = get_thread_count() as u64; let mut tx_batches: Vec<TransactionBatch> = vec![]; let rebatched_txs = if total_cost > target_batch_count.saturating_mul(minimal_tx_cost) { let target_batch_cost = total_cost / target_batch_count; let mut batch_cost: u64 = 0; let mut slice_start = 0; tx_costs.into_iter().enumerate().for_each(|(index, cost)| { let next_index = index + 1; batch_cost = batch_cost.saturating_add(cost); if batch_cost >= target_batch_cost || next_index == sanitized_txs.len() { let txs = &sanitized_txs[slice_start..=index]; let results = &lock_results[slice_start..=index]; let tx_batch = TransactionBatch::new(results.to_vec(), bank, Cow::from(txs)); slice_start = next_index; tx_batches.push(tx_batch); batch_cost = 0; } }); &tx_batches[..] } else { batches }; execute_batches_internal( bank, rebatched_txs, entry_callback, transaction_status_sender, replay_vote_sender, timings, cost_capacity_meter, ) } /// Process an ordered list of entries in parallel /// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry /// 2. Process the locked group in parallel /// 3. Register the `Tick` if it's available /// 4. Update the leader scheduler, goto 1 pub fn process_entries_for_tests( bank: &Arc<Bank>, entries: Vec<Entry>, randomize: bool, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, ) -> Result<()> { let verify_transaction = { let bank = bank.clone(); move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> { bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification) } }; let mut timings = ExecuteTimings::default(); let mut entries = entry::verify_transactions(entries, Arc::new(verify_transaction))?; let result = process_entries_with_callback( bank, &mut entries, randomize, None, transaction_status_sender, replay_vote_sender, None, &mut timings, Arc::new(RwLock::new(BlockCostCapacityMeter::default())), ); debug!("process_entries: {:?}", timings); result } // Note: If randomize is true this will shuffle entries' transactions in-place. 
fn process_entries_with_callback( bank: &Arc<Bank>, entries: &mut [EntryType], randomize: bool, entry_callback: Option<&ProcessCallback>, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>, timings: &mut ExecuteTimings, cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>, ) -> Result<()> { // accumulator for entries that can be processed in parallel let mut batches = vec![]; let mut tick_hashes = vec![]; let mut rng = thread_rng(); for entry in entries { match entry { EntryType::Tick(hash) => { // If it's a tick, save it for later tick_hashes.push(hash); if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) { // If it's a tick that will cause a new blockhash to be created, // execute the group and register the tick execute_batches( bank, &batches, entry_callback, transaction_status_sender, replay_vote_sender, timings, cost_capacity_meter.clone(), )?; batches.clear(); for hash in &tick_hashes { bank.register_tick(hash); } tick_hashes.clear(); } } EntryType::Transactions(transactions) => { if let Some(transaction_cost_metrics_sender) = transaction_cost_metrics_sender { transaction_cost_metrics_sender .send_cost_details(bank.clone(), transactions.iter()); } if randomize { transactions.shuffle(&mut rng); } loop { // try to lock the accounts let batch = bank.prepare_sanitized_batch(transactions); let first_lock_err = first_err(batch.lock_results()); // if locking worked if first_lock_err.is_ok() { batches.push(batch); // done with this entry break; } // else we failed to lock, 2 possible reasons if batches.is_empty() { // An entry has account lock conflicts with *itself*, which should not happen // if generated by a properly functioning leader datapoint_error!( "validator_process_entry_error", ( "error", format!( "Lock accounts error, entry conflicts with itself, txs: {:?}", transactions ), String ) ); // bail first_lock_err?; } else { // else we have an entry that conflicts with a prior entry // execute the current queue and try to process this entry again execute_batches( bank, &batches, entry_callback, transaction_status_sender, replay_vote_sender, timings, cost_capacity_meter.clone(), )?; batches.clear(); } } } } } execute_batches( bank, &batches, entry_callback, transaction_status_sender, replay_vote_sender, timings, cost_capacity_meter, )?; for hash in tick_hashes { bank.register_tick(hash); } Ok(()) } #[derive(Error, Debug)] pub enum BlockstoreProcessorError { #[error("failed to load entries, error: {0}")] FailedToLoadEntries(#[from] BlockstoreError), #[error("failed to load meta")] FailedToLoadMeta, #[error("invalid block error: {0}")] InvalidBlock(#[from] BlockError), #[error("invalid transaction error: {0}")] InvalidTransaction(#[from] TransactionError), #[error("no valid forks found")] NoValidForksFound, #[error("invalid hard fork slot {0}")] InvalidHardFork(Slot), #[error("root bank with mismatched capitalization at {0}")] RootBankWithMismatchedCapitalization(Slot), } /// Callback for accessing bank state while processing the blockstore pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>; #[derive(Default, Clone)] pub struct ProcessOptions { pub bpf_jit: bool, pub poh_verify: bool, pub full_leader_cache: bool, pub dev_halt_at_slot: Option<Slot>, pub entry_callback: Option<ProcessCallback>, pub override_num_threads: Option<usize>, pub new_hard_forks: Option<Vec<Slot>>, pub debug_keys: Option<Arc<HashSet<Pubkey>>>, pub 
account_indexes: AccountSecondaryIndexes, pub accounts_db_caching_enabled: bool, pub limit_load_slot_count_from_snapshot: Option<usize>, pub allow_dead_slots: bool, pub accounts_db_test_hash_calculation: bool, pub accounts_db_skip_shrink: bool, pub accounts_db_config: Option<AccountsDbConfig>, pub verify_index: bool, pub shrink_ratio: AccountShrinkThreshold, } pub fn process_blockstore( genesis_config: &GenesisConfig, blockstore: &Blockstore, account_paths: Vec<PathBuf>, opts: ProcessOptions, cache_block_meta_sender: Option<&CacheBlockMetaSender>, snapshot_config: Option<&SnapshotConfig>, accounts_package_sender: AccountsPackageSender, accounts_update_notifier: Option<AccountsUpdateNotifier>, ) -> BlockstoreProcessorResult { if let Some(num_threads) = opts.override_num_threads { PAR_THREAD_POOL.with(|pool| { *pool.borrow_mut() = rayon::ThreadPoolBuilder::new() .num_threads(num_threads) .build() .unwrap() }); } // Setup bank for slot 0 let bank0 = Bank::new_with_paths( genesis_config, account_paths, opts.debug_keys.clone(), Some(&crate::builtins::get(opts.bpf_jit)), opts.account_indexes.clone(), opts.accounts_db_caching_enabled, opts.shrink_ratio, false, opts.accounts_db_config.clone(), accounts_update_notifier, ); let bank_forks = BankForks::new(bank0); info!("processing ledger for slot 0..."); let recyclers = VerifyRecyclers::default(); process_bank_0( &bank_forks.root_bank(), blockstore, &opts, &recyclers, cache_block_meta_sender, ); do_process_blockstore_from_root( blockstore, bank_forks, &opts, &recyclers, None, cache_block_meta_sender, snapshot_config, accounts_package_sender, None, ) } /// Process blockstore from a known root bank #[allow(clippy::too_many_arguments)] pub(crate) fn process_blockstore_from_root( blockstore: &Blockstore, bank_forks: BankForks, opts: &ProcessOptions, recyclers: &VerifyRecyclers, transaction_status_sender: Option<&TransactionStatusSender>, cache_block_meta_sender: Option<&CacheBlockMetaSender>, snapshot_config: Option<&SnapshotConfig>, accounts_package_sender: AccountsPackageSender, last_full_snapshot_slot: Slot, ) -> BlockstoreProcessorResult { do_process_blockstore_from_root( blockstore, bank_forks, opts, recyclers, transaction_status_sender, cache_block_meta_sender, snapshot_config, accounts_package_sender, Some(last_full_snapshot_slot), ) } #[allow(clippy::too_many_arguments)] fn do_process_blockstore_from_root( blockstore: &Blockstore, mut bank_forks: BankForks, opts: &ProcessOptions, recyclers: &VerifyRecyclers, transaction_status_sender: Option<&TransactionStatusSender>, cache_block_meta_sender: Option<&CacheBlockMetaSender>, snapshot_config: Option<&SnapshotConfig>, accounts_package_sender: AccountsPackageSender, mut last_full_snapshot_slot: Option<Slot>, ) -> BlockstoreProcessorResult { // Starting slot must be a root, and thus has no parents assert_eq!(bank_forks.banks().len(), 1); let bank = bank_forks.root_bank(); assert!(bank.parent().is_none()); let start_slot = bank.slot(); info!("processing ledger from slot {}...", start_slot); let now = Instant::now(); if let Some(ref new_hard_forks) = opts.new_hard_forks { let hard_forks = bank.hard_forks(); for hard_fork_slot in new_hard_forks.iter() { if *hard_fork_slot > start_slot { hard_forks.write().unwrap().register(*hard_fork_slot); } else { warn!( "Hard fork at {} ignored, --hard-fork option can be removed.", hard_fork_slot ); } } } // ensure start_slot is rooted for correct replay if blockstore.is_primary_access() { blockstore .set_roots(std::iter::once(&start_slot)) .expect("Couldn't set root 
slot on startup"); } else { assert!( blockstore.is_root(start_slot), "starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot ); } if let Ok(metas) = blockstore.slot_meta_iterator(start_slot) { if let Some((slot, _meta)) = metas.last() { info!("ledger holds data through slot {}", slot); } } let mut timing = ExecuteTimings::default(); // Iterate and replay slots from blockstore starting from `start_slot` let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank); if opts.full_leader_cache { leader_schedule_cache.set_max_schedules(std::usize::MAX); } if let Some(start_slot_meta) = blockstore .meta(start_slot) .unwrap_or_else(|_| panic!("Failed to get meta for slot {}", start_slot)) { load_frozen_forks( &mut bank_forks, start_slot, &start_slot_meta, blockstore, &leader_schedule_cache, opts, recyclers, transaction_status_sender, cache_block_meta_sender, snapshot_config, accounts_package_sender, &mut timing, &mut last_full_snapshot_slot, )?; } else { // If there's no meta for the input `start_slot`, then we started from a snapshot // and there's no point in processing the rest of blockstore and implies blockstore // should be empty past this point. }; let processing_time = now.elapsed(); let debug_verify = opts.accounts_db_test_hash_calculation; let mut time_cap = Measure::start("capitalization"); // We might be promptly restarted after bad capitalization was detected while creating newer snapshot. // In that case, we're most likely restored from the last good snapshot and replayed up to this root. // So again check here for the bad capitalization to avoid to continue until the next snapshot creation. if !bank_forks .root_bank() .calculate_and_verify_capitalization(debug_verify) { return Err( BlockstoreProcessorError::RootBankWithMismatchedCapitalization(bank_forks.root()), ); } time_cap.stop(); datapoint_info!( "process_blockstore_from_root", ("total_time_us", processing_time.as_micros(), i64), ("frozen_banks", bank_forks.frozen_banks().len(), i64), ("slot", bank_forks.root(), i64), ("forks", bank_forks.banks().len(), i64), ("calculate_capitalization_us", time_cap.as_us(), i64), ); info!("ledger processing timing: {:?}", timing); let mut bank_slots = bank_forks.banks().keys().collect::<Vec<_>>(); bank_slots.sort_unstable(); info!( "ledger processed in {}. 
root slot is {}, {} bank{}: {}", HumanTime::from(chrono::Duration::from_std(processing_time).unwrap()) .to_text_en(Accuracy::Precise, Tense::Present), bank_forks.root(), bank_slots.len(), if bank_slots.len() > 1 { "s" } else { "" }, bank_slots.iter().map(|slot| slot.to_string()).join(", "), ); assert!(bank_forks.active_banks().is_empty()); Ok((bank_forks, leader_schedule_cache, last_full_snapshot_slot)) } /// Verify that a segment of entries has the correct number of ticks and hashes fn verify_ticks( bank: &Bank, entries: &[Entry], slot_full: bool, tick_hash_count: &mut u64, ) -> std::result::Result<(), BlockError> { let next_bank_tick_height = bank.tick_height() + entries.tick_count(); let max_bank_tick_height = bank.max_tick_height(); if next_bank_tick_height > max_bank_tick_height { warn!("Too many entry ticks found in slot: {}", bank.slot()); return Err(BlockError::TooManyTicks); } if next_bank_tick_height < max_bank_tick_height && slot_full { info!("Too few entry ticks found in slot: {}", bank.slot()); return Err(BlockError::TooFewTicks); } if next_bank_tick_height == max_bank_tick_height { let has_trailing_entry = entries.last().map(|e| !e.is_tick()).unwrap_or_default(); if has_trailing_entry { warn!("Slot: {} did not end with a tick entry", bank.slot()); return Err(BlockError::TrailingEntry); } if !slot_full { warn!("Slot: {} was not marked full", bank.slot()); return Err(BlockError::InvalidLastTick); } } let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0); if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) { warn!( "Tick with invalid number of hashes found in slot: {}", bank.slot() ); return Err(BlockError::InvalidTickHashCount); } Ok(()) } fn confirm_full_slot( blockstore: &Blockstore, bank: &Arc<Bank>, opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut ConfirmationProgress, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timing: &mut ExecuteTimings, ) -> result::Result<(), BlockstoreProcessorError> { let mut confirmation_timing = ConfirmationTiming::default(); let skip_verification = !opts.poh_verify; confirm_slot( blockstore, bank, &mut confirmation_timing, progress, skip_verification, transaction_status_sender, replay_vote_sender, None, opts.entry_callback.as_ref(), recyclers, opts.allow_dead_slots, )?; timing.accumulate(&confirmation_timing.execute_timings); if !bank.is_complete() { Err(BlockstoreProcessorError::InvalidBlock( BlockError::Incomplete, )) } else { Ok(()) } } pub struct ConfirmationTiming { pub started: Instant, pub replay_elapsed: u64, pub poh_verify_elapsed: u64, pub transaction_verify_elapsed: u64, pub fetch_elapsed: u64, pub fetch_fail_elapsed: u64, pub execute_timings: ExecuteTimings, } impl Default for ConfirmationTiming { fn default() -> Self { Self { started: Instant::now(), replay_elapsed: 0, poh_verify_elapsed: 0, transaction_verify_elapsed: 0, fetch_elapsed: 0, fetch_fail_elapsed: 0, execute_timings: ExecuteTimings::default(), } } } #[derive(Default)] pub struct ConfirmationProgress { pub last_entry: Hash, pub tick_hash_count: u64, pub num_shreds: u64, pub num_entries: usize, pub num_txs: usize, } impl ConfirmationProgress { pub fn new(last_entry: Hash) -> Self { Self { last_entry, ..Self::default() } } } #[allow(clippy::too_many_arguments)] pub fn confirm_slot( blockstore: &Blockstore, bank: &Arc<Bank>, timing: &mut ConfirmationTiming, progress: &mut ConfirmationProgress, skip_verification: bool, transaction_status_sender: 
Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>, entry_callback: Option<&ProcessCallback>, recyclers: &VerifyRecyclers, allow_dead_slots: bool, ) -> result::Result<(), BlockstoreProcessorError> { let slot = bank.slot(); let (entries, num_shreds, slot_full) = { let mut load_elapsed = Measure::start("load_elapsed"); let load_result = blockstore .get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots) .map_err(BlockstoreProcessorError::FailedToLoadEntries); load_elapsed.stop(); if load_result.is_err() { timing.fetch_fail_elapsed += load_elapsed.as_us(); } else { timing.fetch_elapsed += load_elapsed.as_us(); } load_result }?; let num_entries = entries.len(); let num_txs = entries.iter().map(|e| e.transactions.len()).sum::<usize>(); trace!( "Fetched entries for slot {}, num_entries: {}, num_shreds: {}, num_txs: {}, slot_full: {}", slot, num_entries, num_shreds, num_txs, slot_full, ); if !skip_verification { let tick_hash_count = &mut progress.tick_hash_count; verify_ticks(bank, &entries, slot_full, tick_hash_count).map_err(|err| { warn!( "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: {}, shred_index: {}, slot_full: {}", err, slot, num_entries, bank.tick_height(), progress.last_entry, bank.last_blockhash(), num_shreds, slot_full, ); err })?; } let last_entry_hash = entries.last().map(|e| e.hash); let verifier = if !skip_verification { datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64)); let entry_state = entries.start_verify(&progress.last_entry, recyclers.clone()); if entry_state.status() == EntryVerificationStatus::Failure { warn!("Ledger proof of history failed at slot: {}", slot); return Err(BlockError::InvalidEntryHash.into()); } Some(entry_state) } else { None }; let verify_transaction = { let bank = bank.clone(); move |versioned_tx: VersionedTransaction, verification_mode: TransactionVerificationMode| -> Result<SanitizedTransaction> { bank.verify_transaction(versioned_tx, verification_mode) } }; let check_start = Instant::now(); let check_result = entry::start_verify_transactions( entries, skip_verification, recyclers.clone(), Arc::new(verify_transaction), ); let transaction_cpu_duration_us = timing::duration_as_us(&check_start.elapsed()); match check_result { Ok(mut check_result) => { let entries = check_result.entries(); assert!(entries.is_some()); let mut replay_elapsed = Measure::start("replay_elapsed"); let mut execute_timings = ExecuteTimings::default(); let cost_capacity_meter = Arc::new(RwLock::new(BlockCostCapacityMeter::default())); // Note: This will shuffle entries' transactions in-place. let process_result = process_entries_with_callback( bank, &mut entries.unwrap(), true, // shuffle transactions. entry_callback, transaction_status_sender, replay_vote_sender, transaction_cost_metrics_sender, &mut execute_timings, cost_capacity_meter, ) .map_err(BlockstoreProcessorError::from); replay_elapsed.stop(); timing.replay_elapsed += replay_elapsed.as_us(); timing.execute_timings.accumulate(&execute_timings); // If running signature verification on the GPU, wait for that // computation to finish, and get the result of it. If we did the // signature verification on the CPU, this just returns the // already-computed result produced in start_verify_transactions. // Either way, check the result of the signature verification. 
if !check_result.finish_verify() { warn!("Ledger proof of history failed at slot: {}", bank.slot()); return Err(TransactionError::SignatureFailure.into()); } if let Some(mut verifier) = verifier { let verified = verifier.finish_verify(); timing.poh_verify_elapsed += verifier.poh_duration_us(); // The GPU Entry verification (if any) is kicked off right when the CPU-side // Entry verification finishes, so these times should be disjoint timing.transaction_verify_elapsed += transaction_cpu_duration_us + check_result.gpu_verify_duration(); if !verified { warn!("Ledger proof of history failed at slot: {}", bank.slot()); return Err(BlockError::InvalidEntryHash.into()); } } process_result?; progress.num_shreds += num_shreds; progress.num_entries += num_entries; progress.num_txs += num_txs; if let Some(last_entry_hash) = last_entry_hash { progress.last_entry = last_entry_hash; } Ok(()) } Err(err) => { warn!("Ledger proof of history failed at slot: {}", bank.slot()); Err(err.into()) } } } // Special handling required for processing the entries in slot 0 fn process_bank_0( bank0: &Arc<Bank>, blockstore: &Blockstore, opts: &ProcessOptions, recyclers: &VerifyRecyclers, cache_block_meta_sender: Option<&CacheBlockMetaSender>, ) { assert_eq!(bank0.slot(), 0); let mut progress = ConfirmationProgress::new(bank0.last_blockhash()); confirm_full_slot( blockstore, bank0, opts, recyclers, &mut progress, None, None, &mut ExecuteTimings::default(), ) .expect("processing for bank 0 must succeed"); bank0.freeze(); blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false); cache_block_meta(bank0, cache_block_meta_sender); } // Given a bank, add its children to the pending slots queue if those children slots are // complete fn process_next_slots( bank: &Arc<Bank>, meta: &SlotMeta, blockstore: &Blockstore, leader_schedule_cache: &LeaderScheduleCache, pending_slots: &mut Vec<(SlotMeta, Bank, Hash)>, ) -> result::Result<(), BlockstoreProcessorError> { if meta.next_slots.is_empty() { return Ok(()); } // This is a fork point if there are multiple children, create a new child bank for each fork for next_slot in &meta.next_slots { let next_meta = blockstore .meta(*next_slot) .map_err(|err| { warn!("Failed to load meta for slot {}: {:?}", next_slot, err); BlockstoreProcessorError::FailedToLoadMeta })? 
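            // A slot only appears in `next_slots` after shreds for it have been
            // inserted, which also creates its `SlotMeta`, so this lookup is
            // expected to return `Some` here.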
.unwrap(); // Only process full slots in blockstore_processor, replay_stage // handles any partials if next_meta.is_full() { let next_bank = Bank::new_from_parent( bank, &leader_schedule_cache .slot_leader_at(*next_slot, Some(bank)) .unwrap(), *next_slot, ); trace!( "New bank for slot {}, parent slot is {}", next_slot, bank.slot(), ); pending_slots.push((next_meta, next_bank, bank.last_blockhash())); } } // Reverse sort by slot, so the next slot to be processed can be popped pending_slots.sort_by(|a, b| b.1.slot().cmp(&a.1.slot())); Ok(()) } // Iterate through blockstore processing slots starting from the root slot pointed to by the // given `meta` and return a vector of frozen bank forks #[allow(clippy::too_many_arguments)] fn load_frozen_forks( bank_forks: &mut BankForks, start_slot: Slot, start_slot_meta: &SlotMeta, blockstore: &Blockstore, leader_schedule_cache: &LeaderScheduleCache, opts: &ProcessOptions, recyclers: &VerifyRecyclers, transaction_status_sender: Option<&TransactionStatusSender>, cache_block_meta_sender: Option<&CacheBlockMetaSender>, snapshot_config: Option<&SnapshotConfig>, accounts_package_sender: AccountsPackageSender, timing: &mut ExecuteTimings, last_full_snapshot_slot: &mut Option<Slot>, ) -> result::Result<(), BlockstoreProcessorError> { let mut all_banks = HashMap::new(); let mut last_status_report = Instant::now(); let mut last_free = Instant::now(); let mut pending_slots = vec![]; let mut slots_elapsed = 0; let mut txs = 0; let blockstore_max_root = blockstore.max_root(); let mut root = bank_forks.root(); let max_root = std::cmp::max(root, blockstore_max_root); info!( "load_frozen_forks() latest root from blockstore: {}, max_root: {}", blockstore_max_root, max_root, ); process_next_slots( bank_forks.get(start_slot).unwrap(), start_slot_meta, blockstore, leader_schedule_cache, &mut pending_slots, )?; let dev_halt_at_slot = opts.dev_halt_at_slot.unwrap_or(std::u64::MAX); if bank_forks.root() != dev_halt_at_slot { while !pending_slots.is_empty() { timing.details.per_program_timings.clear(); let (meta, bank, last_entry_hash) = pending_slots.pop().unwrap(); let slot = bank.slot(); if last_status_report.elapsed() > Duration::from_secs(2) { let secs = last_status_report.elapsed().as_secs() as f32; last_status_report = Instant::now(); info!( "processing ledger: slot={}, last root slot={} slots={} slots/s={:?} txs/s={}", slot, root, slots_elapsed, slots_elapsed as f32 / secs, txs as f32 / secs, ); slots_elapsed = 0; txs = 0; } let mut progress = ConfirmationProgress::new(last_entry_hash); let bank = bank_forks.insert(bank); if process_single_slot( blockstore, &bank, opts, recyclers, &mut progress, transaction_status_sender, cache_block_meta_sender, None, timing, ) .is_err() { assert!(bank_forks.remove(bank.slot()).is_some()); continue; } txs += progress.num_txs; // Block must be frozen by this point, otherwise `process_single_slot` would // have errored above assert!(bank.is_frozen()); all_banks.insert(bank.slot(), bank.clone()); // If we've reached the last known root in blockstore, start looking // for newer cluster confirmed roots let new_root_bank = { if bank_forks.root() >= max_root { supermajority_root_from_vote_accounts( bank.slot(), bank.total_epoch_stake(), &bank.vote_accounts(), ).and_then(|supermajority_root| { if supermajority_root > root { // If there's a cluster confirmed root greater than our last // replayed root, then because the cluster confirmed root should // be descended from our last root, it must exist in `all_banks` let cluster_root_bank = 
all_banks.get(&supermajority_root).unwrap(); // cluster root must be a descendant of our root, otherwise something // is drastically wrong assert!(cluster_root_bank.ancestors.contains_key(&root)); info!( "blockstore processor found new cluster confirmed root: {}, observed in bank: {}", cluster_root_bank.slot(), bank.slot() ); // Ensure cluster-confirmed root and parents are set as root in blockstore let mut rooted_slots = vec![]; let mut new_root_bank = cluster_root_bank.clone(); loop { if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay! assert!(new_root_bank.slot() > root); rooted_slots.push((new_root_bank.slot(), new_root_bank.hash())); // As noted, the cluster confirmed root should be descended from // our last root; therefore parent should be set new_root_bank = new_root_bank.parent().unwrap(); } inc_new_counter_info!("load_frozen_forks-cluster-confirmed-root", rooted_slots.len()); blockstore.set_roots(rooted_slots.iter().map(|(slot, _hash)| slot)).expect("Blockstore::set_roots should succeed"); blockstore.set_duplicate_confirmed_slots_and_hashes(rooted_slots.into_iter()).expect("Blockstore::set_duplicate_confirmed should succeed"); Some(cluster_root_bank) } else { None } }) } else if blockstore.is_root(slot) { Some(&bank) } else { None } }; if let Some(new_root_bank) = new_root_bank { root = new_root_bank.slot(); leader_schedule_cache.set_root(new_root_bank); let _ = bank_forks.set_root( root, &solana_runtime::accounts_background_service::AbsRequestSender::default(), None, ); if let Some(snapshot_config) = snapshot_config { let block_height = new_root_bank.block_height(); if snapshot_utils::should_take_full_snapshot( block_height, snapshot_config.full_snapshot_archive_interval_slots, ) { info!("Taking snapshot of new root bank that has crossed the full snapshot interval! slot: {}", root); *last_full_snapshot_slot = Some(root); new_root_bank.exhaustively_free_unused_resource(*last_full_snapshot_slot); last_free = Instant::now(); new_root_bank.update_accounts_hash_with_index_option( snapshot_config.accounts_hash_use_index, snapshot_config.accounts_hash_debug_verify, false, ); snapshot_utils::snapshot_bank( new_root_bank, new_root_bank.src.slot_deltas(&new_root_bank.src.roots()), &accounts_package_sender, &snapshot_config.bank_snapshots_dir, &snapshot_config.snapshot_archives_dir, snapshot_config.snapshot_version, snapshot_config.archive_format, None, Some(SnapshotType::FullSnapshot), ) .expect("Failed to snapshot bank while loading frozen banks"); trace!( "took bank snapshot for new root bank, block height: {}, slot: {}", block_height, root ); } } if last_free.elapsed() > Duration::from_secs(10) { // Must be called after `squash()`, so that AccountsDb knows what // the roots are for the cache flushing in exhaustively_free_unused_resource(). 
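                // (`bank_forks.set_root()` above is expected to have squashed
                // the new root bank, satisfying that precondition here.)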
                // This could take a few secs; so update last_free later
                new_root_bank.exhaustively_free_unused_resource(*last_full_snapshot_slot);
                last_free = Instant::now();
            }

            // Filter out all non-descendants of the new root
            pending_slots
                .retain(|(_, pending_bank, _)| pending_bank.ancestors.contains_key(&root));
            all_banks.retain(|_, bank| bank.ancestors.contains_key(&root));
        }

        slots_elapsed += 1;

        trace!(
            "Bank for {}slot {} is complete",
            if root == slot { "root " } else { "" },
            slot,
        );

        process_next_slots(
            &bank,
            &meta,
            blockstore,
            leader_schedule_cache,
            &mut pending_slots,
        )?;

        if slot >= dev_halt_at_slot {
            bank.force_flush_accounts_cache();
            let _ = bank.verify_bank_hash(false);
            break;
        }
    }
}

Ok(())
}

// `roots` is sorted largest to smallest by root slot
fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<Slot> {
    if roots.is_empty() {
        return None;
    }

    // Find the latest root for which the cumulative stake of it and all later
    // roots exceeds VOTE_THRESHOLD_SIZE of the total epoch stake
    let mut total = 0;
    let mut prev_root = roots[0].0;
    for (root, stake) in roots.iter() {
        assert!(*root <= prev_root);
        total += stake;
        if total as f64 / total_epoch_stake as f64 > VOTE_THRESHOLD_SIZE {
            return Some(*root);
        }
        prev_root = *root;
    }

    None
}

fn supermajority_root_from_vote_accounts(
    bank_slot: Slot,
    total_epoch_stake: u64,
    vote_accounts: &HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>,
) -> Option<Slot> {
    let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts
        .iter()
        .filter_map(|(key, (stake, account))| {
            if *stake == 0 {
                return None;
            }

            match account.vote_state().as_ref() {
                Err(_) => {
                    warn!(
                        "Unable to get vote_state from account {} in bank: {}",
                        key, bank_slot
                    );
                    None
                }
                Ok(vote_state) => Some((vote_state.root_slot?, *stake)),
            }
        })
        .collect();

    // Sort from greatest to smallest slot
    roots_stakes.sort_unstable_by(|a, b| a.0.cmp(&b.0).reverse());

    // Find latest root
    supermajority_root(&roots_stakes, total_epoch_stake)
}

// Processes and replays the contents of a single slot, returning an error
// if the slot fails to verify or replay
fn process_single_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    // Mark corrupt slots as dead so validators don't replay this slot and
    // see AlreadyProcessed errors later in ReplayStage
    confirm_full_slot(
        blockstore,
        bank,
        opts,
        recyclers,
        progress,
        transaction_status_sender,
        replay_vote_sender,
        timing,
    )
    .map_err(|err| {
        let slot = bank.slot();
        warn!("slot {} failed to verify: {}", slot, err);
        if blockstore.is_primary_access() {
            blockstore
                .set_dead_slot(slot)
                .expect("Failed to mark slot as dead in blockstore");
        } else {
            assert!(
                blockstore.is_dead(slot),
                "Failed slot isn't dead and can't update due to being secondary blockstore access: {}",
                slot
            );
        }
        err
    })?;

    bank.freeze(); // all banks handled by this routine are created from complete slots
    blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
    cache_block_meta(bank, cache_block_meta_sender);

    Ok(())
}

#[allow(clippy::large_enum_variant)]
pub enum TransactionStatusMessage {
    Batch(TransactionStatusBatch),
    Freeze(Slot),
}

pub struct TransactionStatusBatch {
    pub bank: Arc<Bank>,
    pub transactions: Vec<SanitizedTransaction>,
    pub execution_results: Vec<TransactionExecutionResult>,
    pub balances: TransactionBalancesSet,
    pub token_balances: TransactionTokenBalancesSet,
    pub rent_debits: Vec<RentDebits>,
}
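// Illustrative sketch only (not part of the original interface): one way a
// consumer on the receiving end of the channel might drain these messages.
// The function name and the handling below are assumptions for demonstration;
// in practice the transaction-status service owns the receiver and persists
// the batches.
#[allow(dead_code)]
fn example_drain_transaction_statuses(
    receiver: &crossbeam_channel::Receiver<TransactionStatusMessage>,
) {
    // Drain whatever is currently queued without blocking
    while let Ok(message) = receiver.try_recv() {
        match message {
            TransactionStatusMessage::Batch(batch) => {
                // Each batch pairs one bank's executed transactions with their
                // results, balance changes, and rent debits
                trace!(
                    "slot {}: received {} transaction statuses",
                    batch.bank.slot(),
                    batch.transactions.len()
                );
            }
            TransactionStatusMessage::Freeze(slot) => {
                // The bank for `slot` has been frozen; no further batches will
                // arrive for it
                trace!("slot {} frozen", slot);
            }
        }
    }
}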
#[derive(Clone)] pub struct TransactionStatusSender { pub sender: Sender<TransactionStatusMessage>, } impl TransactionStatusSender { pub fn send_transaction_status_batch( &self, bank: Arc<Bank>, transactions: Vec<SanitizedTransaction>, execution_results: Vec<TransactionExecutionResult>, balances: TransactionBalancesSet, token_balances: TransactionTokenBalancesSet, rent_debits: Vec<RentDebits>, ) { let slot = bank.slot(); if let Err(e) = self .sender .send(TransactionStatusMessage::Batch(TransactionStatusBatch { bank, transactions, execution_results, balances, token_balances, rent_debits, })) { trace!( "Slot {} transaction_status send batch failed: {:?}", slot, e ); } } pub fn send_transaction_status_freeze_message(&self, bank: &Arc<Bank>) { let slot = bank.slot(); if let Err(e) = self.sender.send(TransactionStatusMessage::Freeze(slot)) { trace!( "Slot {} transaction_status send freeze message failed: {:?}", slot, e ); } } } pub type CacheBlockMetaSender = Sender<Arc<Bank>>; pub fn cache_block_meta(bank: &Arc<Bank>, cache_block_meta_sender: Option<&CacheBlockMetaSender>) { if let Some(cache_block_meta_sender) = cache_block_meta_sender { cache_block_meta_sender .send(bank.clone()) .unwrap_or_else(|err| warn!("cache_block_meta_sender failed: {:?}", err)); } } // used for tests only pub fn fill_blockstore_slot_with_ticks( blockstore: &Blockstore, ticks_per_slot: u64, slot: u64, parent_slot: u64, last_entry_hash: Hash, ) -> Hash { // Only slot 0 can be equal to the parent_slot assert!(slot.saturating_sub(1) >= parent_slot); let num_slots = (slot - parent_slot).max(1); let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash); let last_entry_hash = entries.last().unwrap().hash; blockstore .write_entries( slot, 0, 0, ticks_per_slot, Some(parent_slot), true, &Arc::new(Keypair::new()), entries, 0, ) .unwrap(); last_entry_hash } /// Check the transaction execution results to see if any instruction errored by exceeding the max /// accounts data size limit for all slots. If yes, the whole block needs to be failed. 
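///
/// A minimal sketch of the contract (`execution_results` is hypothetical and
/// the block is marked `ignore` so it is not compiled as a doctest):
/// ```ignore
/// // Given some execution_results: Vec<TransactionExecutionResult>, a single
/// // offending result anywhere in the block fails the whole block:
/// assert_eq!(
///     check_accounts_data_size(&execution_results),
///     Err(TransactionError::InstructionError(
///         0,
///         InstructionError::MaxAccountsDataSizeExceeded,
///     ))
/// );
/// ```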
fn check_accounts_data_size<'a>( execution_results: impl IntoIterator<Item = &'a TransactionExecutionResult>, ) -> Result<()> { if let Some(result) = execution_results .into_iter() .map(|execution_result| execution_result.flattened_result()) .find(|result| { matches!( result, Err(TransactionError::InstructionError( _, InstructionError::MaxAccountsDataSizeExceeded )), ) }) { return result; } Ok(()) } #[cfg(test)] pub mod tests { use { super::*, crate::genesis_utils::{ create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo, }, crossbeam_channel::unbounded, matches::assert_matches, rand::{thread_rng, Rng}, solana_entry::entry::{create_ticks, next_entry, next_entry_mut}, solana_runtime::genesis_utils::{ self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, epoch_schedule::EpochSchedule, hash::Hash, pubkey::Pubkey, signature::{Keypair, Signer}, system_instruction::SystemError, system_transaction, transaction::{Transaction, TransactionError}, }, solana_vote_program::{ self, vote_state::{VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY}, vote_transaction, }, std::{collections::BTreeSet, sync::RwLock}, tempfile::TempDir, trees::tr, }; fn test_process_blockstore( genesis_config: &GenesisConfig, blockstore: &Blockstore, opts: ProcessOptions, ) -> BlockstoreProcessorInner { let (accounts_package_sender, _) = unbounded(); process_blockstore( genesis_config, blockstore, Vec::new(), opts, None, None, accounts_package_sender, None, ) .unwrap() } #[test] fn test_process_blockstore_with_missing_hashes() { solana_logger::setup(); let hashes_per_tick = 2; let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config(10_000); genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick); let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let parent_slot = 0; let slot = 1; let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash); assert_matches!( blockstore.write_entries( slot, 0, 0, ticks_per_slot, Some(parent_slot), true, &Arc::new(Keypair::new()), entries, 0, ), Ok(_) ); let (bank_forks, ..) = test_process_blockstore( &genesis_config, &blockstore, ProcessOptions { poh_verify: true, ..ProcessOptions::default() }, ); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); } #[test] fn test_process_blockstore_with_invalid_slot_tick_count() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; // Create a new ledger with slot 0 full of ticks let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Write slot 1 with one tick missing let parent_slot = 0; let slot = 1; let entries = create_ticks(ticks_per_slot - 1, 0, blockhash); assert_matches!( blockstore.write_entries( slot, 0, 0, ticks_per_slot, Some(parent_slot), true, &Arc::new(Keypair::new()), entries, 0, ), Ok(_) ); // Should return slot 0, the last slot on the fork that is valid let (bank_forks, ..) 
= test_process_blockstore( &genesis_config, &blockstore, ProcessOptions { poh_verify: true, ..ProcessOptions::default() }, ); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); // Write slot 2 fully let _last_slot2_entry_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash); let (bank_forks, ..) = test_process_blockstore( &genesis_config, &blockstore, ProcessOptions { poh_verify: true, ..ProcessOptions::default() }, ); // One valid fork, one bad fork. process_blockstore() should only return the valid fork assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 2]); assert_eq!(bank_forks.working_bank().slot(), 2); assert_eq!(bank_forks.root(), 0); } #[test] fn test_process_blockstore_with_slot_with_trailing_entry() { solana_logger::setup(); let GenesisConfigInfo { mint_keypair, genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let mut entries = create_ticks(ticks_per_slot, 0, blockhash); let trailing_entry = { let keypair = Keypair::new(); let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash); next_entry(&blockhash, 1, vec![tx]) }; entries.push(trailing_entry); // Tricks blockstore into writing the trailing entry by lying that there is one more tick // per slot. let parent_slot = 0; let slot = 1; assert_matches!( blockstore.write_entries( slot, 0, 0, ticks_per_slot + 1, Some(parent_slot), true, &Arc::new(Keypair::new()), entries, 0, ), Ok(_) ); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); } #[test] fn test_process_blockstore_with_incomplete_slot() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; /* Build a blockstore in the ledger with the following fork structure: slot 0 (all ticks) | slot 1 (all ticks but one) | slot 2 (all ticks) where slot 1 is incomplete (missing 1 tick at the end) */ // Create a new ledger with slot 0 full of ticks let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Write slot 1 // slot 1, points at slot 0. Missing one tick { let parent_slot = 0; let slot = 1; let mut entries = create_ticks(ticks_per_slot, 0, blockhash); blockhash = entries.last().unwrap().hash; // throw away last one entries.pop(); assert_matches!( blockstore.write_entries( slot, 0, 0, ticks_per_slot, Some(parent_slot), false, &Arc::new(Keypair::new()), entries, 0, ), Ok(_) ); } // slot 2, points at slot 1 fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) 
= test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); // slot 1 isn't "full", we stop at slot zero /* Add a complete slot such that the store looks like: slot 0 (all ticks) / \ slot 1 (all ticks but one) slot 3 (all ticks) | slot 2 (all ticks) */ let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash); // Slot 0 should not show up in the ending bank_forks_info let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); // slot 1 isn't "full", we stop at slot zero assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 3]); } #[test] fn test_process_blockstore_with_two_forks_and_squash() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; // Create a new ledger with slot 0 full of ticks let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); let mut last_entry_hash = blockhash; /* Build a blockstore in the ledger with the following fork structure: slot 0 | slot 1 / \ slot 2 | / | slot 3 | | slot 4 <-- set_root(true) */ let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Fork 1, ending at slot 3 let last_slot1_entry_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash); last_entry_hash = fill_blockstore_slot_with_ticks( &blockstore, ticks_per_slot, 2, 1, last_slot1_entry_hash, ); let last_fork1_entry_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash); // Fork 2, ending at slot 4 let last_fork2_entry_hash = fill_blockstore_slot_with_ticks( &blockstore, ticks_per_slot, 4, 1, last_slot1_entry_hash, ); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); blockstore.set_roots(vec![0, 1, 4].iter()).unwrap(); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); // One fork, other one is ignored b/c not a descendant of the root assert_eq!(frozen_bank_slots(&bank_forks), vec![4]); assert!(&bank_forks[4] .parents() .iter() .map(|bank| bank.slot()) .next() .is_none()); // Ensure bank_forks holds the right banks verify_fork_infos(&bank_forks); assert_eq!(bank_forks.root(), 4); } #[test] fn test_process_blockstore_with_two_forks() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; // Create a new ledger with slot 0 full of ticks let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); let mut last_entry_hash = blockhash; /* Build a blockstore in the ledger with the following fork structure: slot 0 | slot 1 <-- set_root(true) / \ slot 2 | / | slot 3 | | slot 4 */ let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Fork 1, ending at slot 3 let last_slot1_entry_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash); last_entry_hash = fill_blockstore_slot_with_ticks( &blockstore, ticks_per_slot, 2, 1, last_slot1_entry_hash, ); let last_fork1_entry_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash); // Fork 2, ending at slot 4 let last_fork2_entry_hash = fill_blockstore_slot_with_ticks( &blockstore, ticks_per_slot, 4, 1, last_slot1_entry_hash, ); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); blockstore.set_roots(vec![0, 1].iter()).unwrap(); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]); assert_eq!(bank_forks.working_bank().slot(), 4); assert_eq!(bank_forks.root(), 1); assert_eq!( &bank_forks[3] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[2, 1] ); assert_eq!( &bank_forks[4] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[1] ); assert_eq!(bank_forks.root(), 1); // Ensure bank_forks holds the right banks verify_fork_infos(&bank_forks); } #[test] fn test_process_blockstore_with_dead_slot() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); /* slot 0 | slot 1 / \ / \ slot 2 (dead) \ \ slot 3 */ let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let slot1_blockhash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash); blockstore.set_dead_slot(2).unwrap(); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash); let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, ProcessOptions::default()); assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 3]); assert_eq!(bank_forks.working_bank().slot(), 3); assert_eq!( &bank_forks[3] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[1, 0] ); verify_fork_infos(&bank_forks); } #[test] fn test_process_blockstore_with_dead_child() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); /* slot 0 | slot 1 / \ / \ slot 2 \ / \ slot 4 (dead) slot 3 */ let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let slot1_blockhash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash); let slot2_blockhash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash); blockstore.set_dead_slot(4).unwrap(); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash); let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, ProcessOptions::default()); // Should see the parent of the dead child assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 2, 3]); assert_eq!(bank_forks.working_bank().slot(), 3); assert_eq!( &bank_forks[3] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[1, 0] ); assert_eq!( &bank_forks[2] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[1, 0] ); assert_eq!(bank_forks.working_bank().slot(), 3); verify_fork_infos(&bank_forks); } #[test] fn test_root_with_all_dead_children() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); /* slot 0 / \ / \ slot 1 (dead) slot 2 (dead) */ let blockstore = Blockstore::open(ledger_path.path()).unwrap(); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash); blockstore.set_dead_slot(1).unwrap(); blockstore.set_dead_slot(2).unwrap(); let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, ProcessOptions::default()); // Should see only the parent of the dead children assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); verify_fork_infos(&bank_forks); } #[test] fn test_process_blockstore_epoch_boundary_root() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; // Create a new ledger with slot 0 full of ticks let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let mut last_entry_hash = blockhash; let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Let `last_slot` be the number of slots in the first two epochs let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new()); let last_slot = epoch_schedule.get_last_slot_in_epoch(1); // Create a single chain of slots with all indexes in the range [0, v + 1] for i in 1..=last_slot + 1 { last_entry_hash = fill_blockstore_slot_with_ticks( &blockstore, ticks_per_slot, i, i - 1, last_entry_hash, ); } // Set a root on the last slot of the last confirmed epoch let rooted_slots: Vec<Slot> = (0..=last_slot).collect(); blockstore.set_roots(rooted_slots.iter()).unwrap(); // Set a root on the next slot of the confirmed epoch blockstore .set_roots(std::iter::once(&(last_slot + 1))) .unwrap(); // Check that we can properly restart the ledger / leader scheduler doesn't fail let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); // There is one fork, head is last_slot + 1 assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]); // The latest root should have purged all its parents assert!(&bank_forks[last_slot + 1] .parents() .iter() .map(|bank| bank.slot()) .next() .is_none()); } #[test] fn test_first_err() { assert_eq!(first_err(&[Ok(())]), Ok(())); assert_eq!( first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]), Err(TransactionError::AlreadyProcessed) ); assert_eq!( first_err(&[ Ok(()), Err(TransactionError::AlreadyProcessed), Err(TransactionError::AccountInUse) ]), Err(TransactionError::AlreadyProcessed) ); assert_eq!( first_err(&[ Ok(()), Err(TransactionError::AccountInUse), Err(TransactionError::AlreadyProcessed) ]), Err(TransactionError::AccountInUse) ); assert_eq!( first_err(&[ Err(TransactionError::AccountInUse), Ok(()), Err(TransactionError::AlreadyProcessed) ]), Err(TransactionError::AccountInUse) ); } #[test] fn test_process_empty_entry_is_registered() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair = Keypair::new(); let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash()); let tx = system_transaction::transfer( &mint_keypair, &keypair.pubkey(), 1, slot_entries.last().unwrap().hash, ); // First, ensure the TX is rejected because of the unregistered last ID assert_eq!( bank.process_transaction(&tx), Err(TransactionError::BlockhashNotFound) ); // Now ensure the TX is accepted despite pointing to the ID of an empty entry. process_entries_for_tests(&bank, slot_entries, true, None, None).unwrap(); assert_eq!(bank.process_transaction(&tx), Ok(())); } #[test] fn test_process_ledger_simple() { solana_logger::setup(); let leader_pubkey = solana_sdk::pubkey::new_rand(); let mint = 100; let hashes_per_tick = 10; let GenesisConfigInfo { mut genesis_config, mint_keypair, .. 
} = create_genesis_config_with_leader(mint, &leader_pubkey, 50); genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick); let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger_auto_delete!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); let deducted_from_mint = 3; let mut entries = vec![]; let blockhash = genesis_config.hash(); for _ in 0..deducted_from_mint { // Transfer one token from the mint to a random account let keypair = Keypair::new(); let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash); let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]); entries.push(entry); // Add a second Transaction that will produce a // InstructionError<0, ResultWithNegativeLamports> error when processed let keypair2 = Keypair::new(); let tx = system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash); let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]); entries.push(entry); } let remaining_hashes = hashes_per_tick - entries.len() as u64; let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]); entries.push(tick_entry); // Fill up the rest of slot 1 with ticks entries.extend(create_ticks( genesis_config.ticks_per_slot - 1, genesis_config.poh_config.hashes_per_tick.unwrap(), last_entry_hash, )); let last_blockhash = entries.last().unwrap().hash; let blockstore = Blockstore::open(ledger_path.path()).unwrap(); blockstore .write_entries( 1, 0, 0, genesis_config.ticks_per_slot, None, true, &Arc::new(Keypair::new()), entries, 0, ) .unwrap(); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]); assert_eq!(bank_forks.root(), 0); assert_eq!(bank_forks.working_bank().slot(), 1); let bank = bank_forks[1].clone(); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), mint - deducted_from_mint ); assert_eq!(bank.tick_height(), 2 * genesis_config.ticks_per_slot); assert_eq!(bank.last_blockhash(), last_blockhash); } #[test] fn test_process_ledger_with_one_tick_per_slot() { let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config(123); genesis_config.ticks_per_slot = 1; let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); let bank = bank_forks[0].clone(); assert_eq!(bank.tick_height(), 1); } #[test] fn test_process_ledger_options_override_threads() { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let opts = ProcessOptions { override_num_threads: Some(1), accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; test_process_blockstore(&genesis_config, &blockstore, opts); PAR_THREAD_POOL.with(|pool| { assert_eq!(pool.borrow().current_num_threads(), 1); }); } #[test] fn test_process_ledger_options_full_leader_cache() { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(123); let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let opts = ProcessOptions { full_leader_cache: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (_bank_forks, leader_schedule, _) = test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(leader_schedule.max_schedules(), std::usize::MAX); } #[test] fn test_process_ledger_options_entry_callback() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(100); let (ledger_path, last_entry_hash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let blockhash = genesis_config.hash(); let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()]; let tx = system_transaction::transfer(&mint_keypair, &keypairs[0].pubkey(), 1, blockhash); let entry_1 = next_entry(&last_entry_hash, 1, vec![tx]); let tx = system_transaction::transfer(&mint_keypair, &keypairs[1].pubkey(), 1, blockhash); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); let mut entries = vec![entry_1, entry_2]; entries.extend(create_ticks( genesis_config.ticks_per_slot, 0, last_entry_hash, )); blockstore .write_entries( 1, 0, 0, genesis_config.ticks_per_slot, None, true, &Arc::new(Keypair::new()), entries, 0, ) .unwrap(); let callback_counter: Arc<RwLock<usize>> = Arc::default(); let entry_callback = { let counter = callback_counter.clone(); let pubkeys: Vec<Pubkey> = keypairs.iter().map(|k| k.pubkey()).collect(); Arc::new(move |bank: &Bank| { let mut counter = counter.write().unwrap(); assert_eq!(bank.get_balance(&pubkeys[*counter]), 1); assert_eq!(bank.get_balance(&pubkeys[*counter + 1]), 0); *counter += 1; }) }; let opts = ProcessOptions { override_num_threads: Some(1), entry_callback: Some(entry_callback), accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(*callback_counter.write().unwrap(), 2); } #[test] fn test_process_entries_tick() { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); // ensure bank can process a tick assert_eq!(bank.tick_height(), 0); let tick = next_entry(&genesis_config.hash(), 1, vec![]); assert_eq!( process_entries_for_tests(&bank, vec![tick], true, None, None), Ok(()) ); assert_eq!(bank.tick_height(), 1); } #[test] fn test_process_entries_2_entries_collision() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let blockhash = bank.last_blockhash(); // ensure bank can process 2 entries that have a common account and no tick is registered let tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 2, bank.last_blockhash(), ); let entry_1 = next_entry(&blockhash, 1, vec![tx]); let tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 2, bank.last_blockhash(), ); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None), Ok(()) ); assert_eq!(bank.get_balance(&keypair1.pubkey()), 2); assert_eq!(bank.get_balance(&keypair2.pubkey()), 2); assert_eq!(bank.last_blockhash(), blockhash); } #[test] fn test_process_entries_2_txes_collision() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); // fund: put 4 in each of 1 and 2 assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_)); assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_)); // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry let entry_1_to_mint = next_entry( &bank.last_blockhash(), 1, vec![system_transaction::transfer( &keypair1, &mint_keypair.pubkey(), 1, bank.last_blockhash(), )], ); let entry_2_to_3_mint_to_1 = next_entry( &entry_1_to_mint.hash, 1, vec![ system_transaction::transfer( &keypair2, &keypair3.pubkey(), 2, bank.last_blockhash(), ), // should be fine system_transaction::transfer( &keypair1, &mint_keypair.pubkey(), 2, bank.last_blockhash(), ), // will collide ], ); assert_eq!( process_entries_for_tests( &bank, vec![entry_1_to_mint, entry_2_to_3_mint_to_1], false, None, None, ), Ok(()) ); assert_eq!(bank.get_balance(&keypair1.pubkey()), 1); assert_eq!(bank.get_balance(&keypair2.pubkey()), 2); assert_eq!(bank.get_balance(&keypair3.pubkey()), 2); } #[test] fn test_process_entries_2_txes_collision_and_error() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); let keypair4 = Keypair::new(); // fund: put 4 in each of 1 and 2 assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_)); assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_)); assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_)); // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry let entry_1_to_mint = next_entry( &bank.last_blockhash(), 1, vec![ system_transaction::transfer( &keypair1, &mint_keypair.pubkey(), 1, bank.last_blockhash(), ), system_transaction::transfer( &keypair4, &keypair4.pubkey(), 1, Hash::default(), // Should cause a transaction failure with BlockhashNotFound ), ], ); let entry_2_to_3_mint_to_1 = next_entry( &entry_1_to_mint.hash, 1, vec![ system_transaction::transfer( &keypair2, &keypair3.pubkey(), 2, bank.last_blockhash(), ), // should be fine system_transaction::transfer( &keypair1, &mint_keypair.pubkey(), 2, bank.last_blockhash(), ), // will collide ], ); assert!(process_entries_for_tests( &bank, vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()], false, None, None, ) .is_err()); // First transaction in first entry succeeded, so keypair1 lost 1 lamport assert_eq!(bank.get_balance(&keypair1.pubkey()), 3); assert_eq!(bank.get_balance(&keypair2.pubkey()), 4); // Check all accounts are unlocked let txs1 = entry_1_to_mint.transactions; let txs2 = entry_2_to_3_mint_to_1.transactions; let batch1 = bank.prepare_entry_batch(txs1).unwrap(); for result in batch1.lock_results() { assert!(result.is_ok()); } // txs1 and txs2 have accounts that conflict, so we must drop txs1 first drop(batch1); let batch2 = bank.prepare_entry_batch(txs2).unwrap(); for result in batch2.lock_results() { assert!(result.is_ok()); } } #[test] fn test_process_entries_2nd_entry_collision_with_self_and_error() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); // fund: put some money in each of 1 and 2 assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_)); assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_)); // 3 entries: first has a transfer, 2nd has a conflict with 1st, 3rd has a conflict with itself let entry_1_to_mint = next_entry( &bank.last_blockhash(), 1, vec![system_transaction::transfer( &keypair1, &mint_keypair.pubkey(), 1, bank.last_blockhash(), )], ); // should now be: // keypair1=4 // keypair2=4 // keypair3=0 let entry_2_to_3_and_1_to_mint = next_entry( &entry_1_to_mint.hash, 1, vec![ system_transaction::transfer( &keypair2, &keypair3.pubkey(), 2, bank.last_blockhash(), ), // should be fine system_transaction::transfer( &keypair1, &mint_keypair.pubkey(), 2, bank.last_blockhash(), ), // will collide with predecessor ], ); // should now be: // keypair1=2 // keypair2=2 // keypair3=2 let entry_conflict_itself = next_entry( &entry_2_to_3_and_1_to_mint.hash, 1, vec![ system_transaction::transfer( &keypair1, &keypair3.pubkey(), 1, bank.last_blockhash(), ), system_transaction::transfer( &keypair1, &keypair2.pubkey(), 1, bank.last_blockhash(), ), // should be fine ], ); // would now be: // keypair1=0 // keypair2=3 // keypair3=3 assert!(process_entries_for_tests( &bank, vec![ entry_1_to_mint, entry_2_to_3_and_1_to_mint, entry_conflict_itself, ], false, None, None, ) .is_err()); // last entry should have been aborted before par_execute_entries assert_eq!(bank.get_balance(&keypair1.pubkey()), 2); assert_eq!(bank.get_balance(&keypair2.pubkey()), 2); assert_eq!(bank.get_balance(&keypair3.pubkey()), 2); } #[test] fn test_process_entries_2_entries_par() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); let keypair4 = Keypair::new(); //load accounts let tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); // ensure bank can process 2 entries that do not have a common account and no tick is registered let blockhash = bank.last_blockhash(); let tx = system_transaction::transfer(&keypair1, &keypair3.pubkey(), 1, bank.last_blockhash()); let entry_1 = next_entry(&blockhash, 1, vec![tx]); let tx = system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); assert_eq!(bank.get_balance(&keypair4.pubkey()), 1); assert_eq!(bank.last_blockhash(), blockhash); } #[test] fn test_process_entry_tx_random_execution_with_error() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1_000_000_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; // large enough to scramble locks and results let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect(); // give everybody one lamport for keypair in &keypairs { bank.transfer(1, &mint_keypair, &keypair.pubkey()) .expect("funding failed"); } let mut hash = bank.last_blockhash(); let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) .map(|i| { let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY) .map(|j| { system_transaction::transfer( &keypairs[i + j], &keypairs[i + j + NUM_TRANSFERS].pubkey(), 1, bank.last_blockhash(), ) }) .collect::<Vec<_>>(); transactions.push(system_transaction::create_account( &mint_keypair, &present_account_key, // puts a TX error in results bank.last_blockhash(), 1, 0, &solana_sdk::pubkey::new_rand(), )); next_entry_mut(&mut hash, 0, transactions) }) .collect(); assert_eq!( process_entries_for_tests(&bank, entries, true, None, None), Ok(()) ); } #[test] fn test_process_entry_tx_random_execution_no_error() { // entropy multiplier should be big enough to provide sufficient entropy // but small enough to not take too much time while executing the test. let entropy_multiplier: usize = 25; let initial_lamports = 100; // number of accounts need to be in multiple of 4 for correct // execution of the test. let num_accounts = entropy_multiplier * 4; let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let mut keypairs: Vec<Keypair> = vec![]; for _ in 0..num_accounts { let keypair = Keypair::new(); let create_account_tx = system_transaction::transfer( &mint_keypair, &keypair.pubkey(), 0, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&create_account_tx), Ok(())); assert_matches!( bank.transfer(initial_lamports, &mint_keypair, &keypair.pubkey()), Ok(_) ); keypairs.push(keypair); } let mut tx_vector: Vec<Transaction> = vec![]; for i in (0..num_accounts).step_by(4) { tx_vector.append(&mut vec![ system_transaction::transfer( &keypairs[i + 1], &keypairs[i].pubkey(), initial_lamports, bank.last_blockhash(), ), system_transaction::transfer( &keypairs[i + 3], &keypairs[i + 2].pubkey(), initial_lamports, bank.last_blockhash(), ), ]); } // Transfer lamports to each other let entry = next_entry(&bank.last_blockhash(), 1, tx_vector); assert_eq!( process_entries_for_tests(&bank, vec![entry], true, None, None), Ok(()) ); bank.squash(); // Even number keypair should have balance of 2 * initial_lamports and // odd number keypair should have balance of 0, which proves // that even in case of random order of execution, overall state remains // consistent. for (i, keypair) in keypairs.iter().enumerate() { if i % 2 == 0 { assert_eq!(bank.get_balance(&keypair.pubkey()), 2 * initial_lamports); } else { assert_eq!(bank.get_balance(&keypair.pubkey()), 0); } } } #[test] fn test_process_entries_2_entries_tick() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); let keypair4 = Keypair::new(); //load accounts let tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let blockhash = bank.last_blockhash(); while blockhash == bank.last_blockhash() { bank.register_tick(&Hash::default()); } // ensure bank can process 2 entries that do not have a common account and tick is registered let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, blockhash); let entry_1 = next_entry(&blockhash, 1, vec![tx]); let tick = next_entry(&entry_1.hash, 1, vec![]); let tx = system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&tick.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests( &bank, vec![entry_1, tick, entry_2.clone()], true, None, None ), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); assert_eq!(bank.get_balance(&keypair4.pubkey()), 1); // ensure that an error is returned for an empty account (keypair2) let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash()); let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests(&bank, vec![entry_3], true, None, None), Err(TransactionError::AccountNotFound) ); } #[test] fn test_update_transaction_statuses() { // Make sure instruction errors still update the signature cache let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(11_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let pubkey = solana_sdk::pubkey::new_rand(); bank.transfer(1_000, &mint_keypair, &pubkey).unwrap(); assert_eq!(bank.transaction_count(), 1); assert_eq!(bank.get_balance(&pubkey), 1_000); assert_eq!( bank.transfer(10_001, &mint_keypair, &pubkey), Err(TransactionError::InstructionError( 0, SystemError::ResultWithNegativeLamports.into(), )) ); assert_eq!( bank.transfer(10_001, &mint_keypair, &pubkey), Err(TransactionError::AlreadyProcessed) ); // Make sure other errors don't update the signature cache let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default()); let signature = tx.signatures[0]; // Should fail with blockhash not found assert_eq!( bank.process_transaction(&tx).map(|_| signature), Err(TransactionError::BlockhashNotFound) ); // Should fail again with blockhash not found assert_eq!( bank.process_transaction(&tx).map(|_| signature), Err(TransactionError::BlockhashNotFound) ); } #[test] fn test_update_transaction_statuses_fail() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(11_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let success_tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 1, bank.last_blockhash(), ); let fail_tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 2, bank.last_blockhash(), ); let entry_1_to_mint = next_entry( &bank.last_blockhash(), 1, vec![ success_tx, fail_tx.clone(), // will collide ], ); assert_eq!( process_entries_for_tests(&bank, vec![entry_1_to_mint], false, None, None), Err(TransactionError::AccountInUse) ); // Should not see duplicate signature error assert_eq!(bank.process_transaction(&fail_tx), Ok(())); } #[test] fn test_halt_at_slot_starting_snapshot_root() { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); // Create roots at slots 0, 1 let forks = tr(0) / tr(1); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); blockstore.add_tree( forks, false, true, genesis_config.ticks_per_slot, genesis_config.hash(), ); blockstore.set_roots(vec![0, 1].iter()).unwrap(); // Specify halting at slot 0 let opts = ProcessOptions { poh_verify: true, dev_halt_at_slot: Some(0), accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); // Should be able to fetch slot 0 because we specified halting at slot 0, even // if there is a greater root at slot 1. assert!(bank_forks.get(0).is_some()); } #[test] fn test_process_blockstore_from_root() { let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config(123); let ticks_per_slot = 1; genesis_config.ticks_per_slot = ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); /* Build a blockstore in the ledger with the following fork structure: slot 0 (all ticks) | slot 1 (all ticks) | slot 2 (all ticks) | slot 3 (all ticks) -> root | slot 4 (all ticks) | slot 5 (all ticks) -> root | slot 6 (all ticks) */ let mut last_hash = blockhash; for i in 0..6 { last_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash); } blockstore.set_roots(vec![3, 5].iter()).unwrap(); // Set up bank1 let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config)); let bank0 = bank_forks.get(0).unwrap().clone(); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let recyclers = VerifyRecyclers::default(); process_bank_0(&bank0, &blockstore, &opts, &recyclers, None); let bank1 = bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); confirm_full_slot( &blockstore, &bank1, &opts, &recyclers, &mut ConfirmationProgress::new(bank0.last_blockhash()), None, None, &mut ExecuteTimings::default(), ) .unwrap(); bank_forks.set_root( 1, &solana_runtime::accounts_background_service::AbsRequestSender::default(), None, ); // Test process_blockstore_from_root() from slot 1 onwards let (accounts_package_sender, _) = unbounded(); let (bank_forks, ..) 
= do_process_blockstore_from_root( &blockstore, bank_forks, &opts, &recyclers, None, None, None, accounts_package_sender, None, ) .unwrap(); assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]); assert_eq!(bank_forks.working_bank().slot(), 6); assert_eq!(bank_forks.root(), 5); // Verify the parents of the head of the fork assert_eq!( &bank_forks[6] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[5] ); // Check that bank forks has the correct banks verify_fork_infos(&bank_forks); } /// Test that processing the blockstore is aware of incremental snapshots. When processing the /// blockstore from a root, like what happens when loading from a snapshot, there may be new /// roots that cross a full snapshot interval. In these cases, a bank snapshot must be taken, /// so that a full snapshot archive is created and available by the time the background /// services spin up. /// /// For this test, process enough roots to cross the full snapshot interval multiple times. /// Ensure afterwards that the snapshots were created. #[test] fn test_process_blockstore_from_root_with_snapshots() { solana_logger::setup(); let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config(123); let ticks_per_slot = 1; genesis_config.ticks_per_slot = ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); const ROOT_INTERVAL_SLOTS: Slot = 2; const FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = ROOT_INTERVAL_SLOTS * 5; const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 4; let mut last_hash = blockhash; for i in 1..=LAST_SLOT { last_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i, i - 1, last_hash); } let roots_to_set = (0..=LAST_SLOT) .step_by(ROOT_INTERVAL_SLOTS as usize) .collect_vec(); blockstore.set_roots(roots_to_set.iter()).unwrap(); // Set up bank1 let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config)); let bank0 = bank_forks.get(0).unwrap().clone(); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let recyclers = VerifyRecyclers::default(); process_bank_0(&bank0, &blockstore, &opts, &recyclers, None); let slot_start_processing = 1; let bank = bank_forks.insert(Bank::new_from_parent( &bank0, &Pubkey::default(), slot_start_processing, )); confirm_full_slot( &blockstore, &bank, &opts, &recyclers, &mut ConfirmationProgress::new(bank0.last_blockhash()), None, None, &mut ExecuteTimings::default(), ) .unwrap(); bank_forks.set_root( 1, &solana_runtime::accounts_background_service::AbsRequestSender::default(), None, ); let bank_snapshots_tempdir = TempDir::new().unwrap(); let snapshot_config = SnapshotConfig { full_snapshot_archive_interval_slots: FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, bank_snapshots_dir: bank_snapshots_tempdir.path().to_path_buf(), ..SnapshotConfig::default() }; let (accounts_package_sender, accounts_package_receiver) = unbounded(); do_process_blockstore_from_root( &blockstore, bank_forks, &opts, &recyclers, None, None, Some(&snapshot_config), accounts_package_sender.clone(), None, ) .unwrap(); // The `drop()` is necessary here in order to call `.iter()` on the channel below drop(accounts_package_sender); // Ensure all the AccountsPackages were created and sent to the AccountsPackageReceiver let received_accounts_package_slots = accounts_package_receiver .iter() .map(|accounts_package| accounts_package.slot) .collect::<Vec<_>>(); let expected_slots 
= (slot_start_processing..=LAST_SLOT) .filter(|slot| slot % FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS == 0) .collect::<Vec<_>>(); assert_eq!(received_accounts_package_slots, expected_slots); // Ensure all the bank snapshots were created let bank_snapshots = snapshot_utils::get_bank_snapshots(&bank_snapshots_tempdir); let mut bank_snapshot_slots = bank_snapshots .into_iter() .map(|bank_snapshot| bank_snapshot.slot) .collect::<Vec<_>>(); bank_snapshot_slots.sort_unstable(); assert_eq!(bank_snapshot_slots, expected_slots); } #[test] #[ignore] fn test_process_entries_stress() { // this test throws lots of rayon threads at process_entries() // finds bugs in very low-layer stuff solana_logger::setup(); let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(1_000_000_000); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect(); // give everybody one lamport for keypair in &keypairs { bank.transfer(1, &mint_keypair, &keypair.pubkey()) .expect("funding failed"); } let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let mut i = 0; let mut hash = bank.last_blockhash(); let mut root: Option<Arc<Bank>> = None; loop { let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) .map(|i| { next_entry_mut(&mut hash, 0, { let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY) .map(|i| { system_transaction::transfer( &keypairs[i], &keypairs[i + NUM_TRANSFERS].pubkey(), 1, bank.last_blockhash(), ) }) .collect::<Vec<_>>(); transactions.push(system_transaction::create_account( &mint_keypair, &present_account_key, // puts a TX error in results bank.last_blockhash(), 100, 100, &solana_sdk::pubkey::new_rand(), )); transactions }) }) .collect(); info!("paying iteration {}", i); process_entries_for_tests(&bank, entries, true, None, None).expect("paying failed"); let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) .map(|i| { next_entry_mut( &mut hash, 0, (i..i + NUM_TRANSFERS_PER_ENTRY) .map(|i| { system_transaction::transfer( &keypairs[i + NUM_TRANSFERS], &keypairs[i].pubkey(), 1, bank.last_blockhash(), ) }) .collect::<Vec<_>>(), ) }) .collect(); info!("refunding iteration {}", i); process_entries_for_tests(&bank, entries, true, None, None).expect("refunding failed"); // advance to next block process_entries_for_tests( &bank, (0..bank.ticks_per_slot()) .map(|_| next_entry_mut(&mut hash, 1, vec![])) .collect::<Vec<_>>(), true, None, None, ) .expect("process ticks failed"); if i % 16 == 0 { if let Some(old_root) = root { old_root.squash(); } root = Some(bank.clone()); } i += 1; bank = Arc::new(Bank::new_from_parent( &bank, &Pubkey::default(), bank.slot() + thread_rng().gen_range(1, 3), )); } } #[test] fn test_process_ledger_ticks_ordering() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(100); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); let genesis_hash = genesis_config.hash(); let keypair = Keypair::new(); // Simulate a slot of virtual ticks, which creates a new blockhash let mut entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_hash); // The new blockhash is going to be the hash of the last tick in the block let new_blockhash = entries.last().unwrap().hash; // Create a transaction that references the new blockhash; it should still // be able to find the blockhash if we process transactions all in the same // batch let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, new_blockhash); let entry = next_entry(&new_blockhash, 1, vec![tx]); entries.push(entry); process_entries_for_tests(&bank0, entries, true, None, None).unwrap(); assert_eq!(bank0.get_balance(&keypair.pubkey()), 1) } fn get_epoch_schedule( genesis_config: &GenesisConfig, account_paths: Vec<PathBuf>, ) -> EpochSchedule { let bank = Bank::new_with_paths_for_tests( genesis_config, account_paths, None, None, AccountSecondaryIndexes::default(), false, AccountShrinkThreshold::default(), false, ); *bank.epoch_schedule() } fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> { let mut slots: Vec<_> = bank_forks.frozen_banks().keys().cloned().collect(); slots.sort_unstable(); slots } // Check that `bank_forks` contains all the ancestors and banks for each fork identified in // `bank_forks_info` fn verify_fork_infos(bank_forks: &BankForks) { for slot in frozen_bank_slots(bank_forks) { let head_bank = &bank_forks[slot]; let mut parents = head_bank.parents(); parents.push(head_bank.clone()); // Ensure the tip of each fork and all its parents are in the given bank_forks for parent in parents { let parent_bank = &bank_forks[parent.slot()]; assert_eq!(parent_bank.slot(), parent.slot()); assert!(parent_bank.is_frozen()); } } } #[test] fn test_get_first_error() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(1_000_000_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let keypair = Keypair::new(); // Create an array of two transactions which throw different errors let account_not_found_tx = system_transaction::transfer( &keypair, &solana_sdk::pubkey::new_rand(), 42, bank.last_blockhash(), ); let account_not_found_sig = account_not_found_tx.signatures[0]; let invalid_blockhash_tx = system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 42, Hash::default(), ); let txs = vec![account_not_found_tx, invalid_blockhash_tx]; let batch = bank.prepare_batch_for_tests(txs); let ( TransactionResults { fee_collection_results, .. }, _balances, ) = batch.bank().load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, false, false, false, &mut ExecuteTimings::default(), ); let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap(); assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound); assert_eq!(signature, account_not_found_sig); } #[test] fn test_replay_vote_sender() { let validator_keypairs: Vec<_> = (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); let GenesisConfigInfo { genesis_config, voting_keypair: _, ..
} = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, vec![100; validator_keypairs.len()], ); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); bank0.freeze(); let bank1 = Arc::new(Bank::new_from_parent( &bank0, &solana_sdk::pubkey::new_rand(), 1, )); // The new blockhash is going to be the hash of the last tick in the block let bank_1_blockhash = bank1.last_blockhash(); // Create a transaction that references the new blockhash; it should still // be able to find the blockhash if we process transactions all in the same // batch let mut expected_successful_voter_pubkeys = BTreeSet::new(); let vote_txs: Vec<_> = validator_keypairs .iter() .enumerate() .map(|(i, validator_keypairs)| { if i % 3 == 0 { // These votes are correct expected_successful_voter_pubkeys .insert(validator_keypairs.vote_keypair.pubkey()); vote_transaction::new_vote_transaction( vec![0], bank0.hash(), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ) } else if i % 3 == 1 { // These have the wrong authorized voter vote_transaction::new_vote_transaction( vec![0], bank0.hash(), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &Keypair::new(), None, ) } else { // These have an invalid vote for non-existent bank 2 vote_transaction::new_vote_transaction( vec![bank1.slot() + 1], bank0.hash(), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ) } }) .collect(); let entry = next_entry(&bank_1_blockhash, 1, vote_txs); let (replay_vote_sender, replay_vote_receiver) = unbounded(); let _ = process_entries_for_tests(&bank1, vec![entry], true, None, Some(&replay_vote_sender)); let successes: BTreeSet<Pubkey> = replay_vote_receiver .try_iter() .map(|(vote_pubkey, _, _)| vote_pubkey) .collect(); assert_eq!(successes, expected_successful_voter_pubkeys); } fn make_slot_with_vote_tx( blockstore: &Blockstore, ticks_per_slot: u64, tx_landed_slot: Slot, parent_slot: Slot, parent_blockhash: &Hash, vote_tx: Transaction, slot_leader_keypair: &Arc<Keypair>, ) { // Add votes to `last_slot` so that `root` will be confirmed let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]); let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash); entries.insert(0, vote_entry); blockstore .write_entries( tx_landed_slot, 0, 0, ticks_per_slot, Some(parent_slot), true, slot_leader_keypair, entries, 0, ) .unwrap(); } fn run_test_process_blockstore_with_supermajority_root(blockstore_root: Option<Slot>) { solana_logger::setup(); /* Build fork structure: slot 0 | slot 1 <- (blockstore root) / \ slot 2 | | | slot 4 | slot 5 | `expected_root_slot` / \ ...
minor fork / `last_slot` | `really_last_slot` */ let starting_fork_slot = 5; let mut main_fork = tr(starting_fork_slot); let mut main_fork_ref = main_fork.root_mut().get_mut(); // Make enough slots to make a root slot > blockstore_root let expected_root_slot = starting_fork_slot + blockstore_root.unwrap_or(0); let really_expected_root_slot = expected_root_slot + 1; let last_main_fork_slot = expected_root_slot + MAX_LOCKOUT_HISTORY as u64 + 1; let really_last_main_fork_slot = last_main_fork_slot + 1; // Make `minor_fork` let last_minor_fork_slot = really_last_main_fork_slot + 1; let minor_fork = tr(last_minor_fork_slot); // Make `main_fork` for slot in starting_fork_slot + 1..last_main_fork_slot { if slot - 1 == expected_root_slot { main_fork_ref.push_front(minor_fork.clone()); } main_fork_ref.push_front(tr(slot)); main_fork_ref = main_fork_ref.front_mut().unwrap().get_mut(); } let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / main_fork); let validator_keypairs = ValidatorVoteKeypairs::new_rand(); let GenesisConfigInfo { genesis_config, .. } = genesis_utils::create_genesis_config_with_vote_accounts( 10_000, &[&validator_keypairs], vec![100], ); let ticks_per_slot = genesis_config.ticks_per_slot(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash()); if let Some(blockstore_root) = blockstore_root { blockstore .set_roots(std::iter::once(&blockstore_root)) .unwrap(); } let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts.clone()); // prepare to add votes let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash(); let last_vote_blockhash = bank_forks .get(last_main_fork_slot - 1) .unwrap() .last_blockhash(); let slots: Vec<_> = (expected_root_slot..last_main_fork_slot).collect(); let vote_tx = vote_transaction::new_vote_transaction( slots, last_vote_bank_hash, last_vote_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ); // Add votes to `last_slot` so that `root` will be confirmed let leader_keypair = Arc::new(validator_keypairs.node_keypair); make_slot_with_vote_tx( &blockstore, ticks_per_slot, last_main_fork_slot, last_main_fork_slot - 1, &last_vote_blockhash, vote_tx, &leader_keypair, ); let (bank_forks, ..)
= test_process_blockstore(&genesis_config, &blockstore, opts.clone()); assert_eq!(bank_forks.root(), expected_root_slot); assert_eq!( bank_forks.frozen_banks().len() as u64, last_minor_fork_slot - really_expected_root_slot + 1 ); // Minor fork at `last_main_fork_slot + 1` was above the `expected_root_slot` // so should not have been purged // // Fork at slot 2 was purged because it was below the `expected_root_slot` for slot in 0..=last_minor_fork_slot { // this slot will be created below if slot == really_last_main_fork_slot { continue; } if slot >= expected_root_slot { let bank = bank_forks.get(slot).unwrap(); assert_eq!(bank.slot(), slot); assert!(bank.is_frozen()); } else { assert!(bank_forks.get(slot).is_none()); } } // really prepare to add votes let last_vote_bank_hash = bank_forks.get(last_main_fork_slot).unwrap().hash(); let last_vote_blockhash = bank_forks .get(last_main_fork_slot) .unwrap() .last_blockhash(); let slots: Vec<_> = vec![last_main_fork_slot]; let vote_tx = vote_transaction::new_vote_transaction( slots, last_vote_bank_hash, last_vote_blockhash, &leader_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ); // Add votes to `really_last_slot` so that `root` will be confirmed again make_slot_with_vote_tx( &blockstore, ticks_per_slot, really_last_main_fork_slot, last_main_fork_slot, &last_vote_blockhash, vote_tx, &leader_keypair, ); let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(bank_forks.root(), really_expected_root_slot); } #[test] fn test_process_blockstore_with_supermajority_root_without_blockstore_root() { run_test_process_blockstore_with_supermajority_root(None); } #[test] fn test_process_blockstore_with_supermajority_root_with_blockstore_root() { run_test_process_blockstore_with_supermajority_root(Some(1)) } #[test] #[allow(clippy::field_reassign_with_default)] fn test_supermajority_root_from_vote_accounts() { let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> HashMap<Pubkey, (u64, VoteAccount)> { roots_stakes .into_iter() .map(|(root, stake)| { let mut vote_state = VoteState::default(); vote_state.root_slot = Some(root); let mut vote_account = AccountSharedData::new( 1, VoteState::size_of(), &solana_vote_program::id(), ); let versioned = VoteStateVersions::new_current(vote_state); VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap(); ( solana_sdk::pubkey::new_rand(), (stake, VoteAccount::from(vote_account)), ) }) .collect() }; let total_stake = 10; let slot = 100; // Supermajority root should be None assert!( supermajority_root_from_vote_accounts(slot, total_stake, &HashMap::default()).is_none() ); // Supermajority root should be None let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)]; let accounts = convert_to_vote_accounts(roots_stakes); assert!(supermajority_root_from_vote_accounts(slot, total_stake, &accounts).is_none()); // Supermajority root should be 4, has 7/10 of the stake let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)]; let accounts = convert_to_vote_accounts(roots_stakes); assert_eq!( supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(), 4 ); // Supermajority root should be 8, it has 7/10 of the stake let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)]; let accounts = convert_to_vote_accounts(roots_stakes); assert_eq!( supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(), 8 ); } }
35.489367
167
0.560286
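The closing test in the Solana ledger file above (`test_supermajority_root_from_vote_accounts`) pins down a stake-weighted scan: walk the voted roots from highest to lowest and return the first one whose accumulated stake exceeds two thirds of the total. A minimal sketch of that computation with plain tuples in place of the crate's `VoteAccount` machinery — the strict `> 2/3` threshold is inferred from the test's 7-of-10 cases, so treat this as an illustration rather than the crate's exact implementation:

/// Highest root backed by strictly more than 2/3 of `total_stake`, if any.
/// Mirrors the behavior the assertions above check: (8,1),(3,1),(4,1),(8,5)
/// out of 10 total yields Some(4), while four single-stake roots yield None.
fn supermajority_root(mut roots_stakes: Vec<(u64, u64)>, total_stake: u64) -> Option<u64> {
    // Highest roots first, so the first threshold crossing is the best root.
    roots_stakes.sort_unstable_by(|a, b| b.0.cmp(&a.0));
    let mut stake_so_far: u128 = 0;
    for (root, stake) in roots_stakes {
        stake_so_far += stake as u128;
        if stake_so_far * 3 > total_stake as u128 * 2 {
            return Some(root);
        }
    }
    None
}

Sorting descending means the first root to cross the threshold is the highest root with supermajority support, which is exactly what the assertions above expect.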
48413ce8346f65b48ac00a92d3ca893b80a6c9b6
166
use crate::*; use frame_support::dispatch::{DispatchError, Dispatchable}; use frame_support::{assert_noop, assert_ok}; use mock::*; use parity_scale_codec::Encode;
20.75
59
0.76506
dd7ae1b2bc890a856baf71b0623a42470b83e4b1
4,762
#[doc = "Register `lo_config_2420` reader"] pub struct R(crate::R<LO_CONFIG_2420_SPEC>); impl core::ops::Deref for R { type Target = crate::R<LO_CONFIG_2420_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<LO_CONFIG_2420_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<LO_CONFIG_2420_SPEC>) -> Self { R(reader) } } #[doc = "Register `lo_config_2420` writer"] pub struct W(crate::W<LO_CONFIG_2420_SPEC>); impl core::ops::Deref for W { type Target = crate::W<LO_CONFIG_2420_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<LO_CONFIG_2420_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<LO_CONFIG_2420_SPEC>) -> Self { W(writer) } } #[doc = "Field `adpll_sdm_dither_en_2420` reader - "] pub struct ADPLL_SDM_DITHER_EN_2420_R(crate::FieldReader<bool, bool>); impl ADPLL_SDM_DITHER_EN_2420_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { ADPLL_SDM_DITHER_EN_2420_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for ADPLL_SDM_DITHER_EN_2420_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `adpll_sdm_dither_en_2420` writer - "] pub struct ADPLL_SDM_DITHER_EN_2420_W<'a> { w: &'a mut W, } impl<'a> ADPLL_SDM_DITHER_EN_2420_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Field `kcal_ratio_2420` reader - "] pub struct KCAL_RATIO_2420_R(crate::FieldReader<u16, u16>); impl KCAL_RATIO_2420_R { #[inline(always)] pub(crate) fn new(bits: u16) -> Self { KCAL_RATIO_2420_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for KCAL_RATIO_2420_R { type Target = crate::FieldReader<u16, u16>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `kcal_ratio_2420` writer - "] pub struct KCAL_RATIO_2420_W<'a> { w: &'a mut W, } impl<'a> KCAL_RATIO_2420_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0x03ff) | (value as u32 & 0x03ff); self.w } } impl R { #[doc = "Bit 12"] #[inline(always)] pub fn adpll_sdm_dither_en_2420(&self) -> ADPLL_SDM_DITHER_EN_2420_R { ADPLL_SDM_DITHER_EN_2420_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bits 0:9"] #[inline(always)] pub fn kcal_ratio_2420(&self) -> KCAL_RATIO_2420_R { KCAL_RATIO_2420_R::new((self.bits & 0x03ff) as u16) } } impl W { #[doc = "Bit 12"] #[inline(always)] pub fn adpll_sdm_dither_en_2420(&mut self) -> ADPLL_SDM_DITHER_EN_2420_W { ADPLL_SDM_DITHER_EN_2420_W { w: self } } #[doc = "Bits 0:9"] #[inline(always)] pub fn kcal_ratio_2420(&mut self) -> KCAL_RATIO_2420_W { KCAL_RATIO_2420_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "lo_config_2420.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), 
[`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lo_config_2420](index.html) module"] pub struct LO_CONFIG_2420_SPEC; impl crate::RegisterSpec for LO_CONFIG_2420_SPEC { type Ux = u32; } #[doc = "`read()` method returns [lo_config_2420::R](R) reader structure"] impl crate::Readable for LO_CONFIG_2420_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [lo_config_2420::W](W) writer structure"] impl crate::Writable for LO_CONFIG_2420_SPEC { type Writer = W; } #[doc = "`reset()` method sets lo_config_2420 to value 0"] impl crate::Resettable for LO_CONFIG_2420_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.536424
410
0.622848
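Everything the generated `lo_config_2420` accessors above do reduces to mask-and-shift arithmetic on a `u32`; the `R`/`W` wrappers only add type safety on top. A standalone sketch of the same read-modify-write pattern, with constants mirroring the masks in the generated code (bit 12 for the dither enable, bits 0:9 for the ratio):

const KCAL_RATIO_MASK: u32 = 0x03ff; // bits 0:9, as in KCAL_RATIO_2420_W::bits
const DITHER_EN_BIT: u32 = 12;       // as in ADPLL_SDM_DITHER_EN_2420_W::bit

/// Replace bits 0:9 with `value`, leaving every other bit untouched.
fn set_kcal_ratio(word: u32, value: u16) -> u32 {
    (word & !KCAL_RATIO_MASK) | (value as u32 & KCAL_RATIO_MASK)
}

/// Extract bit 12 as a bool, exactly what the reader's `!= 0` test does.
fn dither_en(word: u32) -> bool {
    (word >> DITHER_EN_BIT) & 0x01 != 0
}

fn main() {
    let word = set_kcal_ratio(1 << DITHER_EN_BIT, 0x155);
    assert_eq!(word & KCAL_RATIO_MASK, 0x155); // field was written
    assert!(dither_en(word));                  // unrelated bit was preserved
}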
1cb1507d94a52a6f8b24da05c08b0c7cfafd3d8d
5,494
use crate::model::browse::Browsable; use crate::model::browse::BrowseResult; use serde::de::DeserializeOwned; use std::fmt; use std::marker::PhantomData; use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor}; // Browse result fields in musicbrainz api v2 are prefixed with resource type: // this impl provides a generic browse result deserializer impl<'de, T> Deserialize<'de> for BrowseResult<T> where T: DeserializeOwned + Browsable, { fn deserialize<D>(deserializer: D) -> Result<BrowseResult<T>, D::Error> where D: Deserializer<'de>, T: Browsable, { enum Field<T> { Count, Offset, Entities(PhantomData<T>), }; impl<'de, T> Deserialize<'de> for Field<T> where T: Browsable, { fn deserialize<D>(deserializer: D) -> Result<Field<T>, D::Error> where D: Deserializer<'de>, T: Browsable, { struct FieldVisitor<T>(PhantomData<T>); impl<'de, T> Visitor<'de> for FieldVisitor<T> where T: Browsable, { type Value = Field<T>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("`count` or `offset`, `entities`") } fn visit_str<E>(self, value: &str) -> Result<Field<T>, E> where E: de::Error, T: Browsable, { match value { field if field == T::COUNT_FIELD => Ok(Field::Count), field if field == T::OFFSET_FIELD => Ok(Field::Offset), field if field == T::ENTITIES_FIELD => { Ok(Field::Entities(PhantomData::<T>)) } _ => Err(de::Error::unknown_field(value, FIELDS)), } } } deserializer.deserialize_identifier(FieldVisitor(PhantomData::<T>)) } } struct BrowseResultVisitor<T> { phantom: PhantomData<T>, }; impl<'de, T> Visitor<'de> for BrowseResultVisitor<T> where T: Browsable + Deserialize<'de>, { type Value = BrowseResult<T>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("struct Browsable<T>") } fn visit_seq<V>(self, mut seq: V) -> Result<BrowseResult<T>, V::Error> where V: SeqAccess<'de>, { let count = seq .next_element()? .ok_or_else(|| de::Error::invalid_length(0, &self))?; let offset = seq .next_element()? .ok_or_else(|| de::Error::invalid_length(1, &self))?; let entities = seq .next_element()? .ok_or_else(|| de::Error::invalid_length(2, &self))?; Ok(BrowseResult { count, offset, entities, }) } fn visit_map<V>(self, mut map: V) -> Result<BrowseResult<T>, V::Error> where T: Browsable + Deserialize<'de>, V: MapAccess<'de>, { let mut count: Option<i32> = None; let mut offset: Option<i32> = None; let mut entities: Option<Vec<T>> = None; while let Some(key) = map.next_key::<Field<T>>()? { match key { Field::Count => { if count.is_some() { return Err(de::Error::duplicate_field("count")); } count = Some(map.next_value()?); } Field::Offset => { if offset.is_some() { return Err(de::Error::duplicate_field("offset")); } offset = Some(map.next_value()?); } Field::Entities(_t) => { if entities.is_some() { return Err(de::Error::duplicate_field("entities")); } entities = Some(map.next_value()?); } } } let count = count.ok_or_else(|| de::Error::missing_field("count"))?; let offset = offset.ok_or_else(|| de::Error::missing_field("offset"))?; let entities = entities.ok_or_else(|| de::Error::missing_field("entities"))?; Ok(BrowseResult { count, offset, entities, }) } } const FIELDS: &[&str] = &["count", "offset", "artists"]; deserializer.deserialize_struct( "BrowseResult", FIELDS, BrowseResultVisitor { phantom: PhantomData::<T>, }, ) } }
36.144737
93
0.425555
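The hand-rolled `Deserialize` above is generic over any entity that names its JSON keys through the `Browsable` constants (`COUNT_FIELD`, `OFFSET_FIELD`, `ENTITIES_FIELD`). A sketch of a conforming entity — the trait shape is inferred from how the field visitor uses it, and `Artist` with its fields is purely illustrative, not the crate's actual model:

use serde::Deserialize;

// Assumed shape of the trait, inferred from the field visitor above.
trait Browsable {
    const COUNT_FIELD: &'static str;
    const OFFSET_FIELD: &'static str;
    const ENTITIES_FIELD: &'static str;
}

#[derive(Deserialize)]
struct Artist {
    id: String,
    name: String,
}

// MusicBrainz browse results prefix the count/offset keys with the resource
// type, e.g. {"artist-count": 1, "artist-offset": 0, "artists": [...]},
// which is the prefixing the comment above refers to.
impl Browsable for Artist {
    const COUNT_FIELD: &'static str = "artist-count";
    const OFFSET_FIELD: &'static str = "artist-offset";
    const ENTITIES_FIELD: &'static str = "artists";
}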
09bca4849ac7aa740a478a1dadd58327dd4b2b2b
13,093
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use crate::io::{self, BufWriter, IoSlice, Write}; use crate::sys_common::memchr; /// Private helper struct for implementing the line-buffered writing logic. /// This shim temporarily wraps a BufWriter, and uses its internals to /// implement a line-buffered writer (specifically by using the internal /// methods like write_to_buf and flush_buf). In this way, a more /// efficient abstraction can be created than one that only had access to /// `write` and `flush`, without needlessly duplicating a lot of the /// implementation details of BufWriter. This also allows existing /// `BufWriters` to be temporarily given line-buffering logic; this is what /// enables Stdout to be alternately in line-buffered or block-buffered mode. #[derive(Debug)] pub struct LineWriterShim<'a, W: Write> { buffer: &'a mut BufWriter<W>, } impl<'a, W: Write> LineWriterShim<'a, W> { pub fn new(buffer: &'a mut BufWriter<W>) -> Self { Self { buffer } } /// Get a reference to the inner writer (that is, the writer /// wrapped by the BufWriter). fn inner(&self) -> &W { self.buffer.get_ref() } /// Get a mutable reference to the inner writer (that is, the writer /// wrapped by the BufWriter). Be careful with this writer, as writes to /// it will bypass the buffer. fn inner_mut(&mut self) -> &mut W { self.buffer.get_mut() } /// Get the content currently buffered in self.buffer fn buffered(&self) -> &[u8] { self.buffer.buffer() } /// Flush the buffer iff the last byte is a newline (indicating that an /// earlier write only succeeded partially, and we want to retry flushing /// the buffered line before continuing with a subsequent write) fn flush_if_completed_line(&mut self) -> io::Result<()> { match self.buffered().last().copied() { Some(b'\n') => self.buffer.flush_buf(), _ => Ok(()), } } } impl<'a, W: Write> Write for LineWriterShim<'a, W> { /// Write some data into this BufWriter with line buffering. This means /// that, if any newlines are present in the data, the data up to the last /// newline is sent directly to the underlying writer, and data after it /// is buffered. Returns the number of bytes written. /// /// This function operates on a "best effort basis"; in keeping with the /// convention of `Write::write`, it makes at most one attempt to write /// new data to the underlying writer. If that write only reports a partial /// success, the remaining data will be buffered. /// /// Because this function attempts to send completed lines to the underlying /// writer, it will also flush the existing buffer if it ends with a /// newline, even if the incoming data does not contain any newlines.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let newline_idx = match memchr::memrchr(b'\n', buf) { // If there are no new newlines (that is, if this write is less than // one line), just do a regular buffered write (which may flush if // we exceed the inner buffer's size) None => { self.flush_if_completed_line()?; return self.buffer.write(buf); } // Otherwise, arrange for the lines to be written directly to the // inner writer. Some(newline_idx) => newline_idx + 1, }; // Flush existing content to prepare for our write. We have to do this // before attempting to write `buf` in order to maintain consistency; // if we add `buf` to the buffer then try to flush it all at once, // we're obligated to return Ok(), which would mean suppressing any // errors that occur during flush. self.buffer.flush_buf()?; // This is what we're going to try to write directly to the inner // writer. The rest will be buffered, if nothing goes wrong. let lines = &buf[..newline_idx]; // Write `lines` directly to the inner writer. In keeping with the // `write` convention, make at most one attempt to add new (unbuffered) // data. Because this write doesn't touch the BufWriter state directly, // and the buffer is known to be empty, we don't need to worry about // self.buffer.panicked here. let flushed = self.inner_mut().write(lines)?; // If buffer returns Ok(0), propagate that to the caller without // doing additional buffering; otherwise we're just guaranteeing // an "ErrorKind::WriteZero" later. if flushed == 0 { return Ok(0); } // Now that the write has succeeded, buffer the rest (or as much of // the rest as possible). If there were any unwritten newlines, we // only buffer out to the last unwritten newline that fits in the // buffer; this helps prevent flushing partial lines on subsequent // calls to LineWriterShim::write. // Handle the cases in order of most-common to least-common, under // the presumption that most writes succeed in totality, and that most // writes are smaller than the buffer. // - Is this a partial line (ie, no newlines left in the unwritten tail) // - If not, does the data out to the last unwritten newline fit in // the buffer? // - If not, scan for the last newline that *does* fit in the buffer let tail = if flushed >= newline_idx { &buf[flushed..] } else if newline_idx - flushed <= self.buffer.capacity() { &buf[flushed..newline_idx] } else { let scan_area = &buf[flushed..]; let scan_area = &scan_area[..self.buffer.capacity()]; match memchr::memrchr(b'\n', scan_area) { Some(newline_idx) => &scan_area[..newline_idx + 1], None => scan_area, } }; let buffered = self.buffer.write_to_buf(tail); Ok(flushed + buffered) } fn flush(&mut self) -> io::Result<()> { self.buffer.flush() } /// Write some vectored data into this BufWriter with line buffering. This /// means that, if any newlines are present in the data, the data up to /// and including the buffer containing the last newline is sent directly /// to the inner writer, and the data after it is buffered. Returns the /// number of bytes written. /// /// This function operates on a "best effort basis"; in keeping with the /// convention of `Write::write`, it makes at most one attempt to write /// new data to the underlying writer. /// /// Because this function attempts to send completed lines to the underlying /// writer, it will also flush the existing buffer if it contains any /// newlines.
/// /// Because sorting through an array of `IoSlice` can be a bit convoluted, /// this method differs from write in the following ways: /// /// - It attempts to write the full content of all the buffers up to and /// including the one containing the last newline. This means that it /// may attempt to write a partial line, if that buffer has data past the /// newline. /// - If the write only reports partial success, it does not attempt to /// find the precise location of the written bytes and buffer the rest. /// /// If the underlying writer doesn't support vectored writing, we instead /// simply write the first non-empty buffer with `write`. This way, we /// get the benefits of more granular partial-line handling without losing /// anything in efficiency. fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { // If there's no specialized behavior for write_vectored, just use // write. This has the benefit of more granular partial-line handling. if !self.is_write_vectored() { return match bufs.iter().find(|buf| !buf.is_empty()) { Some(buf) => self.write(buf), None => Ok(0), }; } // Find the buffer containing the last newline let last_newline_buf_idx = bufs .iter() .enumerate() .rev() .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i)); // If there are no new newlines (that is, if this write is less than // one line), just do a regular buffered write let last_newline_buf_idx = match last_newline_buf_idx { // No newlines; just do a normal buffered write None => { self.flush_if_completed_line()?; return self.buffer.write_vectored(bufs); } Some(i) => i, }; // Flush existing content to prepare for our write self.buffer.flush_buf()?; // This is what we're going to try to write directly to the inner // writer. The rest will be buffered, if nothing goes wrong. let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1); // Write `lines` directly to the inner writer. In keeping with the // `write` convention, make at most one attempt to add new (unbuffered) // data. Because this write doesn't touch the BufWriter state directly, // and the buffer is known to be empty, we don't need to worry about // self.panicked here. let flushed = self.inner_mut().write_vectored(lines)?; // If inner returns Ok(0), propagate that to the caller without // doing additional buffering; otherwise we're just guaranteeing // an "ErrorKind::WriteZero" later. if flushed == 0 { return Ok(0); } // Don't try to reconstruct the exact amount written; just bail // in the event of a partial write let lines_len = lines.iter().map(|buf| buf.len()).sum(); if flushed < lines_len { return Ok(flushed); } // Now that the write has succeeded, buffer the rest (or as much of the // rest as possible) let buffered: usize = tail .iter() .filter(|buf| !buf.is_empty()) .map(|buf| self.buffer.write_to_buf(buf)) .take_while(|&n| n > 0) .sum(); Ok(flushed + buffered) } fn is_write_vectored(&self) -> bool { self.inner().is_write_vectored() } /// Write some data into this BufWriter with line buffering. This means /// that, if any newlines are present in the data, the data up to the last /// newline is sent directly to the underlying writer, and data after it /// is buffered. /// /// Because this function attempts to send completed lines to the underlying /// writer, it will also flush the existing buffer if it contains any /// newlines, even if the incoming data does not contain any newlines.
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { match memchr::memrchr(b'\n', buf) { // If there are no new newlines (that is, if this write is less than // one line), just do a regular buffered write (which may flush if // we exceed the inner buffer's size) None => { self.flush_if_completed_line()?; self.buffer.write_all(buf) } Some(newline_idx) => { let (lines, tail) = buf.split_at(newline_idx + 1); if self.buffered().is_empty() { self.inner_mut().write_all(lines)?; } else { // If there is any buffered data, we add the incoming lines // to that buffer before flushing, which saves us at least // one write call. We can't really do this with `write`, // since we can't do this *and* not suppress errors *and* // report a consistent state to the caller in a return // value, but here in write_all it's fine. self.buffer.write_all(lines)?; self.buffer.flush_buf()?; } self.buffer.write_all(tail) } } } }
44.534014
80
0.618728
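The shim above is what `std::io::LineWriter` (and line-buffered stdout) delegates to: bytes up to the last newline go straight through to the inner writer, while the tail stays buffered. A small demonstration of that observable behavior against a `Vec<u8>` sink:

use std::io::{LineWriter, Write};

fn main() -> std::io::Result<()> {
    let mut writer = LineWriter::new(Vec::new());

    // No newline yet: the bytes sit in the buffer, nothing reaches the Vec.
    writer.write_all(b"partial ")?;

    // A newline triggers a write-through of the completed line; the text
    // after it ("tail") stays buffered until the next newline or flush.
    writer.write_all(b"line\ntail")?;
    assert_eq!(writer.get_ref().as_slice(), b"partial line\n");

    // Flushing (or dropping the writer) pushes the remainder out.
    writer.flush()?;
    assert_eq!(writer.get_ref().as_slice(), b"partial line\ntail");
    Ok(())
}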
1a5e4d47745b9d39d7822f38a73a39d083596011
9,758
#[doc = "Reader of register UCB3IV"] pub type R = crate::R<u16, super::UCB3IV>; #[doc = "Writer for register UCB3IV"] pub type W = crate::W<u16, super::UCB3IV>; #[doc = "Register UCB3IV `reset()`'s with value 0"] impl crate::ResetValue for super::UCB3IV { type Type = u16; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "15:0\\] eUSCI_B interrupt vector value\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u16)] pub enum UCIV_A { #[doc = "0: No interrupt pending"] NONE = 0, #[doc = "2: Interrupt Source: Arbitration lost; Interrupt Flag: UCALIFG; Interrupt Priority: Highest"] UCALIFG = 2, #[doc = "4: Interrupt Source: Not acknowledgment; Interrupt Flag: UCNACKIFG"] UCNACKIFG = 4, #[doc = "6: Interrupt Source: Start condition received; Interrupt Flag: UCSTTIFG"] UCSTTIFG = 6, #[doc = "8: Interrupt Source: Stop condition received; Interrupt Flag: UCSTPIFG"] UCSTPIFG = 8, #[doc = "10: Interrupt Source: Slave 3 Data received; Interrupt Flag: UCRXIFG3"] UCRXIFG3 = 10, #[doc = "12: Interrupt Source: Slave 3 Transmit buffer empty; Interrupt Flag: UCTXIFG3"] UCTXIFG3 = 12, #[doc = "14: Interrupt Source: Slave 2 Data received; Interrupt Flag: UCRXIFG2"] UCRXIFG2 = 14, #[doc = "16: Interrupt Source: Slave 2 Transmit buffer empty; Interrupt Flag: UCTXIFG2"] UCTXIFG2 = 16, #[doc = "18: Interrupt Source: Slave 1 Data received; Interrupt Flag: UCRXIFG1"] UCRXIFG1 = 18, #[doc = "20: Interrupt Source: Slave 1 Transmit buffer empty; Interrupt Flag: UCTXIFG1"] UCTXIFG1 = 20, #[doc = "22: Interrupt Source: Data received; Interrupt Flag: UCRXIFG0"] UCRXIFG0 = 22, #[doc = "24: Interrupt Source: Transmit buffer empty; Interrupt Flag: UCTXIFG0"] UCTXIFG0 = 24, #[doc = "26: Interrupt Source: Byte counter zero; Interrupt Flag: UCBCNTIFG"] UCBCNTIFG = 26, #[doc = "28: Interrupt Source: Clock low timeout; Interrupt Flag: UCCLTOIFG"] UCCLTOIFG = 28, #[doc = "30: Interrupt Source: Nineth bit position; Interrupt Flag: UCBIT9IFG; Priority: Lowest"] UCBIT9IFG = 30, } impl From<UCIV_A> for u16 { #[inline(always)] fn from(variant: UCIV_A) -> Self { variant as _ } } #[doc = "Reader of field `UCIV`"] pub type UCIV_R = crate::R<u16, UCIV_A>; impl UCIV_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u16, UCIV_A> { use crate::Variant::*; match self.bits { 0 => Val(UCIV_A::NONE), 2 => Val(UCIV_A::UCALIFG), 4 => Val(UCIV_A::UCNACKIFG), 6 => Val(UCIV_A::UCSTTIFG), 8 => Val(UCIV_A::UCSTPIFG), 10 => Val(UCIV_A::UCRXIFG3), 12 => Val(UCIV_A::UCTXIFG3), 14 => Val(UCIV_A::UCRXIFG2), 16 => Val(UCIV_A::UCTXIFG2), 18 => Val(UCIV_A::UCRXIFG1), 20 => Val(UCIV_A::UCTXIFG1), 22 => Val(UCIV_A::UCRXIFG0), 24 => Val(UCIV_A::UCTXIFG0), 26 => Val(UCIV_A::UCBCNTIFG), 28 => Val(UCIV_A::UCCLTOIFG), 30 => Val(UCIV_A::UCBIT9IFG), i => Res(i), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == UCIV_A::NONE } #[doc = "Checks if the value of the field is `UCALIFG`"] #[inline(always)] pub fn is_ucalifg(&self) -> bool { *self == UCIV_A::UCALIFG } #[doc = "Checks if the value of the field is `UCNACKIFG`"] #[inline(always)] pub fn is_ucnackifg(&self) -> bool { *self == UCIV_A::UCNACKIFG } #[doc = "Checks if the value of the field is `UCSTTIFG`"] #[inline(always)] pub fn is_ucsttifg(&self) -> bool { *self == UCIV_A::UCSTTIFG } #[doc = "Checks if the value of the field is `UCSTPIFG`"] #[inline(always)] pub fn is_ucstpifg(&self) -> bool { *self == UCIV_A::UCSTPIFG } #[doc = "Checks if the value of the 
field is `UCRXIFG3`"] #[inline(always)] pub fn is_ucrxifg3(&self) -> bool { *self == UCIV_A::UCRXIFG3 } #[doc = "Checks if the value of the field is `UCTXIFG3`"] #[inline(always)] pub fn is_uctxifg3(&self) -> bool { *self == UCIV_A::UCTXIFG3 } #[doc = "Checks if the value of the field is `UCRXIFG2`"] #[inline(always)] pub fn is_ucrxifg2(&self) -> bool { *self == UCIV_A::UCRXIFG2 } #[doc = "Checks if the value of the field is `UCTXIFG2`"] #[inline(always)] pub fn is_uctxifg2(&self) -> bool { *self == UCIV_A::UCTXIFG2 } #[doc = "Checks if the value of the field is `UCRXIFG1`"] #[inline(always)] pub fn is_ucrxifg1(&self) -> bool { *self == UCIV_A::UCRXIFG1 } #[doc = "Checks if the value of the field is `UCTXIFG1`"] #[inline(always)] pub fn is_uctxifg1(&self) -> bool { *self == UCIV_A::UCTXIFG1 } #[doc = "Checks if the value of the field is `UCRXIFG0`"] #[inline(always)] pub fn is_ucrxifg0(&self) -> bool { *self == UCIV_A::UCRXIFG0 } #[doc = "Checks if the value of the field is `UCTXIFG0`"] #[inline(always)] pub fn is_uctxifg0(&self) -> bool { *self == UCIV_A::UCTXIFG0 } #[doc = "Checks if the value of the field is `UCBCNTIFG`"] #[inline(always)] pub fn is_ucbcntifg(&self) -> bool { *self == UCIV_A::UCBCNTIFG } #[doc = "Checks if the value of the field is `UCCLTOIFG`"] #[inline(always)] pub fn is_uccltoifg(&self) -> bool { *self == UCIV_A::UCCLTOIFG } #[doc = "Checks if the value of the field is `UCBIT9IFG`"] #[inline(always)] pub fn is_ucbit9ifg(&self) -> bool { *self == UCIV_A::UCBIT9IFG } } #[doc = "Write proxy for field `UCIV`"] pub struct UCIV_W<'a> { w: &'a mut W, } impl<'a> UCIV_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: UCIV_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "No interrupt pending"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(UCIV_A::NONE) } #[doc = "Interrupt Source: Arbitration lost; Interrupt Flag: UCALIFG; Interrupt Priority: Highest"] #[inline(always)] pub fn ucalifg(self) -> &'a mut W { self.variant(UCIV_A::UCALIFG) } #[doc = "Interrupt Source: Not acknowledgment; Interrupt Flag: UCNACKIFG"] #[inline(always)] pub fn ucnackifg(self) -> &'a mut W { self.variant(UCIV_A::UCNACKIFG) } #[doc = "Interrupt Source: Start condition received; Interrupt Flag: UCSTTIFG"] #[inline(always)] pub fn ucsttifg(self) -> &'a mut W { self.variant(UCIV_A::UCSTTIFG) } #[doc = "Interrupt Source: Stop condition received; Interrupt Flag: UCSTPIFG"] #[inline(always)] pub fn ucstpifg(self) -> &'a mut W { self.variant(UCIV_A::UCSTPIFG) } #[doc = "Interrupt Source: Slave 3 Data received; Interrupt Flag: UCRXIFG3"] #[inline(always)] pub fn ucrxifg3(self) -> &'a mut W { self.variant(UCIV_A::UCRXIFG3) } #[doc = "Interrupt Source: Slave 3 Transmit buffer empty; Interrupt Flag: UCTXIFG3"] #[inline(always)] pub fn uctxifg3(self) -> &'a mut W { self.variant(UCIV_A::UCTXIFG3) } #[doc = "Interrupt Source: Slave 2 Data received; Interrupt Flag: UCRXIFG2"] #[inline(always)] pub fn ucrxifg2(self) -> &'a mut W { self.variant(UCIV_A::UCRXIFG2) } #[doc = "Interrupt Source: Slave 2 Transmit buffer empty; Interrupt Flag: UCTXIFG2"] #[inline(always)] pub fn uctxifg2(self) -> &'a mut W { self.variant(UCIV_A::UCTXIFG2) } #[doc = "Interrupt Source: Slave 1 Data received; Interrupt Flag: UCRXIFG1"] #[inline(always)] pub fn ucrxifg1(self) -> &'a mut W { self.variant(UCIV_A::UCRXIFG1) } #[doc = "Interrupt Source: Slave 1 Transmit buffer empty; Interrupt Flag: UCTXIFG1"] #[inline(always)] pub fn uctxifg1(self) -> &'a mut W { 
self.variant(UCIV_A::UCTXIFG1) } #[doc = "Interrupt Source: Data received; Interrupt Flag: UCRXIFG0"] #[inline(always)] pub fn ucrxifg0(self) -> &'a mut W { self.variant(UCIV_A::UCRXIFG0) } #[doc = "Interrupt Source: Transmit buffer empty; Interrupt Flag: UCTXIFG0"] #[inline(always)] pub fn uctxifg0(self) -> &'a mut W { self.variant(UCIV_A::UCTXIFG0) } #[doc = "Interrupt Source: Byte counter zero; Interrupt Flag: UCBCNTIFG"] #[inline(always)] pub fn ucbcntifg(self) -> &'a mut W { self.variant(UCIV_A::UCBCNTIFG) } #[doc = "Interrupt Source: Clock low timeout; Interrupt Flag: UCCLTOIFG"] #[inline(always)] pub fn uccltoifg(self) -> &'a mut W { self.variant(UCIV_A::UCCLTOIFG) } #[doc = "Interrupt Source: Ninth bit position; Interrupt Flag: UCBIT9IFG; Priority: Lowest"] #[inline(always)] pub fn ucbit9ifg(self) -> &'a mut W { self.variant(UCIV_A::UCBIT9IFG) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff) | ((value as u16) & 0xffff); self.w } } impl R { #[doc = "Bits 0:15 - 15:0\\] eUSCI_B interrupt vector value"] #[inline(always)] pub fn uciv(&self) -> UCIV_R { UCIV_R::new((self.bits & 0xffff) as u16) } } impl W { #[doc = "Bits 0:15 - 15:0\\] eUSCI_B interrupt vector value"] #[inline(always)] pub fn uciv(&mut self) -> UCIV_W { UCIV_W { w: self } } }
35.100719
106
0.592437
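The enumerated reader above exists so an interrupt handler can dispatch on the vector value with a plain `match` instead of raw offsets. A hedged sketch of such a handler — how the register is read and how the function is wired to the eUSCI_B3 interrupt are illustrative, only the `UCIV_A` variants come from the generated code:

// `iv` would come from reading UCB3IV, e.g. via the PAC reader's
// `uciv().variant()`; the surrounding peripheral plumbing is omitted here.
fn on_eusci_b3_interrupt(iv: UCIV_A) {
    match iv {
        UCIV_A::NONE => {}
        UCIV_A::UCALIFG => { /* arbitration lost: abort the transfer */ }
        UCIV_A::UCNACKIFG => { /* NACK received: issue a stop or retry */ }
        UCIV_A::UCRXIFG0 => { /* data received: read the RX buffer */ }
        UCIV_A::UCTXIFG0 => { /* transmit buffer empty: load the TX buffer */ }
        // Start/stop conditions, slave channels 1-3, the byte counter,
        // clock-low timeout, and the 9th-bit flag would be handled likewise.
        _ => {}
    }
}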
f4b1c7f1f06df811bd62b7adea98030387d20525
6,293
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /* * Inline assembly support. */ use ast; use codemap::Span; use ext::base; use ext::base::*; use parse; use parse::token; enum State { Asm, Outputs, Inputs, Clobbers, Options } fn next_state(s: State) -> Option<State> { match s { Asm => Some(Outputs), Outputs => Some(Inputs), Inputs => Some(Clobbers), Clobbers => Some(Options), Options => None } } pub fn expand_asm(cx: @ExtCtxt, sp: Span, tts: &[ast::token_tree]) -> base::MacResult { let p = parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(), tts.to_owned()); let mut asm = @""; let mut asm_str_style = None; let mut outputs = ~[]; let mut inputs = ~[]; let mut cons = ~""; let mut volatile = false; let mut alignstack = false; let mut dialect = ast::asm_att; let mut state = Asm; // Not using labeled break to get us through one round of bootstrapping. let mut continue_ = true; while continue_ { match state { Asm => { let (s, style) = expr_to_str(cx, p.parse_expr(), "inline assembly must be a string literal."); asm = s; asm_str_style = Some(style); } Outputs => { while *p.token != token::EOF && *p.token != token::COLON && *p.token != token::MOD_SEP { if outputs.len() != 0 { p.eat(&token::COMMA); } let (constraint, _str_style) = p.parse_str(); if constraint.starts_with("+") { cx.span_unimpl(*p.last_span, "'+' (read+write) output operand constraint modifier"); } else if !constraint.starts_with("=") { cx.span_err(*p.last_span, "output operand constraint lacks '='"); } p.expect(&token::LPAREN); let out = p.parse_expr(); p.expect(&token::RPAREN); outputs.push((constraint, out)); } } Inputs => { while *p.token != token::EOF && *p.token != token::COLON && *p.token != token::MOD_SEP { if inputs.len() != 0 { p.eat(&token::COMMA); } let (constraint, _str_style) = p.parse_str(); if constraint.starts_with("=") { cx.span_err(*p.last_span, "input operand constraint contains '='"); } else if constraint.starts_with("+") { cx.span_err(*p.last_span, "input operand constraint contains '+'"); } p.expect(&token::LPAREN); let input = p.parse_expr(); p.expect(&token::RPAREN); inputs.push((constraint, input)); } } Clobbers => { let mut clobs = ~[]; while *p.token != token::EOF && *p.token != token::COLON && *p.token != token::MOD_SEP { if clobs.len() != 0 { p.eat(&token::COMMA); } let (s, _str_style) = p.parse_str(); let clob = format!("~\\{{}\\}", s); clobs.push(clob); } cons = clobs.connect(","); } Options => { let (option, _str_style) = p.parse_str(); if "volatile" == option { volatile = true; } else if "alignstack" == option { alignstack = true; } else if "intel" == option { dialect = ast::asm_intel; } if *p.token == token::COMMA { p.eat(&token::COMMA); } } } while *p.token == token::COLON || *p.token == token::MOD_SEP || *p.token == token::EOF { state = if *p.token == token::COLON { p.bump(); match next_state(state) { Some(x) => x, None => { continue_ = false; break } } } else if *p.token == token::MOD_SEP { p.bump(); let s = match next_state(state) { Some(x) => x, None => { continue_ = false; break } }; match next_state(s) { Some(x) => x, None => { continue_ = false; break } } } else if *p.token 
== token::EOF { continue_ = false; break; } else { state }; } } MRExpr(@ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprInlineAsm(ast::inline_asm { asm: asm, asm_str_style: asm_str_style.unwrap(), clobbers: cons.to_managed(), inputs: inputs, outputs: outputs, volatile: volatile, alignstack: alignstack, dialect: dialect }), span: sp }) }
31
94
0.411886
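The state machine above parses the pre-1.0 `asm!` grammar, where `:` advances one section and `::` skips one, in the fixed order Asm → Outputs → Inputs → Clobbers → Options. For orientation, an invocation in that era's syntax (historical only — this does not compile with the modern `asm!` macro, and the operands are illustrative):

// template : outputs : inputs : clobbers : options
asm!("add $2, $0"
     : "=r"(out)        // Outputs: must start with '=' (checked above)
     : "0"(a), "r"(b)   // Inputs: may not contain '=' or '+'
     : "cc"             // Clobbers: each one is rendered as "~{cc}"
     : "volatile");     // Options: volatile / alignstack / intel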
fc6f365f39329c2e9c3841ebdde034b8caefca7b
4,111
mod _1500_design_a_file_sharing_system; // mod _1502_can_make_arithmetic_progression_from_sequence; // mod _1503_last_moment_before_all_ants_fall_out_of_a_plank; // mod _1504_count_submatrices_with_all_ones; // mod _1507_reformat_date; // mod _1508_range_sum_of_sorted_subarray_sums; // mod _1509_minimum_difference_between_largest_and_smallest_value_in_three_moves; // mod _1510_stone_game_4; // mod _1512_number_of_good_pairs; // mod _1513_number_of_substrings_with_only_1s; // mod _1514_path_with_maximum_probability; // mod _1515_best_position_for_a_service_center; // mod _1518_water_bottles; // mod _1519_number_of_nodes_in_the_sub_tree_with_the_same_label; // mod _1520_maximum_number_of_non_overlapping_substrings; // mod _1521_find_a_value_of_mysterious_function_closest_to_target; // mod _1523_cound_odd_numbers_in_interval_range; // mod _1524_number_of_sub_arrays_with_odd_sum; // mod _1525_number_of_good_ways_to_split_a_string; // mod _1526_minimum_number_of_increments_on_subarrays_to_form_a_target_array; // mod _1528_shuffle_string; // mod _1529_bulb_switcher_5; // mod _1530_number_of_good_leaf_nodes_pairs; // mod _1531_string_compression_2; // mod _1533_find_the_index_of_the_large_integer; // mod _1534_count_good_triplets; // mod _1535_find_the_winner_of_an_array_game; // mod _1536_minimum_swaps_to_arrange_a_binary_grid; // mod _1537_get_the_maximum_score; // mod _1538_guess_the_majority_in_an_hidden_array; // mod _1539_kth_missing_positive_number; // mod _1540_can_convert_string_in_k_moves; // mod _1541_minimum_insertions_to_balance_a_parentheses_string; // mod _1542_find_longest_awesome_substring; // mod _1544_make_the_string_great; // mod _1545_find_kth_bit_in_nth_binary_stringrs; // mod _1546_maximum_number_of_non_overlapping_subarrays_with_sum_equals_target; // mod _1547_minimum_cost_to_cut_a_stick; // mod _1548_the_most_similar_path_in_a_graph; // mod _1550_three_consecutive_odds; // mod _1551_minimum_operations_to_make_array_equalrs; // mod _1552_magnetic_force_between_two_balls; // mod _1553_minimum_number_of_days_to_eat_n_oranges; // mod _1554_strings_differ_by_one_character; // mod _1556_thousand_separator; // mod _1557_minimum_number_of_vertices_to_reach_all_nodes; // mod _1558_minimum_numbers_of_function_calls_to_make_target_array; // mod _1559_detect_cycles_in_2d_grid; // mod _1560_most_visited_sector_in_a_circular_track; // mod _1561_maximum_number_of_coins_you_can_get; // mod _1562_find_latest_group_of_size_m; // mod _1563_stone_game_5; // mod _1564_pub_boxes_into_the_warehouse_1; // mod _1566_detect_pattern_of_length_m_repeated_k_or_more_times; // mod _1567_maximum_length_of_subarray_with_positive_product; // mod _1568_minimum_number_of_days_to_disconnect_island; // mod _1569_number_of_ways_to_reorder_array_to_get_same_bst; // mod _1570_dot_product_of_two_sparse_vectors; // mod _1572_matrix_diagonal_sum; // mod _1573_number_of_ways_to_split_a_string; // mod _1574_shortest_subarray_to_be_removed_to_make_array_sorted; // mod _1575_count_all_possible_routes; // mod _1576_replace_all_to_avoid_consecutive_repeating_characters; // mod _1577_number_of_ways_where_square_of_number_is_equal_to_product_of_two_numbers; // mod _1578_minimum_deleteion_cost_to_avoid_repeating_letters; // mod _1579_remove_max_number_of_edges_to_keep_graph_fully_traversable; // mod _1580_put_boxes_into_the_warehouse_2; // mod _1582_special_positions_in_a_binary_matrix; // mod _1583_count_unhappy_friends; // mod _1584_min_cost_to_connect_all_points; // mod 
_1585_check_if_string_is_transformable_with_substring_sort_operations; // mod _1586_binary_search_tree_iterator_2; // mod _1588_sum_of_all_odd_length_subarrays; // mod _1589_maximum_sum_obtained_of_any_permutation; // mod _1590_make_sum_divisible_by_p; // mod _1591_strange_printer_2; // mod _1592_rearrange_spaces_between_words; // mod _1593_split_a_string_into_the_max_number_of_unique_substrings; // mod _1594_maximum_non_negative_product_in_a_matrix; // mod _1595_minimum_cost_to_connect_two_groups_of_points; // mod _1598_crawler_log_folder; // mod _1599_maximum_profit_of_operating_a_centennial_wheel;
25.067073
83
0.881051
8f8246c4cc9c265300c6234d9555d38da07861bb
779
use std::env::current_dir; use std::fs::create_dir_all; use cosmwasm_schema::{export_schema, remove_schemas, schema_for}; use queue::contract::{ CountResponse, HandleMsg, InitMsg, Item, ListResponse, QueryMsg, SumResponse, }; fn main() { let mut out_dir = current_dir().unwrap(); out_dir.push("schema"); create_dir_all(&out_dir).unwrap(); remove_schemas(&out_dir).unwrap(); export_schema(&schema_for!(InitMsg), &out_dir); export_schema(&schema_for!(HandleMsg), &out_dir); export_schema(&schema_for!(QueryMsg), &out_dir); export_schema(&schema_for!(Item), &out_dir); export_schema(&schema_for!(CountResponse), &out_dir); export_schema(&schema_for!(SumResponse), &out_dir); export_schema(&schema_for!(ListResponse), &out_dir); }
32.458333
81
0.716303
90a65591718fb6af1bb5f146589675516b487460
1,784
// https://kbknapp.dev/shell-completions/ use clap::IntoApp; use clap_complete::{ shells::{Bash, Elvish, Fish, PowerShell, Zsh}, Generator, }; use std::io::Error; use std::{env, path::PathBuf}; include!("src/cli.rs"); fn main() -> Result<(), Error> { let out_dir = match env::var_os("CARGO_MANIFEST_DIR") { None => return Ok(()), Some(out_dir) => out_dir, }; let app = Cli::command(); let bin_name = app.get_name().to_string(); let shell: Box<dyn Generator> = match env::var("SHELL") { Ok(s) if s.contains("bash") => Box::new(Bash), Ok(s) if s.contains("fish") => Box::new(Fish), Ok(s) if s.contains("zsh") => Box::new(Zsh), Ok(s) if s.contains("elvish") => Box::new(Elvish), Ok(s) if s.contains("powershell") => Box::new(PowerShell), Ok(_) | Err(_) => { println!( "cargo:warning=Your shell could not be detected from the $SHELL environment variable so no shell completions were generated. Check the build.rs file if you want to see how this was generated.", ); println!("cargo:warning=Raise an issue if this doesn't work for you",); return Ok(()); } }; let mut path = PathBuf::from(out_dir); path.set_file_name(shell.file_name(&bin_name)); // Check if tab completions file already exists and return if so if path.is_file() { return Ok(()); } // This is an attempt at being smart. Instead, one could just generate completion scripts for all of the shells in a completions/ directory and have the user choose the appropriate one. shell.generate(&app, &mut std::fs::File::create(path.clone())?); println!( "cargo:warning={} completion file is generated: {path:?}", app.get_name() ); println!("cargo:warning=enable this by running `source {path:?}`"); Ok(()) }
33.037037
201
0.644058
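The `include!("src/cli.rs")` in the build script above means `Cli::command()` resolves against whatever that file defines, compiled directly into build.rs. A minimal hypothetical `src/cli.rs` that would satisfy it, written in clap 3.x style to match the `IntoApp` import — the struct's contents are invented for illustration, only the name `Cli` is required by the script:

// src/cli.rs -- shared between the crate and build.rs via include!().
use clap::Parser;

#[derive(Parser)]
#[clap(name = "example-bin", about = "Demo CLI whose completions get generated")]
pub struct Cli {
    /// Input file to operate on (illustrative flag).
    #[clap(short, long)]
    pub input: Option<String>,
}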
f711839cc6593f8ec5984cae5a64b2fbd48baa44
1,418
//! # 968. Binary tree cameras //! //! Given the root of a binary tree, you may install cameras at any node //! to monitor all adjacent nodes. Return the minimum number of cameras //! required to monitor all nodes. use std::{cell::RefCell, rc::Rc}; use crate::collections::TreeNode; pub fn min_camera_cover(root: Option<Rc<RefCell<TreeNode>>>) -> i32 { // Start from one above the root, in case it needs a camera itself let head = Some(Rc::new(RefCell::new(TreeNode { val: 0, left: None, right: root }))); fn helper(root: &Option<Rc<RefCell<TreeNode>>>, cams: &mut i32) -> Cam { use Cam::{IsCamera, NeedCamera, NotReq}; match root { Some(node) => { let (l, r) = (helper(&node.borrow().left, cams), helper(&node.borrow().right, cams)); match (l, r) { (NeedCamera, _) | (_, NeedCamera) => { *cams += 1; IsCamera }, (IsCamera, _) | (_, IsCamera) => NotReq, _ => NeedCamera, } } None => NotReq } } let mut cams = 0; helper(&head, &mut cams); cams } enum Cam { IsCamera, NeedCamera, NotReq } #[test] fn example_1() { let root = TreeNode::root_from_slice(&[ Some(0), Some(0), Some(0), None, None, Some(0), None, None, None ]); assert_eq!(min_camera_cover(root), 1); } #[test] fn example_2() { let root = TreeNode::root_from_slice(&[ Some(0), Some(0), Some(0), Some(0), None, Some(0), None, None, None, None, None ]); assert_eq!(min_camera_cover(root), 2); }
26.259259
89
0.631876
28df9fdee165cdf8fce5a71aec36be90d7dca458
3,716
use std::{ env, fs, io::{self, prelude::*}, path::{Path, PathBuf}, }; use walkdir::WalkDir; use zip::{self, read::ZipFile, ZipArchive}; use crate::{ info, oof, utils::{self, Bytes}, }; pub fn unpack_archive<R>(mut archive: ZipArchive<R>, into: &Path, flags: &oof::Flags) -> crate::Result<Vec<PathBuf>> where R: Read + Seek, { let mut unpacked_files = vec![]; for idx in 0..archive.len() { let mut file = archive.by_index(idx)?; let file_path = match file.enclosed_name() { Some(path) => path.to_owned(), None => continue, }; let file_path = into.join(file_path); if file_path.exists() && !utils::user_wants_to_overwrite(&file_path, flags)? { continue; } check_for_comments(&file); match (&*file.name()).ends_with('/') { _is_dir @ true => { println!("File {} extracted to \"{}\"", idx, file_path.display()); fs::create_dir_all(&file_path)?; } _is_file @ false => { if let Some(path) = file_path.parent() { if !path.exists() { fs::create_dir_all(&path)?; } } info!("{:?} extracted. ({})", file_path.display(), Bytes::new(file.size())); let mut output_file = fs::File::create(&file_path)?; io::copy(&mut file, &mut output_file)?; } } #[cfg(unix)] __unix_set_permissions(&file_path, &file); let file_path = fs::canonicalize(file_path.clone())?; unpacked_files.push(file_path); } Ok(unpacked_files) } pub fn build_archive_from_paths<W>(input_filenames: &[PathBuf], writer: W) -> crate::Result<W> where W: Write + Seek, { let mut writer = zip::ZipWriter::new(writer); let options = zip::write::FileOptions::default(); // Vec of any filename that failed the UTF-8 check let invalid_unicode_filenames: Vec<PathBuf> = input_filenames .iter() .map(|path| (path, path.to_str())) .filter(|(_, x)| x.is_none()) .map(|(a, _)| a.to_path_buf()) .collect(); if !invalid_unicode_filenames.is_empty() { panic!("invalid unicode filenames found, cannot be supported by Zip:\n {:#?}", invalid_unicode_filenames); } for filename in input_filenames { let previous_location = utils::cd_into_same_dir_as(filename)?; // Safe unwrap: the input has been validated beforehand let filename = filename.file_name().unwrap(); for entry in WalkDir::new(filename) { let entry = entry?; let path = &entry.path(); println!("Compressing '{}'.", utils::to_utf(path)); if path.is_dir() { continue; } writer.start_file(path.to_str().unwrap().to_owned(), options)?; // TODO: check if there isn't a function that already does this for us... // TODO: better error messages let file_bytes = fs::read(entry.path())?; writer.write_all(&*file_bytes)?; } env::set_current_dir(previous_location)?; } let bytes = writer.finish()?; Ok(bytes) } fn check_for_comments(file: &ZipFile) { let comment = file.comment(); if !comment.is_empty() { info!("Found comment in {}: {}", file.name(), comment); } } #[cfg(unix)] fn __unix_set_permissions(file_path: &Path, file: &ZipFile) { use std::os::unix::fs::PermissionsExt; if let Some(mode) = file.unix_mode() { fs::set_permissions(&file_path, fs::Permissions::from_mode(mode)).unwrap(); } }
29.492063
116
0.556243
915f30a548f62bfac7f8b1b9cdf98730b3dffd99
4,698
/* ************************************************************************ ** ** This file is part of rsp2, and is licensed under EITHER the MIT license ** ** or the Apache 2.0 license, at your option. ** ** ** ** http://www.apache.org/licenses/LICENSE-2.0 ** ** http://opensource.org/licenses/MIT ** ** ** ** Be aware that not all of rsp2 is provided under this permissive license, ** ** and that the project as a whole is licensed under the GPL 3.0. ** ** ************************************************************************ */ use std::mem; use super::{V2, V3, V4}; /// Zero-cost transformations from sequences of arrays into sequences of `Vn`. /// /// # Safety /// /// The default impls effectively perform `transmute`, and some of the generic /// impls assume that it is safe to perform pointer casts between Self and `Self::En`. /// (this may be done even on pointers to pointers, or smart pointers and etc.) pub unsafe trait Envee { type En: ?Sized; /// Casts a sequence of arrays into `V2`/`V3`/`V4`s. #[inline(always)] fn envee(self) -> Self::En where Self: Sized, Self::En: Sized { unsafe { mem::transmute_copy(&mem::ManuallyDrop::new(self)) } } /// Borrow a sequence of arrays as `V2`/`V3`/`V4`s. /// /// This method exists for the convenience of autoref. (Contrast with `(&self).envee()`) #[inline(always)] fn envee_ref(&self) -> &Self::En { self.envee() } /// Mutably borrow a sequence of arrays as `V2`/`V3`/`V4`s. /// /// This method exists for the convenience of autoref. (Contrast with `(&mut self).envee()`) #[inline(always)] fn envee_mut(&mut self) -> &mut Self::En { self.envee() } } /// Zero-cost transformations from sequences of `Vn` into sequences of arrays. /// /// # Safety /// /// The default impls effectively perform `transmute`, and some of the generic /// impls assume that it is safe to perform pointer casts between Self and `Self::Un`. /// (this may be done even on pointers to pointers, or smart pointers and etc.) pub unsafe trait Unvee { type Un: ?Sized; /// Casts a sequence of `V2`/`V3`/`V4`s into arrays. #[inline(always)] fn unvee(self) -> Self::Un where Self: Sized, Self::Un: Sized { unsafe { mem::transmute_copy(&mem::ManuallyDrop::new(self)) } } /// Borrow a sequence of `V2`/`V3`/`V4`s as arrays. /// /// This method exists for the convenience of autoref. (Contrast with `(&self).unvee()`) #[inline(always)] fn unvee_ref(&self) -> &Self::Un { self.unvee() } /// Mutably borrow a sequence of `V2`/`V3`/`V4`s as arrays. /// /// This method exists for the convenience of autoref. 
(Contrast with `(&mut self).unvee()`) #[inline(always)] fn unvee_mut(&mut self) -> &mut Self::Un { self.unvee() } } gen_each!{ @{Vn_n} for_each!( {$Vn:ident $n:tt} ) => { unsafe impl<X> Envee for [[X;$n]] { type En = [$Vn<X>]; } unsafe impl<X> Unvee for [$Vn<X>] { type Un = [[X;$n]]; } unsafe impl<X> Envee for Vec<[X;$n]> { type En = Vec<$Vn<X>>; } unsafe impl<X> Unvee for Vec<$Vn<X>> { type Un = Vec<[X;$n]>; } } } gen_each!{ @{Vn_n} @{0...8} for_each!( {$Vn:ident $n:tt} {$k:tt} ) => { unsafe impl<X> Envee for [[X;$n]; $k] { type En = [$Vn<X>; $k]; } unsafe impl<X> Unvee for [$Vn<X>; $k] { type Un = [[X;$n]; $k]; } } } mod envee_generic_impls { use super::*; use std::rc::{Rc, Weak as RcWeak}; use std::sync::{Arc, Weak as ArcWeak}; use std::cell::RefCell; gen_each!{ [ {Envee En} {Unvee Un} ] for_each!( {$Envee:ident $En:ident} ) => { unsafe impl<'a, V: $Envee + ?Sized> $Envee for &'a V { type $En = &'a V::$En; } unsafe impl<'a, V: $Envee + ?Sized> $Envee for &'a mut V { type $En = &'a mut V::$En; } unsafe impl< V: $Envee + ?Sized> $Envee for Box<V> { type $En = Box<V::$En>; } unsafe impl< V: $Envee + ?Sized> $Envee for Rc<V> { type $En = Rc<V::$En>; } unsafe impl< V: $Envee + ?Sized> $Envee for RcWeak<V> { type $En = RcWeak<V::$En>; } unsafe impl< V: $Envee + ?Sized> $Envee for Arc<V> { type $En = Arc<V::$En>; } unsafe impl< V: $Envee + ?Sized> $Envee for ArcWeak<V> { type $En = ArcWeak<V::$En>; } unsafe impl< V: $Envee + ?Sized> $Envee for RefCell<V> { type $En = RefCell<V::$En>; } } } }
40.852174
101
0.525117
eddcced2d8c8ed52fecfa992f02b6dd9774b9410
10,044
use crate::event::{Callback, Event, EventResult, EventTrigger}; use crate::view::{View, ViewWrapper}; use crate::Cursive; use crate::With; use std::rc::Rc; /// A wrapper view that can react to events. /// /// This view registers a set of callbacks tied to specific events, to be run /// in certain conditions. /// /// * Some callbacks are called only for events ignored by the wrapped view. /// /// (those registered by [`on_event`] or [`on_event_inner`]) /// * Others are processed first, and can control whether the child view should /// be given the event (those registered by [`on_pre_event`] or /// [`on_pre_event_inner`]). /// /// "Inner" callbacks ([`on_event_inner`] and [`on_pre_event_inner`]) are given /// a reference to the inner wrapped view (but not to the `Cursive` root). They /// can then return another callback, taking only a `&mut Cursive` root as /// argument. /// /// "Simple" callbacks ([`on_event`] and [`on_pre_event`]) skip this first /// phase and are only called with a `&mut Cursive`. /// /// [`on_event`]: OnEventView::on_event /// [`on_pre_event`]: OnEventView::on_pre_event /// [`on_event_inner`]: OnEventView::on_event_inner /// [`on_pre_event_inner`]: OnEventView::on_pre_event_inner /// /// # Examples /// /// ``` /// # use cursive::event;; /// # use cursive::views::{OnEventView, TextView}; /// let view = OnEventView::new(TextView::new("This view has an event!")) /// .on_event('q', |s| s.quit()) /// .on_event(event::Key::Esc, |s| s.quit()); /// ``` pub struct OnEventView<T: View> { view: T, callbacks: Vec<(EventTrigger, Action<T>)>, } type InnerCallback<T> = Rc<Box<dyn Fn(&mut T, &Event) -> Option<EventResult>>>; struct Action<T> { phase: TriggerPhase, callback: InnerCallback<T>, } impl<T> Clone for Action<T> { fn clone(&self) -> Self { Action { phase: self.phase.clone(), callback: Rc::clone(&self.callback), } } } #[derive(PartialEq, Clone)] enum TriggerPhase { BeforeChild, AfterChild, } impl<T: View> OnEventView<T> { /// Wraps the given view in a new OnEventView. pub fn new(view: T) -> Self { OnEventView { view, callbacks: Vec::new(), } } /// Registers a callback when the given event is ignored by the child. /// /// Chainable variant. /// /// # Examples /// /// /// ```rust /// # use cursive::views::{OnEventView, DummyView}; /// # use cursive::event::{Key, EventTrigger}; /// let view = OnEventView::new(DummyView) /// .on_event('q', |s| s.quit()) /// .on_event(Key::Esc, |s| { /// s.pop_layer(); /// }) /// .on_event(EventTrigger::mouse(), |s| { /// s.add_layer(DummyView); /// }); /// ``` pub fn on_event<F, E>(self, trigger: E, cb: F) -> Self where E: Into<EventTrigger>, F: 'static + Fn(&mut Cursive), { self.with(|s| s.set_on_event(trigger, cb)) } /// Registers a callback when the given event is received. /// /// The child will never receive this event. /// /// Chainable variant. pub fn on_pre_event<F, E>(self, trigger: E, cb: F) -> Self where E: Into<EventTrigger>, F: 'static + Fn(&mut Cursive), { self.with(|s| s.set_on_pre_event(trigger, cb)) } /// Registers a callback when the given event is received. /// /// The given callback will be run before the child view sees the event. /// /// * If the result is `None`, then the child view is given the event as /// usual. /// * Otherwise, it bypasses the child view and directly processes the /// result. /// /// Chainable variant. 
pub fn on_pre_event_inner<F, E>(self, trigger: E, cb: F) -> Self where E: Into<EventTrigger>, F: Fn(&mut T, &Event) -> Option<EventResult> + 'static, { self.with(|s| s.set_on_pre_event_inner(trigger, cb)) } /// Registers a callback when the given event is ignored by the child. /// /// This is an advanced method to get more control. /// [`on_event`] may be easier to use. /// /// If the child view ignores the event, `cb` will be called with the /// child view as argument. /// If the result is not `None`, it will be processed as well. /// /// Chainable variant. /// /// [`on_event`]: OnEventView::on_event() /// /// # Examples /// /// ```rust /// # use cursive::views::{DummyView, OnEventView}; /// # use cursive::event::{Event, EventTrigger, MouseEvent, EventResult}; /// let view = OnEventView::new(DummyView) /// .on_event_inner( /// EventTrigger::mouse(), /// |d: &mut DummyView, e: &Event| { /// if let &Event::Mouse { event: MouseEvent::Press(_), .. } = e { /// // Do something on mouse press /// Some(EventResult::with_cb(|s| { /// s.pop_layer(); /// })) /// } else { /// // Otherwise, don't do anything /// None /// } /// } /// ); /// ``` pub fn on_event_inner<F, E>(self, trigger: E, cb: F) -> Self where E: Into<EventTrigger>, F: Fn(&mut T, &Event) -> Option<EventResult> + 'static, { self.with(|s| s.set_on_event_inner(trigger, cb)) } /// Registers a callback when the given event is ignored by the child. pub fn set_on_event<F, E>(&mut self, trigger: E, cb: F) where E: Into<EventTrigger>, F: Fn(&mut Cursive) + 'static, { let cb = Callback::from_fn(cb); let action = move |_: &mut T, _: &Event| { Some(EventResult::Consumed(Some(cb.clone()))) }; self.set_on_event_inner(trigger, action); } /// Registers a callback when the given event is received. /// /// The child will never receive this event. pub fn set_on_pre_event<F, E>(&mut self, trigger: E, cb: F) where E: Into<EventTrigger>, F: 'static + Fn(&mut Cursive), { let cb = Callback::from_fn(cb); // We want to clone the Callback every time we call the closure let action = move |_: &mut T, _: &Event| { Some(EventResult::Consumed(Some(cb.clone()))) }; self.set_on_pre_event_inner(trigger, action); } /// Registers a callback when the given event is received. /// /// The given callback will be run before the child view sees the event. /// /// * If the result is `None`, then the child view is given the event as /// usual. /// * Otherwise, it bypasses the child view and directly processes the /// result. pub fn set_on_pre_event_inner<F, E>(&mut self, trigger: E, cb: F) where E: Into<EventTrigger>, F: Fn(&mut T, &Event) -> Option<EventResult> + 'static, { self.callbacks.push(( trigger.into(), Action { phase: TriggerPhase::BeforeChild, callback: Rc::new(Box::new(cb)), }, )); } /// Registers a callback when the given event is ignored by the child. /// /// If the child view ignores the event, `cb` will be called with the /// child view as argument. /// If the result is not `None`, it will be processed as well. pub fn set_on_event_inner<F, E>(&mut self, trigger: E, cb: F) where E: Into<EventTrigger>, F: Fn(&mut T, &Event) -> Option<EventResult> + 'static, { self.callbacks.push(( trigger.into(), Action { phase: TriggerPhase::AfterChild, callback: Rc::new(Box::new(cb)), }, )); } /// Remove any callbacks defined for this view. 
pub fn clear_callbacks(&mut self) { self.callbacks.clear(); } inner_getters!(self.view: T); } impl<T: View> ViewWrapper for OnEventView<T> { wrap_impl!(self.view: T); fn wrap_on_event(&mut self, event: Event) -> EventResult { // Until we have better closure capture, define captured members separately. let callbacks = &self.callbacks; let view = &mut self.view; // * First, check all pre-child callbacks. Combine them. // If any gets triggered and returns Some(...), stop right there. // * Otherwise, give the event to the child view. // If it returns EventResult::Consumed, stop right there. // * Finally, check all post-child callbacks. Combine them. // And just return the result. // First step: check pre-child callbacks .iter() .filter(|&(_, action)| action.phase == TriggerPhase::BeforeChild) .filter(|&(trigger, _)| trigger.apply(&event)) .filter_map(|(_, action)| (*action.callback)(view, &event)) .fold(None, |s, r| match s { // Return `Some()` if any pre-callback was present. None => Some(r), Some(c) => Some(c.and(r)), }) .unwrap_or_else(|| { // If it was None, it means no pre-callback was triggered. // So let's give the view a chance! view.on_event(event.clone()) }) .or_else(|| { // No pre-child, and the child itself ignored the event? // Let's have a closer look then, shall we? callbacks .iter() .filter(|&(_, action)| { action.phase == TriggerPhase::AfterChild }) .filter(|&(trigger, _)| trigger.apply(&event)) .filter_map(|(_, action)| (*action.callback)(view, &event)) .fold(EventResult::Ignored, EventResult::and) }) } }
33.258278
84
0.548984
9cba65754585875934bbe097828fac297c05e5fd
1,614
//! This module contains the functions for //! sending a payload //! for a TCP connection. //! //! *This module is available only if MultiCrusty is built with //! the `"transport"` feature or the `"transport_tcp"` feature.* use crate::binary::struct_trait::{send::Send, session::Session}; use std::boxed::Box; use std::error::Error; use std::io::Write; use std::marker; use std::net::TcpStream; use std::panic; type TcpData = [u8; 128]; /// Send a value of type `T` over tcp. Returns the /// continuation of the session `S` and the continuation /// of the TcpStream. May fail. /// /// *This function is available only if MultiCrusty is built with /// the `"transport"` feature or the `"transport_tcp"` feature.* #[cfg_attr( doc_cfg, doc(cfg(any(feature = "transport", feature = "transport_tcp"))) )] pub fn send_tcp<T, S>( x: T, // Need to force x and data to be of the same type every time but for choice/offer data: &TcpData, s: Send<(T, TcpData), S>, mut stream: TcpStream, tcp: bool, ) -> Result<(S, TcpStream), Box<dyn Error>> where T: marker::Send, S: Session, { let (here, there) = S::new(); match s.channel.send(((x, *data), there)) { Ok(()) => { match tcp { true => { // stream.shutdown(Shutdown::Read)?; // TODO: Force stream to be write only. // Needed? stream.write_all(data)?; Ok((here, stream)) } false => Ok((here, stream)), } } Err(e) => panic!("{}", e.to_string()), } }
29.345455
96
0.571871
e4fd66a0537c1a9629a3aa27a07ba995fae7f960
29,323
// Copyright (c) 2011 Jan Kokemüller // Copyright (c) 2020 Sebastian Dröge <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use std::fmt; use crate::ebur128::Channel; use crate::utils::Sample; /// BS.1770 filter and optional sample/true peak measurement context. pub struct Filter { channels: u32, /// BS.1770 filter coefficients (numerator). b: [f64; 5], /// BS.1770 filter coefficients (denominator). a: [f64; 5], /// One filter state per channel. filter_state: Box<[[f64; 5]]>, /// Whether to measure sample peak. calculate_sample_peak: bool, /// Previously measured sample peak. sample_peak: Box<[f64]>, /// True peak measurement if enabled. tp: Option<crate::true_peak::TruePeak>, /// Previously measured true peak. true_peak: Box<[f64]>, } impl fmt::Debug for Filter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Filter") .field("channels", &self.channels) .field("b", &self.b) .field("a", &self.a) .field("filter_state", &self.filter_state) .field("calculate_sample_peak", &self.calculate_sample_peak) .field("sample_peak", &self.sample_peak) .field("calculate_true_peak", &self.tp.is_some()) .field("true_peak", &self.true_peak) .finish() } } #[allow(non_snake_case)] fn filter_coefficients(rate: f64) -> ([f64; 5], [f64; 5]) { let f0 = 1681.974450955533; let G = 3.999843853973347; let Q = 0.7071752369554196; let K = f64::tan(std::f64::consts::PI * f0 / rate); let Vh = f64::powf(10.0, G / 20.0); let Vb = f64::powf(Vh, 0.4996667741545416); let mut pb = [0.0, 0.0, 0.0]; let mut pa = [1.0, 0.0, 0.0]; let rb = [1.0, -2.0, 1.0]; let mut ra = [1.0, 0.0, 0.0]; let a0 = 1.0 + K / Q + K * K; pb[0] = (Vh + Vb * K / Q + K * K) / a0; pb[1] = 2.0 * (K * K - Vh) / a0; pb[2] = (Vh - Vb * K / Q + K * K) / a0; pa[1] = 2.0 * (K * K - 1.0) / a0; pa[2] = (1.0 - K / Q + K * K) / a0; let f0 = 38.13547087602444; let Q = 0.5003270373238773; let K = f64::tan(std::f64::consts::PI * f0 / rate); ra[1] = 2.0 * (K * K - 1.0) / (1.0 + K / Q + K * K); ra[2] = (1.0 - K / Q + K * K) / (1.0 + K / Q + K * K); ( // Numerator [ pb[0] * rb[0], pb[0] * rb[1] + pb[1] * rb[0], pb[0] * rb[2] + pb[1] * rb[1] + pb[2] * rb[0], pb[1] * rb[2] + pb[2] * rb[1], pb[2] * rb[2], ], // Denominator [ pa[0] * ra[0], pa[0] * ra[1] + pa[1] * ra[0], pa[0] * ra[2] + pa[1] * ra[1] + pa[2] * ra[0], pa[1] * ra[2] + pa[2] * ra[1], pa[2] * ra[2], ], ) } impl Filter { pub fn new( rate: u32, channels: u32, calculate_sample_peak: bool, calculate_true_peak: bool, ) -> Self { assert!(rate > 0); assert!(channels > 
0); let (b, a) = filter_coefficients(rate as f64); let tp = if calculate_true_peak { crate::true_peak::TruePeak::new(rate, channels) } else { None }; Filter { channels, b, a, filter_state: vec![[0.0; 5]; channels as usize].into_boxed_slice(), calculate_sample_peak, sample_peak: vec![0.0; channels as usize].into_boxed_slice(), tp, true_peak: vec![0.0; channels as usize].into_boxed_slice(), } } pub fn reset_peaks(&mut self) { for v in &mut *self.sample_peak { *v = 0.0; } for v in &mut *self.true_peak { *v = 0.0; } } pub fn reset(&mut self) { self.reset_peaks(); for f in &mut *self.filter_state { // TODO: Use slice::fill() once stabilized for v in &mut *f { *v = 0.0; } } if let Some(ref mut tp) = self.tp { tp.reset(); } } pub fn sample_peak(&self) -> &[f64] { &*self.sample_peak } pub fn true_peak(&self) -> &[f64] { &*self.true_peak } pub fn process<'a, T: Sample + 'a, S: crate::Samples<'a, T>>( &mut self, src: &S, dest: &mut [f64], dest_index: usize, channel_map: &[crate::ebur128::Channel], ) { assert!(dest.len() % self.channels as usize == 0); assert!(channel_map.len() == self.channels as usize); assert!(src.channels() == self.channels as usize); assert!(self.filter_state.len() == self.channels as usize); ftz::with_ftz(|ftz| { if self.calculate_sample_peak { assert!(self.sample_peak.len() == self.channels as usize); for (c, sample_peak) in self.sample_peak.iter_mut().enumerate() { let mut max = 0.0; assert!(c < src.channels()); src.foreach_sample(c, |sample| { let v = sample.as_f64_raw().abs(); if v > max { max = v; } }); max /= T::MAX_AMPLITUDE; if max > *sample_peak { *sample_peak = max; } } } if let Some(ref mut tp) = self.tp { assert!(self.true_peak.len() == self.channels as usize); tp.check_true_peak(src, &mut *self.true_peak); } let dest_stride = dest.len() / self.channels as usize; assert!(dest_index + src.frames() <= dest_stride); for (c, (channel_map, dest)) in channel_map .iter() .zip(dest.chunks_exact_mut(dest_stride)) .enumerate() { if *channel_map == crate::ebur128::Channel::Unused { continue; } assert!(c < src.channels()); let Filter { ref mut filter_state, ref a, ref b, .. 
} = *self; let filter_state = &mut filter_state[c]; src.foreach_sample_zipped(c, dest[dest_index..].iter_mut(), |src, dest| { filter_state[0] = (*src).to_sample::<f64>() - a[1] * filter_state[1] - a[2] * filter_state[2] - a[3] * filter_state[3] - a[4] * filter_state[4]; *dest = b[0] * filter_state[0] + b[1] * filter_state[1] + b[2] * filter_state[2] + b[3] * filter_state[3] + b[4] * filter_state[4]; filter_state[4] = filter_state[3]; filter_state[3] = filter_state[2]; filter_state[2] = filter_state[1]; filter_state[1] = filter_state[0]; }); if ftz.is_none() { for v in filter_state { if v.abs() < std::f64::EPSILON { *v = 0.0; } } } } }); } pub fn calc_gating_block( frames_per_block: usize, audio_data: &[f64], audio_data_index: usize, channel_map: &[Channel], ) -> f64 { let mut sum = 0.0; let channels = channel_map.len(); assert!(audio_data.len() % channels == 0); let audio_data_stride = audio_data.len() / channels; assert!(audio_data_index <= audio_data_stride); for (c, (channel, audio_data)) in channel_map .iter() .zip(audio_data.chunks_exact(audio_data_stride)) .enumerate() { if *channel == Channel::Unused { continue; } assert!(c < channels); assert!(audio_data_index <= audio_data.len()); let mut channel_sum = 0.0; // XXX: Don't use channel_sum += sum() here because that gives slightly different // results than the C version because of rounding errors if audio_data_index < frames_per_block { for frame in &audio_data[..audio_data_index] { channel_sum += *frame * *frame; } for frame in &audio_data[(audio_data.len() - frames_per_block + audio_data_index)..] { channel_sum += *frame * *frame; } } else { for frame in &audio_data[(audio_data_index - frames_per_block)..audio_data_index] { channel_sum += *frame * *frame; } } match channel { Channel::LeftSurround | Channel::RightSurround | Channel::Mp060 | Channel::Mm060 | Channel::Mp090 | Channel::Mm090 => { channel_sum *= 1.41; } Channel::DualMono => { channel_sum *= 2.0; } _ => (), } sum += channel_sum; } sum /= frames_per_block as f64; sum } } #[cfg(all( any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2" ))] mod ftz { #[cfg(target_arch = "x86")] use std::arch::x86::{_mm_getcsr, _mm_setcsr, _MM_FLUSH_ZERO_ON}; #[cfg(target_arch = "x86_64")] use std::arch::x86_64::{_mm_getcsr, _mm_setcsr, _MM_FLUSH_ZERO_ON}; pub struct Ftz(u32); impl Ftz { unsafe fn new() -> Self { let csr = _mm_getcsr(); _mm_setcsr(csr | _MM_FLUSH_ZERO_ON); Ftz(csr) } } impl Drop for Ftz { fn drop(&mut self) { unsafe { _mm_setcsr(self.0); } } } pub fn with_ftz<F: FnOnce(Option<&Ftz>) -> T, T>(func: F) -> T { // Safety: MXCSR is unset in any case when Ftz goes out of scope and the closure also can't // mem::forget() it to prevent running the Drop impl. 
unsafe { let ftz = Ftz::new(); func(Some(&ftz)) } } } #[cfg(not(any(all( any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2" )),))] mod ftz { pub enum Ftz {} pub fn with_ftz<F: FnOnce(Option<&Ftz>) -> T, T>(func: F) -> T { func(None) } } #[cfg(feature = "c-tests")] use std::os::raw::c_void; #[cfg(feature = "c-tests")] extern "C" { pub fn filter_create_c( rate: u32, channels: u32, calculate_sample_peak: i32, calculate_true_peak: i32, ) -> *mut c_void; pub fn filter_reset_peaks_c(filter: *mut c_void); pub fn filter_sample_peak_c(filter: *const c_void) -> *const f64; pub fn filter_true_peak_c(filter: *const c_void) -> *const f64; pub fn filter_process_short_c( filter: *mut c_void, frames: usize, src: *const i16, dest: *mut f64, channel_map: *const u32, ); pub fn filter_process_int_c( filter: *mut c_void, frames: usize, src: *const i32, dest: *mut f64, channel_map: *const u32, ); pub fn filter_process_float_c( filter: *mut c_void, frames: usize, src: *const f32, dest: *mut f64, channel_map: *const u32, ); pub fn filter_process_double_c( filter: *mut c_void, frames: usize, src: *const f64, dest: *mut f64, channel_map: *const u32, ); pub fn filter_destroy_c(filter: *mut c_void); pub fn calc_gating_block_c( frames_per_block: usize, audio_data: *const f64, audio_data_frames: usize, audio_data_index: usize, channel_map: *const u32, channels: usize, ) -> f64; } #[cfg(feature = "c-tests")] #[cfg(test)] mod tests { use super::*; use crate::tests::Signal; use float_eq::assert_float_eq; use quickcheck_macros::quickcheck; #[allow(clippy::too_many_arguments)] fn compare_results( calculate_sample_peak: bool, calculate_true_peak: bool, sp: &[f64], tp: &[f64], sp_c: &[f64], tp_c: &[f64], data_out: &[f64], data_out_c: &[f64], ) { if calculate_sample_peak { for (i, (r, c)) in sp.iter().zip(sp_c.iter()).enumerate() { assert_float_eq!( *r, *c, ulps <= 2, "Rust and C implementation differ at sample peak {}", i ); } } if calculate_true_peak { for (i, (r, c)) in tp.iter().zip(tp_c.iter()).enumerate() { assert_float_eq!( *r, *c, // For a performance-boost, filter is defined as f32, causing slightly lower precision abs <= 0.000004, "Rust and C implementation differ at true peak {}", i ); } } for (i, (r, c)) in data_out.iter().zip(data_out_c.iter()).enumerate() { assert_float_eq!( *r, *c, ulps <= 2, "Rust and C implementation differ at sample {}", i ); } } #[quickcheck] fn compare_c_impl_i16( signal: Signal<i16>, calculate_sample_peak: bool, calculate_true_peak: bool, ) { // Maximum of 400ms but our input is up to 5000ms, so distribute it evenly // by shrinking accordingly. 
let frames = signal.data.len() / signal.channels as usize; let frames = std::cmp::min(2 * frames / 25, 4 * ((signal.rate as usize + 5) / 10)); let mut data_out = vec![0.0f64; frames * signal.channels as usize]; let mut data_out_c = vec![0.0f64; frames * signal.channels as usize]; let channel_map_c = vec![1; signal.channels as usize]; let channel_map = vec![Channel::Left; signal.channels as usize]; let (sp, tp) = { let mut f = Filter::new( signal.rate, signal.channels, calculate_sample_peak, calculate_true_peak, ); let mut data_out_tmp = vec![0.0f64; frames * signal.channels as usize]; f.process( &crate::Interleaved::new( &signal.data[..(frames * signal.channels as usize)], signal.channels as usize, ) .unwrap(), &mut data_out_tmp, 0, &channel_map, ); for (c, src) in data_out_tmp.chunks_exact(frames).enumerate() { for (i, src) in src.iter().enumerate() { data_out[i * signal.channels as usize + c] = *src; } } (Vec::from(f.sample_peak()), Vec::from(f.true_peak())) }; let (sp_c, tp_c) = unsafe { use std::slice; let f = filter_create_c( signal.rate, signal.channels, if calculate_sample_peak { 1 } else { 0 }, if calculate_true_peak { 1 } else { 0 }, ); filter_process_short_c( f, frames, signal.data[..(frames * signal.channels as usize)].as_ptr(), data_out_c.as_mut_ptr(), channel_map_c.as_ptr(), ); let sp = Vec::from(slice::from_raw_parts( filter_sample_peak_c(f), signal.channels as usize, )); let tp = Vec::from(slice::from_raw_parts( filter_true_peak_c(f), signal.channels as usize, )); filter_destroy_c(f); (sp, tp) }; compare_results( calculate_sample_peak, calculate_true_peak, &sp, &tp, &sp_c, &tp_c, &data_out, &data_out_c, ); } #[quickcheck] fn compare_c_impl_i32( signal: Signal<i32>, calculate_sample_peak: bool, calculate_true_peak: bool, ) { // Maximum of 400ms but our input is up to 5000ms, so distribute it evenly // by shrinking accordingly. 
let frames = signal.data.len() / signal.channels as usize; let frames = std::cmp::min(2 * frames / 25, 4 * ((signal.rate as usize + 5) / 10)); let mut data_out = vec![0.0f64; frames * signal.channels as usize]; let mut data_out_c = vec![0.0f64; frames * signal.channels as usize]; let channel_map_c = vec![1; signal.channels as usize]; let channel_map = vec![Channel::Left; signal.channels as usize]; let (sp, tp) = { let mut f = Filter::new( signal.rate, signal.channels, calculate_sample_peak, calculate_true_peak, ); let mut data_out_tmp = vec![0.0f64; frames * signal.channels as usize]; f.process( &crate::Interleaved::new( &signal.data[..(frames * signal.channels as usize)], signal.channels as usize, ) .unwrap(), &mut data_out_tmp, 0, &channel_map, ); for (c, src) in data_out_tmp.chunks_exact(frames).enumerate() { for (i, src) in src.iter().enumerate() { data_out[i * signal.channels as usize + c] = *src; } } (Vec::from(f.sample_peak()), Vec::from(f.true_peak())) }; let (sp_c, tp_c) = unsafe { use std::slice; let f = filter_create_c( signal.rate, signal.channels, if calculate_sample_peak { 1 } else { 0 }, if calculate_true_peak { 1 } else { 0 }, ); filter_process_int_c( f, frames, signal.data[..(frames * signal.channels as usize)].as_ptr(), data_out_c.as_mut_ptr(), channel_map_c.as_ptr(), ); let sp = Vec::from(slice::from_raw_parts( filter_sample_peak_c(f), signal.channels as usize, )); let tp = Vec::from(slice::from_raw_parts( filter_true_peak_c(f), signal.channels as usize, )); filter_destroy_c(f); (sp, tp) }; compare_results( calculate_sample_peak, calculate_true_peak, &sp, &tp, &sp_c, &tp_c, &data_out, &data_out_c, ); } #[quickcheck] fn compare_c_impl_f32( signal: Signal<f32>, calculate_sample_peak: bool, calculate_true_peak: bool, ) { // Maximum of 400ms but our input is up to 5000ms, so distribute it evenly // by shrinking accordingly. 
let frames = signal.data.len() / signal.channels as usize; let frames = std::cmp::min(2 * frames / 25, 4 * ((signal.rate as usize + 5) / 10)); let mut data_out = vec![0.0f64; frames * signal.channels as usize]; let mut data_out_c = vec![0.0f64; frames * signal.channels as usize]; let channel_map_c = vec![1; signal.channels as usize]; let channel_map = vec![Channel::Left; signal.channels as usize]; let (sp, tp) = { let mut f = Filter::new( signal.rate, signal.channels, calculate_sample_peak, calculate_true_peak, ); let mut data_out_tmp = vec![0.0f64; frames * signal.channels as usize]; f.process( &crate::Interleaved::new( &signal.data[..(frames * signal.channels as usize)], signal.channels as usize, ) .unwrap(), &mut data_out_tmp, 0, &channel_map, ); for (c, src) in data_out_tmp.chunks_exact(frames).enumerate() { for (i, src) in src.iter().enumerate() { data_out[i * signal.channels as usize + c] = *src; } } (Vec::from(f.sample_peak()), Vec::from(f.true_peak())) }; let (sp_c, tp_c) = unsafe { use std::slice; let f = filter_create_c( signal.rate, signal.channels, if calculate_sample_peak { 1 } else { 0 }, if calculate_true_peak { 1 } else { 0 }, ); filter_process_float_c( f, frames, signal.data[..(frames * signal.channels as usize)].as_ptr(), data_out_c.as_mut_ptr(), channel_map_c.as_ptr(), ); let sp = Vec::from(slice::from_raw_parts( filter_sample_peak_c(f), signal.channels as usize, )); let tp = Vec::from(slice::from_raw_parts( filter_true_peak_c(f), signal.channels as usize, )); filter_destroy_c(f); (sp, tp) }; compare_results( calculate_sample_peak, calculate_true_peak, &sp, &tp, &sp_c, &tp_c, &data_out, &data_out_c, ); } #[quickcheck] fn compare_c_impl_f64( signal: Signal<f64>, calculate_sample_peak: bool, calculate_true_peak: bool, ) { // Maximum of 400ms but our input is up to 5000ms, so distribute it evenly // by shrinking accordingly. 
let frames = signal.data.len() / signal.channels as usize; let frames = std::cmp::min(2 * frames / 25, 4 * ((signal.rate as usize + 5) / 10)); let mut data_out = vec![0.0f64; frames * signal.channels as usize]; let mut data_out_c = vec![0.0f64; frames * signal.channels as usize]; let channel_map_c = vec![1; signal.channels as usize]; let channel_map = vec![Channel::Left; signal.channels as usize]; let (sp, tp) = { let mut f = Filter::new( signal.rate, signal.channels, calculate_sample_peak, calculate_true_peak, ); let mut data_out_tmp = vec![0.0f64; frames * signal.channels as usize]; f.process( &crate::Interleaved::new( &signal.data[..(frames * signal.channels as usize)], signal.channels as usize, ) .unwrap(), &mut data_out_tmp, 0, &channel_map, ); for (c, src) in data_out_tmp.chunks_exact(frames).enumerate() { for (i, src) in src.iter().enumerate() { data_out[i * signal.channels as usize + c] = *src; } } (Vec::from(f.sample_peak()), Vec::from(f.true_peak())) }; let (sp_c, tp_c) = unsafe { use std::slice; let f = filter_create_c( signal.rate, signal.channels, if calculate_sample_peak { 1 } else { 0 }, if calculate_true_peak { 1 } else { 0 }, ); filter_process_double_c( f, frames, signal.data[..(frames * signal.channels as usize)].as_ptr(), data_out_c.as_mut_ptr(), channel_map_c.as_ptr(), ); let sp = Vec::from(slice::from_raw_parts( filter_sample_peak_c(f), signal.channels as usize, )); let tp = Vec::from(slice::from_raw_parts( filter_true_peak_c(f), signal.channels as usize, )); filter_destroy_c(f); (sp, tp) }; compare_results( calculate_sample_peak, calculate_true_peak, &sp, &tp, &sp_c, &tp_c, &data_out, &data_out_c, ); } #[derive(Clone, Debug)] struct GatingBlock { frames_per_block: usize, audio_data: Vec<f64>, audio_data_index: usize, channels: u32, } impl quickcheck::Arbitrary for GatingBlock { fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self { use rand::Rng; let channels = g.gen_range(1, 16); let rate = 48_000; let samples_in_100ms = (rate + 5) / 10; let (frames_per_block, window) = if g.gen() { (4 * samples_in_100ms, 400) } else { (30 * samples_in_100ms, 3000) }; let mut audio_data_frames = rate * window / 1000; if audio_data_frames % samples_in_100ms != 0 { // round up to multiple of samples_in_100ms audio_data_frames = (audio_data_frames + samples_in_100ms) - (audio_data_frames % samples_in_100ms); } let mut audio_data = vec![0.0; audio_data_frames * channels as usize]; for v in &mut audio_data { *v = g.gen_range(-1.0, 1.0); } let audio_data_index = g.gen_range(0, audio_data_frames) * channels as usize; GatingBlock { frames_per_block, audio_data, audio_data_index, channels, } } } fn default_channel_map_c(channels: u32) -> Vec<u32> { match channels { 4 => vec![1, 2, 4, 5], 5 => vec![1, 2, 3, 4, 5], _ => { let mut v = vec![0; channels as usize]; let set_channels = std::cmp::min(channels as usize, 6); v[0..set_channels].copy_from_slice(&[1, 2, 3, 0, 4, 5][..set_channels]); v } } } #[quickcheck] fn compare_c_impl_calc_gating_block(block: GatingBlock) { let channel_map = crate::ebur128::default_channel_map(block.channels); let channel_map_c = default_channel_map_c(block.channels); let energy = { let mut audio_data = vec![0.0; block.audio_data.len()]; let frames = block.audio_data.len() / block.channels as usize; for (c, dest) in audio_data.chunks_exact_mut(frames).enumerate() { for (i, dest) in dest.iter_mut().enumerate() { *dest = block.audio_data[i * block.channels as usize + c]; } } Filter::calc_gating_block( block.frames_per_block, &audio_data, block.audio_data_index / 
block.channels as usize, &channel_map, ) }; let energy_c = unsafe { calc_gating_block_c( block.frames_per_block, block.audio_data.as_ptr(), block.audio_data.len() / block.channels as usize, block.audio_data_index, channel_map_c.as_ptr(), block.channels as usize, ) }; assert_float_eq!(energy, energy_c, ulps <= 2); } }
31.09544
106
0.487024
1caff220cb22f7461b741c642d56504c0d2f3f70
105,820
//! A library for build scripts to compile custom C code //! //! This library is intended to be used as a `build-dependencies` entry in //! `Cargo.toml`: //! //! ```toml //! [build-dependencies] //! cc = "1.0" //! ``` //! //! The purpose of this crate is to provide the utility functions necessary to //! compile C code into a static archive which is then linked into a Rust crate. //! Configuration is available through the `Build` struct. //! //! This crate will automatically detect situations such as cross compilation or //! other environment variables set by Cargo and will build code appropriately. //! //! The crate is not limited to C code, it can accept any source code that can //! be passed to a C or C++ compiler. As such, assembly files with extensions //! `.s` (gcc/clang) and `.asm` (MSVC) can also be compiled. //! //! [`Build`]: struct.Build.html //! //! # Parallelism //! //! To parallelize computation, enable the `parallel` feature for the crate. //! //! ```toml //! [build-dependencies] //! cc = { version = "1.0", features = ["parallel"] } //! ``` //! To specify the max number of concurrent compilation jobs, set the `NUM_JOBS` //! environment variable to the desired amount. //! //! Cargo will also set this environment variable when executed with the `-jN` flag. //! //! If `NUM_JOBS` is not set, the `RAYON_NUM_THREADS` environment variable can //! also specify the build parallelism. //! //! # Examples //! //! Use the `Build` struct to compile `src/foo.c`: //! //! ```no_run //! fn main() { //! cc::Build::new() //! .file("src/foo.c") //! .define("FOO", Some("bar")) //! .include("src") //! .compile("foo"); //! } //! ``` #![doc(html_root_url = "https://docs.rs/cc/1.0")] #![cfg_attr(test, deny(warnings))] #![allow(deprecated)] #![deny(missing_docs)] use std::collections::HashMap; use std::env; use std::ffi::{OsStr, OsString}; use std::fmt::{self, Display}; use std::fs; use std::io::{self, BufRead, BufReader, Read, Write}; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Stdio}; use std::sync::{Arc, Mutex}; use std::thread::{self, JoinHandle}; // These modules are all glue to support reading the MSVC version from // the registry and from COM interfaces #[cfg(windows)] mod registry; #[cfg(windows)] #[macro_use] mod winapi; #[cfg(windows)] mod com; #[cfg(windows)] mod setup_config; pub mod windows_registry; /// A builder for compilation of a native static library. /// /// A `Build` is the main type of the `cc` crate and is used to control all the /// various configuration options and such of a compile. You'll find more /// documentation on each method itself. 
#[derive(Clone, Debug)] pub struct Build { include_directories: Vec<PathBuf>, definitions: Vec<(String, Option<String>)>, objects: Vec<PathBuf>, flags: Vec<String>, flags_supported: Vec<String>, known_flag_support_status: Arc<Mutex<HashMap<String, bool>>>, ar_flags: Vec<String>, no_default_flags: bool, files: Vec<PathBuf>, cpp: bool, cpp_link_stdlib: Option<Option<String>>, cpp_set_stdlib: Option<String>, cuda: bool, target: Option<String>, host: Option<String>, out_dir: Option<PathBuf>, opt_level: Option<String>, debug: Option<bool>, force_frame_pointer: Option<bool>, env: Vec<(OsString, OsString)>, compiler: Option<PathBuf>, archiver: Option<PathBuf>, cargo_metadata: bool, pic: Option<bool>, use_plt: Option<bool>, static_crt: Option<bool>, shared_flag: Option<bool>, static_flag: Option<bool>, warnings_into_errors: bool, warnings: Option<bool>, extra_warnings: Option<bool>, env_cache: Arc<Mutex<HashMap<String, Option<String>>>>, } /// Represents the types of errors that may occur while using cc-rs. #[derive(Clone, Debug)] enum ErrorKind { /// Error occurred while performing I/O. IOError, /// Invalid architecture supplied. ArchitectureInvalid, /// Environment variable not found, with the var in question as extra info. EnvVarNotFound, /// Error occurred while using external tools (ie: invocation of compiler). ToolExecError, /// Error occurred due to missing external tools. ToolNotFound, } /// Represents an internal error that occurred, with an explanation. #[derive(Clone, Debug)] pub struct Error { /// Describes the kind of error that occurred. kind: ErrorKind, /// More explanation of error that occurred. message: String, } impl Error { fn new(kind: ErrorKind, message: &str) -> Error { Error { kind: kind, message: message.to_owned(), } } } impl From<io::Error> for Error { fn from(e: io::Error) -> Error { Error::new(ErrorKind::IOError, &format!("{}", e)) } } impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}: {}", self.kind, self.message) } } impl std::error::Error for Error {} /// Configuration used to represent an invocation of a C compiler. /// /// This can be used to figure out what compiler is in use, what the arguments /// to it are, and what the environment variables look like for the compiler. /// This can be used to further configure other build systems (e.g. forward /// along CC and/or CFLAGS) or the `to_command` method can be used to run the /// compiler itself. #[derive(Clone, Debug)] pub struct Tool { path: PathBuf, cc_wrapper_path: Option<PathBuf>, cc_wrapper_args: Vec<OsString>, args: Vec<OsString>, env: Vec<(OsString, OsString)>, family: ToolFamily, cuda: bool, removed_args: Vec<OsString>, } /// Represents the family of tools this tool belongs to. /// /// Each family of tools differs in how and what arguments they accept. /// /// Detection of a family is done on best-effort basis and may not accurately reflect the tool. #[derive(Copy, Clone, Debug, PartialEq)] enum ToolFamily { /// Tool is GNU Compiler Collection-like. Gnu, /// Tool is Clang-like. It differs from the GCC in a sense that it accepts superset of flags /// and its cross-compilation approach is different. Clang, /// Tool is the MSVC cl.exe. Msvc { clang_cl: bool }, } impl ToolFamily { /// What the flag to request debug info for this family of tools look like fn add_debug_flags(&self, cmd: &mut Tool) { match *self { ToolFamily::Msvc { .. 
} => { cmd.push_cc_arg("-Z7".into()); } ToolFamily::Gnu | ToolFamily::Clang => { cmd.push_cc_arg("-g".into()); } } } /// What the flag to force frame pointers. fn add_force_frame_pointer(&self, cmd: &mut Tool) { match *self { ToolFamily::Gnu | ToolFamily::Clang => { cmd.push_cc_arg("-fno-omit-frame-pointer".into()); } _ => (), } } /// What the flags to enable all warnings fn warnings_flags(&self) -> &'static str { match *self { ToolFamily::Msvc { .. } => "-W4", ToolFamily::Gnu | ToolFamily::Clang => "-Wall", } } /// What the flags to enable extra warnings fn extra_warnings_flags(&self) -> Option<&'static str> { match *self { ToolFamily::Msvc { .. } => None, ToolFamily::Gnu | ToolFamily::Clang => Some("-Wextra"), } } /// What the flag to turn warning into errors fn warnings_to_errors_flag(&self) -> &'static str { match *self { ToolFamily::Msvc { .. } => "-WX", ToolFamily::Gnu | ToolFamily::Clang => "-Werror", } } fn verbose_stderr(&self) -> bool { *self == ToolFamily::Clang } } /// Represents an object. /// /// This is a source file -> object file pair. #[derive(Clone, Debug)] struct Object { src: PathBuf, dst: PathBuf, } impl Object { /// Create a new source file -> object file pair. fn new(src: PathBuf, dst: PathBuf) -> Object { Object { src: src, dst: dst } } } impl Build { /// Construct a new instance of a blank set of configuration. /// /// This builder is finished with the [`compile`] function. /// /// [`compile`]: struct.Build.html#method.compile pub fn new() -> Build { Build { include_directories: Vec::new(), definitions: Vec::new(), objects: Vec::new(), flags: Vec::new(), flags_supported: Vec::new(), known_flag_support_status: Arc::new(Mutex::new(HashMap::new())), ar_flags: Vec::new(), no_default_flags: false, files: Vec::new(), shared_flag: None, static_flag: None, cpp: false, cpp_link_stdlib: None, cpp_set_stdlib: None, cuda: false, target: None, host: None, out_dir: None, opt_level: None, debug: None, force_frame_pointer: None, env: Vec::new(), compiler: None, archiver: None, cargo_metadata: true, pic: None, use_plt: None, static_crt: None, warnings: None, extra_warnings: None, warnings_into_errors: false, env_cache: Arc::new(Mutex::new(HashMap::new())), } } /// Add a directory to the `-I` or include path for headers /// /// # Example /// /// ```no_run /// use std::path::Path; /// /// let library_path = Path::new("/path/to/library"); /// /// cc::Build::new() /// .file("src/foo.c") /// .include(library_path) /// .include("src") /// .compile("foo"); /// ``` pub fn include<P: AsRef<Path>>(&mut self, dir: P) -> &mut Build { self.include_directories.push(dir.as_ref().to_path_buf()); self } /// Specify a `-D` variable with an optional value. 
/// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .define("FOO", "BAR") /// .define("BAZ", None) /// .compile("foo"); /// ``` pub fn define<'a, V: Into<Option<&'a str>>>(&mut self, var: &str, val: V) -> &mut Build { self.definitions .push((var.to_string(), val.into().map(|s| s.to_string()))); self } /// Add an arbitrary object file to link in pub fn object<P: AsRef<Path>>(&mut self, obj: P) -> &mut Build { self.objects.push(obj.as_ref().to_path_buf()); self } /// Add an arbitrary flag to the invocation of the compiler /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .flag("-ffunction-sections") /// .compile("foo"); /// ``` pub fn flag(&mut self, flag: &str) -> &mut Build { self.flags.push(flag.to_string()); self } /// Add an arbitrary flag to the invocation of the compiler /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .file("src/bar.c") /// .ar_flag("/NODEFAULTLIB:libc.dll") /// .compile("foo"); /// ``` pub fn ar_flag(&mut self, flag: &str) -> &mut Build { self.ar_flags.push(flag.to_string()); self } fn ensure_check_file(&self) -> Result<PathBuf, Error> { let out_dir = self.get_out_dir()?; let src = if self.cuda { assert!(self.cpp); out_dir.join("flag_check.cu") } else if self.cpp { out_dir.join("flag_check.cpp") } else { out_dir.join("flag_check.c") }; if !src.exists() { let mut f = fs::File::create(&src)?; write!(f, "int main(void) {{ return 0; }}")?; } Ok(src) } /// Run the compiler to test if it accepts the given flag. /// /// For a convenience method for setting flags conditionally, /// see `flag_if_supported()`. /// /// It may return error if it's unable to run the compilier with a test file /// (e.g. the compiler is missing or a write to the `out_dir` failed). /// /// Note: Once computed, the result of this call is stored in the /// `known_flag_support` field. If `is_flag_supported(flag)` /// is called again, the result will be read from the hash table. pub fn is_flag_supported(&self, flag: &str) -> Result<bool, Error> { let mut known_status = self.known_flag_support_status.lock().unwrap(); if let Some(is_supported) = known_status.get(flag).cloned() { return Ok(is_supported); } let out_dir = self.get_out_dir()?; let src = self.ensure_check_file()?; let obj = out_dir.join("flag_check"); let target = self.get_target()?; let host = self.get_host()?; let mut cfg = Build::new(); cfg.flag(flag) .target(&target) .opt_level(0) .host(&host) .debug(false) .cpp(self.cpp) .cuda(self.cuda); let mut compiler = cfg.try_get_compiler()?; // Clang uses stderr for verbose output, which yields a false positive // result if the CFLAGS/CXXFLAGS include -v to aid in debugging. 
if compiler.family.verbose_stderr() { compiler.remove_arg("-v".into()); } let mut cmd = compiler.to_command(); let is_arm = target.contains("aarch64") || target.contains("arm"); let clang = compiler.family == ToolFamily::Clang; command_add_output_file( &mut cmd, &obj, self.cuda, target.contains("msvc"), clang, false, is_arm, ); // We need to explicitly tell msvc not to link and create an exe // in the root directory of the crate if target.contains("msvc") && !self.cuda { cmd.arg("-c"); } cmd.arg(&src); let output = cmd.output()?; let is_supported = output.stderr.is_empty(); known_status.insert(flag.to_owned(), is_supported); Ok(is_supported) } /// Add an arbitrary flag to the invocation of the compiler if it supports it /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .flag_if_supported("-Wlogical-op") // only supported by GCC /// .flag_if_supported("-Wunreachable-code") // only supported by clang /// .compile("foo"); /// ``` pub fn flag_if_supported(&mut self, flag: &str) -> &mut Build { self.flags_supported.push(flag.to_string()); self } /// Set the `-shared` flag. /// /// When enabled, the compiler will produce a shared object which can /// then be linked with other objects to form an executable. /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .shared_flag(true) /// .compile("libfoo.so"); /// ``` pub fn shared_flag(&mut self, shared_flag: bool) -> &mut Build { self.shared_flag = Some(shared_flag); self } /// Set the `-static` flag. /// /// When enabled on systems that support dynamic linking, this prevents /// linking with the shared libraries. /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .shared_flag(true) /// .static_flag(true) /// .compile("foo"); /// ``` pub fn static_flag(&mut self, static_flag: bool) -> &mut Build { self.static_flag = Some(static_flag); self } /// Disables the generation of default compiler flags. The default compiler /// flags may cause conflicts in some cross compiling scenarios. /// /// Setting the `CRATE_CC_NO_DEFAULTS` environment variable has the same /// effect as setting this to `true`. The presence of the environment /// variable and the value of `no_default_flags` will be OR'd together. pub fn no_default_flags(&mut self, no_default_flags: bool) -> &mut Build { self.no_default_flags = no_default_flags; self } /// Add a file which will be compiled pub fn file<P: AsRef<Path>>(&mut self, p: P) -> &mut Build { self.files.push(p.as_ref().to_path_buf()); self } /// Add files which will be compiled pub fn files<P>(&mut self, p: P) -> &mut Build where P: IntoIterator, P::Item: AsRef<Path>, { for file in p.into_iter() { self.file(file); } self } /// Set C++ support. /// /// The other `cpp_*` options will only become active if this is set to /// `true`. pub fn cpp(&mut self, cpp: bool) -> &mut Build { self.cpp = cpp; self } /// Set CUDA C++ support. /// /// Enabling CUDA will pass the detected C/C++ toolchain as an argument to /// the CUDA compiler, NVCC. NVCC itself accepts some limited GNU-like args; /// any other arguments for the C/C++ toolchain will be redirected using /// "-Xcompiler" flags. /// /// If enabled, this also implicitly enables C++ support. pub fn cuda(&mut self, cuda: bool) -> &mut Build { self.cuda = cuda; if cuda { self.cpp = true; } self } /// Set warnings into errors flag. /// /// Disabled by default. /// /// Warning: turning warnings into errors only make sense /// if you are a developer of the crate using cc-rs. 
/// Some warnings only appear on some architecture or /// specific version of the compiler. Any user of this crate, /// or any other crate depending on it, could fail during /// compile time. /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .warnings_into_errors(true) /// .compile("libfoo.a"); /// ``` pub fn warnings_into_errors(&mut self, warnings_into_errors: bool) -> &mut Build { self.warnings_into_errors = warnings_into_errors; self } /// Set warnings flags. /// /// Adds some flags: /// - "-Wall" for MSVC. /// - "-Wall", "-Wextra" for GNU and Clang. /// /// Enabled by default. /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .warnings(false) /// .compile("libfoo.a"); /// ``` pub fn warnings(&mut self, warnings: bool) -> &mut Build { self.warnings = Some(warnings); self.extra_warnings = Some(warnings); self } /// Set extra warnings flags. /// /// Adds some flags: /// - nothing for MSVC. /// - "-Wextra" for GNU and Clang. /// /// Enabled by default. /// /// # Example /// /// ```no_run /// // Disables -Wextra, -Wall remains enabled: /// cc::Build::new() /// .file("src/foo.c") /// .extra_warnings(false) /// .compile("libfoo.a"); /// ``` pub fn extra_warnings(&mut self, warnings: bool) -> &mut Build { self.extra_warnings = Some(warnings); self } /// Set the standard library to link against when compiling with C++ /// support. /// /// The default value of this property depends on the current target: On /// OS X `Some("c++")` is used, when compiling for a Visual Studio based /// target `None` is used and for other targets `Some("stdc++")` is used. /// If the `CXXSTDLIB` environment variable is set, its value will /// override the default value. /// /// A value of `None` indicates that no automatic linking should happen, /// otherwise cargo will link against the specified library. /// /// The given library name must not contain the `lib` prefix. /// /// Common values: /// - `stdc++` for GNU /// - `c++` for Clang /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .shared_flag(true) /// .cpp_link_stdlib("stdc++") /// .compile("libfoo.so"); /// ``` pub fn cpp_link_stdlib<'a, V: Into<Option<&'a str>>>( &mut self, cpp_link_stdlib: V, ) -> &mut Build { self.cpp_link_stdlib = Some(cpp_link_stdlib.into().map(|s| s.into())); self } /// Force the C++ compiler to use the specified standard library. /// /// Setting this option will automatically set `cpp_link_stdlib` to the same /// value. /// /// The default value of this option is always `None`. /// /// This option has no effect when compiling for a Visual Studio based /// target. /// /// This option sets the `-stdlib` flag, which is only supported by some /// compilers (clang, icc) but not by others (gcc). The library will not /// detect which compiler is used, as such it is the responsibility of the /// caller to ensure that this option is only used in conjuction with a /// compiler which supports the `-stdlib` flag. /// /// A value of `None` indicates that no specific C++ standard library should /// be used, otherwise `-stdlib` is added to the compile invocation. /// /// The given library name must not contain the `lib` prefix. 
/// /// Common values: /// - `stdc++` for GNU /// - `c++` for Clang /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .cpp_set_stdlib("c++") /// .compile("libfoo.a"); /// ``` pub fn cpp_set_stdlib<'a, V: Into<Option<&'a str>>>( &mut self, cpp_set_stdlib: V, ) -> &mut Build { let cpp_set_stdlib = cpp_set_stdlib.into(); self.cpp_set_stdlib = cpp_set_stdlib.map(|s| s.into()); self.cpp_link_stdlib(cpp_set_stdlib); self } /// Configures the target this configuration will be compiling for. /// /// This option is automatically scraped from the `TARGET` environment /// variable by build scripts, so it's not required to call this function. /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .target("aarch64-linux-android") /// .compile("foo"); /// ``` pub fn target(&mut self, target: &str) -> &mut Build { self.target = Some(target.to_string()); self } /// Configures the host assumed by this configuration. /// /// This option is automatically scraped from the `HOST` environment /// variable by build scripts, so it's not required to call this function. /// /// # Example /// /// ```no_run /// cc::Build::new() /// .file("src/foo.c") /// .host("arm-linux-gnueabihf") /// .compile("foo"); /// ``` pub fn host(&mut self, host: &str) -> &mut Build { self.host = Some(host.to_string()); self } /// Configures the optimization level of the generated object files. /// /// This option is automatically scraped from the `OPT_LEVEL` environment /// variable by build scripts, so it's not required to call this function. pub fn opt_level(&mut self, opt_level: u32) -> &mut Build { self.opt_level = Some(opt_level.to_string()); self } /// Configures the optimization level of the generated object files. /// /// This option is automatically scraped from the `OPT_LEVEL` environment /// variable by build scripts, so it's not required to call this function. pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Build { self.opt_level = Some(opt_level.to_string()); self } /// Configures whether the compiler will emit debug information when /// generating object files. /// /// This option is automatically scraped from the `DEBUG` environment /// variable by build scripts, so it's not required to call this function. pub fn debug(&mut self, debug: bool) -> &mut Build { self.debug = Some(debug); self } /// Configures whether the compiler will emit instructions to store /// frame pointers during codegen. /// /// This option is automatically enabled when debug information is emitted. /// Otherwise the target platform compiler's default will be used. /// You can use this option to force a specific setting. pub fn force_frame_pointer(&mut self, force: bool) -> &mut Build { self.force_frame_pointer = Some(force); self } /// Configures the output directory where all object files and static /// libraries will be located. /// /// This option is automatically scraped from the `OUT_DIR` environment /// variable by build scripts, so it's not required to call this function. pub fn out_dir<P: AsRef<Path>>(&mut self, out_dir: P) -> &mut Build { self.out_dir = Some(out_dir.as_ref().to_owned()); self } /// Configures the compiler to be used to produce output. /// /// This option is automatically determined from the target platform or a /// number of environment variables, so it's not required to call this /// function. 
    pub fn compiler<P: AsRef<Path>>(&mut self, compiler: P) -> &mut Build {
        self.compiler = Some(compiler.as_ref().to_owned());
        self
    }

    /// Configures the tool used to assemble archives.
    ///
    /// This option is automatically determined from the target platform or a
    /// number of environment variables, so it's not required to call this
    /// function.
    pub fn archiver<P: AsRef<Path>>(&mut self, archiver: P) -> &mut Build {
        self.archiver = Some(archiver.as_ref().to_owned());
        self
    }

    /// Define whether metadata should be emitted for cargo, allowing it to
    /// automatically link the binary. Defaults to `true`.
    ///
    /// The emitted metadata is:
    ///
    /// - `rustc-link-lib=static=`*compiled lib*
    /// - `rustc-link-search=native=`*target folder*
    /// - When the target is MSVC, the ATL-MFC libs are added via `rustc-link-search=native=`
    /// - When C++ is enabled, the C++ stdlib is added via `rustc-link-lib`
    ///
    pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Build {
        self.cargo_metadata = cargo_metadata;
        self
    }

    /// Configures whether the compiler will emit position independent code.
    ///
    /// This option defaults to `false` for `windows-gnu` and bare metal targets and
    /// to `true` for all other targets.
    pub fn pic(&mut self, pic: bool) -> &mut Build {
        self.pic = Some(pic);
        self
    }

    /// Configures whether the Procedure Linkage Table is used for indirect
    /// calls into shared libraries.
    ///
    /// The PLT is used to provide features like lazy binding, but introduces
    /// a small performance loss due to extra pointer indirection. Setting
    /// `use_plt` to `false` can provide a small performance increase.
    ///
    /// Note that skipping the PLT requires a recent version of GCC/Clang.
    ///
    /// This only applies to ELF targets. It has no effect on other platforms.
    pub fn use_plt(&mut self, use_plt: bool) -> &mut Build {
        self.use_plt = Some(use_plt);
        self
    }

    /// Configures whether the /MT flag or the /MD flag will be passed to msvc build tools.
    ///
    /// This option defaults to `false`, and affects only msvc targets.
    pub fn static_crt(&mut self, static_crt: bool) -> &mut Build {
        self.static_crt = Some(static_crt);
        self
    }

    #[doc(hidden)]
    pub fn __set_env<A, B>(&mut self, a: A, b: B) -> &mut Build
    where
        A: AsRef<OsStr>,
        B: AsRef<OsStr>,
    {
        self.env
            .push((a.as_ref().to_owned(), b.as_ref().to_owned()));
        self
    }

    /// Run the compiler, generating the file `output`
    ///
    /// This will return a result instead of panicking; see compile() for the complete description.
    pub fn try_compile(&self, output: &str) -> Result<(), Error> {
        let (lib_name, gnu_lib_name) = if output.starts_with("lib") && output.ends_with(".a") {
            (&output[3..output.len() - 2], output.to_owned())
        } else {
            let mut gnu = String::with_capacity(5 + output.len());
            gnu.push_str("lib");
            gnu.push_str(&output);
            gnu.push_str(".a");
            (output, gnu)
        };
        let dst = self.get_out_dir()?;

        let mut objects = Vec::new();
        for file in self.files.iter() {
            let obj = dst.join(file).with_extension("o");
            let obj = if !obj.starts_with(&dst) {
                dst.join(obj.file_name().ok_or_else(|| {
                    Error::new(ErrorKind::IOError, "Getting object file details failed.")
                })?)
            } else {
                obj
            };
            match obj.parent() {
                Some(s) => fs::create_dir_all(s)?,
                None => {
                    return Err(Error::new(
                        ErrorKind::IOError,
                        "Getting object file details failed.",
                    ));
                }
            };

            objects.push(Object::new(file.to_path_buf(), obj));
        }
        self.compile_objects(&objects)?;
        self.assemble(lib_name, &dst.join(gnu_lib_name), &objects)?;

        if self.get_target()?.contains("msvc") {
            let compiler = self.get_base_compiler()?;
            let atlmfc_lib = compiler
                .env()
                .iter()
                .find(|&&(ref var, _)| var.as_os_str() == OsStr::new("LIB"))
                .and_then(|&(_, ref lib_paths)| {
                    env::split_paths(lib_paths).find(|path| {
                        let sub = Path::new("atlmfc/lib");
                        path.ends_with(sub) || path.parent().map_or(false, |p| p.ends_with(sub))
                    })
                });

            if let Some(atlmfc_lib) = atlmfc_lib {
                self.print(&format!(
                    "cargo:rustc-link-search=native={}",
                    atlmfc_lib.display()
                ));
            }
        }

        self.print(&format!("cargo:rustc-link-lib=static={}", lib_name));
        self.print(&format!("cargo:rustc-link-search=native={}", dst.display()));

        // Add specific C++ libraries, if enabled.
        if self.cpp {
            if let Some(stdlib) = self.get_cpp_link_stdlib()? {
                self.print(&format!("cargo:rustc-link-lib={}", stdlib));
            }
        }

        Ok(())
    }

    /// Run the compiler, generating the file `output`
    ///
    /// The name `output` should be the name of the library. For backwards compatibility,
    /// the `output` may start with `lib` and end with `.a`. The Rust compiler will create
    /// the archive with the lib prefix and .a extension. MSVC will create a file without prefix,
    /// ending with `.lib`.
    ///
    /// # Panics
    ///
    /// Panics if `output` is not formatted correctly or if one of the underlying
    /// compiler commands fails. It can also panic if it fails reading file names
    /// or creating directories.
    pub fn compile(&self, output: &str) {
        if let Err(e) = self.try_compile(output) {
            fail(&e.message);
        }
    }

    #[cfg(feature = "parallel")]
    fn compile_objects<'me>(&'me self, objs: &[Object]) -> Result<(), Error> {
        use std::sync::atomic::{AtomicBool, Ordering::SeqCst};
        use std::sync::Once;

        // Limit our parallelism globally with a jobserver. Start off by
        // releasing our own token for this process so we can have a bit of an
        // easier to write loop below. If this fails, though, then we're likely
        // on Windows with the main implicit token, so we just have a bit of
        // extra parallelism for a while and don't reacquire later.
        let server = jobserver();
        let reacquire = server.release_raw().is_ok();

        // When compiling objects in parallel we do a few dirty tricks to speed
        // things up:
        //
        // * First is that we use the `jobserver` crate to limit the parallelism
        //   of this build script. The `jobserver` crate will use a jobserver
        //   configured by Cargo for build scripts to ensure that parallelism is
        //   coordinated across C compilations and Rust compilations. Before we
        //   compile anything we make sure to wait until we acquire a token.
        //
        //   Note that this jobserver is cached globally so we only use one per
        //   process and only worry about creating it once.
        //
        // * Next we use a raw `thread::spawn` per thread to actually compile
        //   objects in parallel. We only actually spawn a thread after we've
        //   acquired a token to perform some work.
        //
        // * Finally though we want to keep the dependencies of this crate
        //   pretty light, so we avoid using a safe abstraction like `rayon` and
        //   instead rely on some bits of `unsafe` code. We know that this stack
        //   frame persists while everything is compiling so we use all the
        //   stack-allocated objects without cloning/reallocating. We use a
        //   transmute to `State` with a `'static` lifetime to persist
        //   everything we need across the boundary, and the join-on-drop
        //   semantics of `JoinOnDrop` should ensure that our stack frame is
        //   alive while threads are alive.
        //
        // With all that in mind we compile all objects in a loop here, after we
        // acquire the appropriate tokens. Once all objects have been compiled
        // we join on all the threads and propagate the results of compilation.
        //
        // Note that as a slight optimization we try to break out as soon as
        // any compilation fails to ensure that errors get out to the user as
        // fast as possible.
        let error = AtomicBool::new(false);
        let mut threads = Vec::new();
        for obj in objs {
            if error.load(SeqCst) {
                break;
            }
            let token = server.acquire()?;
            let state = State {
                build: self,
                obj,
                error: &error,
            };
            let state = unsafe { std::mem::transmute::<State, State<'static>>(state) };
            let thread = thread::spawn(|| {
                let state: State<'me> = state; // erase the `'static` lifetime
                let result = state.build.compile_object(state.obj);
                if result.is_err() {
                    state.error.store(true, SeqCst);
                }
                drop(token); // make sure our jobserver token is released after the compile
                return result;
            });
            threads.push(JoinOnDrop(Some(thread)));
        }

        for mut thread in threads {
            if let Some(thread) = thread.0.take() {
                thread.join().expect("thread should not panic")?;
            }
        }

        // Reacquire our process's token before we proceed, which we released
        // before entering the loop above.
        if reacquire {
            server.acquire_raw()?;
        }

        return Ok(());

        /// Shared state from the parent thread to the child thread. This
        /// package of pointers is temporarily transmuted to a `'static`
        /// lifetime to cross the thread boundary and then once the thread is
        /// running we erase the `'static` to go back to an anonymous lifetime.
        struct State<'a> {
            build: &'a Build,
            obj: &'a Object,
            error: &'a AtomicBool,
        }

        /// Returns a suitable `jobserver::Client` used to coordinate
        /// parallelism between build scripts.
        fn jobserver() -> &'static jobserver::Client {
            static INIT: Once = Once::new();
            static mut JOBSERVER: Option<jobserver::Client> = None;

            fn _assert_sync<T: Sync>() {}
            _assert_sync::<jobserver::Client>();

            unsafe {
                INIT.call_once(|| {
                    let server = default_jobserver();
                    JOBSERVER = Some(server);
                });
                JOBSERVER.as_ref().unwrap()
            }
        }

        unsafe fn default_jobserver() -> jobserver::Client {
            // Try to use the environmental jobserver which Cargo typically
            // initializes for us...
            if let Some(client) = jobserver::Client::from_env() {
                return client;
            }

            // ... but if that fails for whatever reason select something
            // reasonable and create a new jobserver. Use `NUM_JOBS` if set (it's
            // configured by Cargo) and otherwise just fall back to a
            // semi-reasonable number. Note that we could use `num_cpus` here
            // but it's an extra dependency that will almost never be used, so
            // it's generally not worth it.
            let mut parallelism = 4;
            if let Ok(amt) = env::var("NUM_JOBS") {
                if let Ok(amt) = amt.parse() {
                    parallelism = amt;
                }
            }

            // If we create our own jobserver then be sure to reserve one token
            // for ourselves.
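            // (Presumably every jobserver client implicitly owns one token
            // for its own process; acquiring a raw token from the fresh
            // client below keeps it consistent with the `release_raw`/
            // `acquire_raw` pairing used in `compile_objects` above.)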
            let client = jobserver::Client::new(parallelism).expect("failed to create jobserver");
            client.acquire_raw().expect("failed to acquire initial");
            return client;
        }

        struct JoinOnDrop(Option<thread::JoinHandle<Result<(), Error>>>);

        impl Drop for JoinOnDrop {
            fn drop(&mut self) {
                if let Some(thread) = self.0.take() {
                    drop(thread.join());
                }
            }
        }
    }

    #[cfg(not(feature = "parallel"))]
    fn compile_objects(&self, objs: &[Object]) -> Result<(), Error> {
        for obj in objs {
            self.compile_object(obj)?;
        }
        Ok(())
    }

    fn compile_object(&self, obj: &Object) -> Result<(), Error> {
        let is_asm = obj.src.extension().and_then(|s| s.to_str()) == Some("asm");
        let target = self.get_target()?;
        let msvc = target.contains("msvc");
        let compiler = self.try_get_compiler()?;
        let clang = compiler.family == ToolFamily::Clang;
        let (mut cmd, name) = if msvc && is_asm {
            self.msvc_macro_assembler()?
        } else {
            let mut cmd = compiler.to_command();
            for &(ref a, ref b) in self.env.iter() {
                cmd.env(a, b);
            }
            (
                cmd,
                compiler
                    .path
                    .file_name()
                    .ok_or_else(|| Error::new(ErrorKind::IOError, "Failed to get compiler path."))?
                    .to_string_lossy()
                    .into_owned(),
            )
        };
        let is_arm = target.contains("aarch64") || target.contains("arm");
        command_add_output_file(&mut cmd, &obj.dst, self.cuda, msvc, clang, is_asm, is_arm);
        // armasm and armasm64 don't require the -c option
        if !msvc || !is_asm || !is_arm {
            cmd.arg("-c");
        }
        cmd.arg(&obj.src);

        run(&mut cmd, &name)?;

        Ok(())
    }

    /// This will return a result instead of panicking; see expand() for the complete description.
    pub fn try_expand(&self) -> Result<Vec<u8>, Error> {
        let compiler = self.try_get_compiler()?;
        let mut cmd = compiler.to_command();
        for &(ref a, ref b) in self.env.iter() {
            cmd.env(a, b);
        }
        cmd.arg("-E");

        assert!(
            self.files.len() <= 1,
            "Expand may only be called for a single file"
        );

        for file in self.files.iter() {
            cmd.arg(file);
        }

        let name = compiler
            .path
            .file_name()
            .ok_or_else(|| Error::new(ErrorKind::IOError, "Failed to get compiler path."))?
            .to_string_lossy()
            .into_owned();

        Ok(run_output(&mut cmd, &name)?)
    }

    /// Run the compiler, returning the macro-expanded version of the input files.
    ///
    /// This is only relevant for C and C++ files.
    ///
    /// # Panics
    /// Panics if more than one file is present in the config, or if the
    /// compiler path has an invalid file name.
    ///
    /// # Example
    /// ```no_run
    /// let out = cc::Build::new().file("src/foo.c").expand();
    /// ```
    pub fn expand(&self) -> Vec<u8> {
        match self.try_expand() {
            Err(e) => fail(&e.message),
            Ok(v) => v,
        }
    }

    /// Get the compiler that's in use for this configuration.
    ///
    /// This function will return a `Tool` which represents the culmination
    /// of this configuration at a snapshot in time. The returned compiler can
    /// be inspected (e.g. the path, arguments, environment) to forward along to
    /// other tools, or the `to_command` method can be used to invoke the
    /// compiler itself.
    ///
    /// This method will take into account all configuration such as debug
    /// information, optimization level, include directories, defines, etc.
    /// Additionally, the compiler binary in use follows the standard
    /// conventions for this path, e.g. looking at the explicitly set compiler,
    /// environment variables (a number of which are inspected here), and then
    /// falling back to the default configuration.
    ///
    /// # Panics
    ///
    /// Panics if an error occurred while determining the architecture.
    pub fn get_compiler(&self) -> Tool {
        match self.try_get_compiler() {
            Ok(tool) => tool,
            Err(e) => fail(&e.message),
        }
    }

    /// Get the compiler that's in use for this configuration.
    ///
    /// This will return a result instead of panicking; see get_compiler() for the complete description.
    pub fn try_get_compiler(&self) -> Result<Tool, Error> {
        let opt_level = self.get_opt_level()?;
        let target = self.get_target()?;

        let mut cmd = self.get_base_compiler()?;
        let envflags = self.envflags(if self.cpp { "CXXFLAGS" } else { "CFLAGS" });

        // Disable default flag generation via `no_default_flags` or environment variable
        let no_defaults = self.no_default_flags || self.getenv("CRATE_CC_NO_DEFAULTS").is_some();

        if !no_defaults {
            self.add_default_flags(&mut cmd, &target, &opt_level)?;
        } else {
            println!("Info: default compiler flags are disabled");
        }

        for arg in envflags {
            cmd.push_cc_arg(arg.into());
        }

        for directory in self.include_directories.iter() {
            cmd.args.push("-I".into());
            cmd.args.push(directory.into());
        }

        // If warnings and/or extra_warnings haven't been explicitly set,
        // then we set them only if the environment doesn't already have
        // CFLAGS/CXXFLAGS, since those variables presumably already contain
        // the desired set of warnings flags.

        if self.warnings.unwrap_or(!self.has_flags()) {
            let wflags = cmd.family.warnings_flags().into();
            cmd.push_cc_arg(wflags);
        }

        if self.extra_warnings.unwrap_or(!self.has_flags()) {
            if let Some(wflags) = cmd.family.extra_warnings_flags() {
                cmd.push_cc_arg(wflags.into());
            }
        }

        for flag in self.flags.iter() {
            cmd.args.push(flag.into());
        }

        for flag in self.flags_supported.iter() {
            if self.is_flag_supported(flag).unwrap_or(false) {
                cmd.push_cc_arg(flag.into());
            }
        }

        for &(ref key, ref value) in self.definitions.iter() {
            if let Some(ref value) = *value {
                cmd.args.push(format!("-D{}={}", key, value).into());
            } else {
                cmd.args.push(format!("-D{}", key).into());
            }
        }

        if self.warnings_into_errors {
            let warnings_to_errors_flag = cmd.family.warnings_to_errors_flag().into();
            cmd.push_cc_arg(warnings_to_errors_flag);
        }

        Ok(cmd)
    }

    fn add_default_flags(
        &self,
        cmd: &mut Tool,
        target: &str,
        opt_level: &str,
    ) -> Result<(), Error> {
        // Non-target flags
        // If the flag is not conditioned on the target variable, it belongs here :)
        match cmd.family {
            ToolFamily::Msvc { .. } => {
                cmd.push_cc_arg("-nologo".into());

                let crt_flag = match self.static_crt {
                    Some(true) => "-MT",
                    Some(false) => "-MD",
                    None => {
                        let features = self
                            .getenv("CARGO_CFG_TARGET_FEATURE")
                            .unwrap_or(String::new());
                        if features.contains("crt-static") {
                            "-MT"
                        } else {
                            "-MD"
                        }
                    }
                };
                cmd.push_cc_arg(crt_flag.into());

                match &opt_level[..] {
                    // Msvc uses /O1 to enable all optimizations that minimize code size.
                    "z" | "s" | "1" => cmd.push_opt_unless_duplicate("-O1".into()),
                    // -O3 is a valid value for gcc and clang compilers, but not msvc. Cap to /O2.
                    "2" | "3" => cmd.push_opt_unless_duplicate("-O2".into()),
                    _ => {}
                }
            }
            ToolFamily::Gnu | ToolFamily::Clang => {
                // arm-linux-androideabi-gcc 4.8 shipped with the Android NDK does
                // not support '-Oz'
                if opt_level == "z" && cmd.family != ToolFamily::Clang {
                    cmd.push_opt_unless_duplicate("-Os".into());
                } else {
                    cmd.push_opt_unless_duplicate(format!("-O{}", opt_level).into());
                }

                if cmd.family == ToolFamily::Clang && target.contains("android") {
                    // For compatibility with code that doesn't use the
                    // pre-defined `__ANDROID__` macro. If the compiler is used
                    // via ndk-build or cmake (the officially supported build
                    // methods), this macro is defined.
// See https://android.googlesource.com/platform/ndk/+/refs/heads/ndk-release-r21/build/cmake/android.toolchain.cmake#456 // https://android.googlesource.com/platform/ndk/+/refs/heads/ndk-release-r21/build/core/build-binary.mk#141 cmd.push_opt_unless_duplicate("-DANDROID".into()); } if !target.contains("-ios") { cmd.push_cc_arg("-ffunction-sections".into()); cmd.push_cc_arg("-fdata-sections".into()); } // Disable generation of PIC on bare-metal for now: rust-lld doesn't support this yet if self .pic .unwrap_or(!target.contains("windows") && !target.contains("-none-")) { cmd.push_cc_arg("-fPIC".into()); // PLT only applies if code is compiled with PIC support, // and only for ELF targets. if target.contains("linux") && !self.use_plt.unwrap_or(true) { cmd.push_cc_arg("-fno-plt".into()); } } } } if self.get_debug() { if self.cuda { // NVCC debug flag cmd.args.push("-G".into()); } let family = cmd.family; family.add_debug_flags(cmd); } if self.get_force_frame_pointer() { let family = cmd.family; family.add_force_frame_pointer(cmd); } // Target flags match cmd.family { ToolFamily::Clang => { if !(target.contains("android") && android_clang_compiler_uses_target_arg_internally(&cmd.path)) { cmd.args.push(format!("--target={}", target).into()); } } ToolFamily::Msvc { clang_cl } => { // This is an undocumented flag from MSVC but helps with making // builds more reproducible by avoiding putting timestamps into // files. //cmd.push_cc_arg("-Brepro".into()); if clang_cl { if target.contains("x86_64") { cmd.push_cc_arg("-m64".into()); } else if target.contains("86") { cmd.push_cc_arg("-m32".into()); cmd.push_cc_arg("-arch:IA32".into()); } else { cmd.push_cc_arg(format!("--target={}", target).into()); } } else { if target.contains("i586") { cmd.push_cc_arg("-arch:IA32".into()); } } // There is a check in corecrt.h that will generate a // compilation error if // _ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE is // not defined to 1. The check was added in Windows // 8 days because only store apps were allowed on ARM. // This changed with the release of Windows 10 IoT Core. // The check will be going away in future versions of // the SDK, but for all released versions of the // Windows SDK it is required. 
if target.contains("arm") || target.contains("thumb") { cmd.args .push("-D_ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE=1".into()); } } ToolFamily::Gnu => { if target.contains("i686") || target.contains("i586") { cmd.args.push("-m32".into()); } else if target == "x86_64-unknown-linux-gnux32" { cmd.args.push("-mx32".into()); } else if target.contains("x86_64") || target.contains("powerpc64") { cmd.args.push("-m64".into()); } if target.contains("darwin") { if target.contains("x86_64") { cmd.args.push("-arch".into()); cmd.args.push("x86_64".into()); } else if target.contains("arm64e") { cmd.args.push("-arch".into()); cmd.args.push("arm64e".into()); } else if target.contains("aarch64") { cmd.args.push("-arch".into()); cmd.args.push("arm64".into()); } } if self.static_flag.is_none() { let features = self .getenv("CARGO_CFG_TARGET_FEATURE") .unwrap_or(String::new()); if features.contains("crt-static") { cmd.args.push("-static".into()); } } // armv7 targets get to use armv7 instructions if (target.starts_with("armv7") || target.starts_with("thumbv7")) && target.contains("-linux-") { cmd.args.push("-march=armv7-a".into()); } // (x86 Android doesn't say "eabi") if target.contains("-androideabi") && target.contains("v7") { // -march=armv7-a handled above cmd.args.push("-mthumb".into()); if !target.contains("neon") { // On android we can guarantee some extra float instructions // (specified in the android spec online) // NEON guarantees even more; see below. cmd.args.push("-mfpu=vfpv3-d16".into()); } cmd.args.push("-mfloat-abi=softfp".into()); } if target.contains("neon") { cmd.args.push("-mfpu=neon-vfpv4".into()); } if target.starts_with("armv4t-unknown-linux-") { cmd.args.push("-march=armv4t".into()); cmd.args.push("-marm".into()); cmd.args.push("-mfloat-abi=soft".into()); } if target.starts_with("armv5te-unknown-linux-") { cmd.args.push("-march=armv5te".into()); cmd.args.push("-marm".into()); cmd.args.push("-mfloat-abi=soft".into()); } // For us arm == armv6 by default if target.starts_with("arm-unknown-linux-") { cmd.args.push("-march=armv6".into()); cmd.args.push("-marm".into()); if target.ends_with("hf") { cmd.args.push("-mfpu=vfp".into()); } else { cmd.args.push("-mfloat-abi=soft".into()); } } // We can guarantee some settings for FRC if target.starts_with("arm-frc-") { cmd.args.push("-march=armv7-a".into()); cmd.args.push("-mcpu=cortex-a9".into()); cmd.args.push("-mfpu=vfpv3".into()); cmd.args.push("-mfloat-abi=softfp".into()); cmd.args.push("-marm".into()); } // Turn codegen down on i586 to avoid some instructions. if target.starts_with("i586-unknown-linux-") { cmd.args.push("-march=pentium".into()); } // Set codegen level for i686 correctly if target.starts_with("i686-unknown-linux-") { cmd.args.push("-march=i686".into()); } // Looks like `musl-gcc` makes is hard for `-m32` to make its way // all the way to the linker, so we need to actually instruct the // linker that we're generating 32-bit executables as well. This'll // typically only be used for build scripts which transitively use // these flags that try to compile executables. 
if target == "i686-unknown-linux-musl" || target == "i586-unknown-linux-musl" { cmd.args.push("-Wl,-melf_i386".into()); } if target.starts_with("thumb") { cmd.args.push("-mthumb".into()); if target.ends_with("eabihf") { cmd.args.push("-mfloat-abi=hard".into()) } } if target.starts_with("thumbv6m") { cmd.args.push("-march=armv6s-m".into()); } if target.starts_with("thumbv7em") { cmd.args.push("-march=armv7e-m".into()); if target.ends_with("eabihf") { cmd.args.push("-mfpu=fpv4-sp-d16".into()) } } if target.starts_with("thumbv7m") { cmd.args.push("-march=armv7-m".into()); } if target.starts_with("thumbv8m.base") { cmd.args.push("-march=armv8-m.base".into()); } if target.starts_with("thumbv8m.main") { cmd.args.push("-march=armv8-m.main".into()); if target.ends_with("eabihf") { cmd.args.push("-mfpu=fpv5-sp-d16".into()) } } if target.starts_with("armebv7r") | target.starts_with("armv7r") { if target.starts_with("armeb") { cmd.args.push("-mbig-endian".into()); } else { cmd.args.push("-mlittle-endian".into()); } // ARM mode cmd.args.push("-marm".into()); // R Profile cmd.args.push("-march=armv7-r".into()); if target.ends_with("eabihf") { // Calling convention cmd.args.push("-mfloat-abi=hard".into()); // lowest common denominator FPU // (see Cortex-R4 technical reference manual) cmd.args.push("-mfpu=vfpv3-d16".into()) } else { // Calling convention cmd.args.push("-mfloat-abi=soft".into()); } } if target.starts_with("armv7a") { cmd.args.push("-march=armv7-a".into()); if target.ends_with("eabihf") { // lowest common denominator FPU cmd.args.push("-mfpu=vfpv3-d16".into()); } } if target.starts_with("riscv32") || target.starts_with("riscv64") { // get the 32i/32imac/32imc/64gc/64imac/... part let mut parts = target.split('-'); if let Some(arch) = parts.next() { let arch = &arch[5..]; cmd.args.push(("-march=rv".to_owned() + arch).into()); if target.contains("linux") && arch.starts_with("64") { cmd.args.push("-mabi=lp64d".into()); } else if target.contains("linux") && arch.starts_with("32") { cmd.args.push("-mabi=ilp32d".into()); } else if arch.starts_with("64") { cmd.args.push("-mabi=lp64".into()); } else { cmd.args.push("-mabi=ilp32".into()); } cmd.args.push("-mcmodel=medany".into()); } } } } if target.contains("-ios") { // FIXME: potential bug. iOS is always compiled with Clang, but Gcc compiler may be // detected instead. 
self.ios_flags(cmd)?; } if self.static_flag.unwrap_or(false) { cmd.args.push("-static".into()); } if self.shared_flag.unwrap_or(false) { cmd.args.push("-shared".into()); } if self.cpp { match (self.cpp_set_stdlib.as_ref(), cmd.family) { (None, _) => {} (Some(stdlib), ToolFamily::Gnu) | (Some(stdlib), ToolFamily::Clang) => { cmd.push_cc_arg(format!("-stdlib=lib{}", stdlib).into()); } _ => { println!( "cargo:warning=cpp_set_stdlib is specified, but the {:?} compiler \ does not support this option, ignored", cmd.family ); } } } Ok(()) } fn has_flags(&self) -> bool { let flags_env_var_name = if self.cpp { "CXXFLAGS" } else { "CFLAGS" }; let flags_env_var_value = self.get_var(flags_env_var_name); if let Ok(_) = flags_env_var_value { true } else { false } } fn msvc_macro_assembler(&self) -> Result<(Command, String), Error> { let target = self.get_target()?; let tool = if target.contains("x86_64") { "ml64.exe" } else if target.contains("arm") { "armasm.exe" } else if target.contains("aarch64") { "armasm64.exe" } else { "ml.exe" }; let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| self.cmd(tool)); cmd.arg("-nologo"); // undocumented, yet working with armasm[64] for directory in self.include_directories.iter() { cmd.arg("-I").arg(directory); } if target.contains("aarch64") || target.contains("arm") { println!("cargo:warning=The MSVC ARM assemblers do not support -D flags"); } else { for &(ref key, ref value) in self.definitions.iter() { if let Some(ref value) = *value { cmd.arg(&format!("-D{}={}", key, value)); } else { cmd.arg(&format!("-D{}", key)); } } } if target.contains("i686") || target.contains("i586") { cmd.arg("-safeseh"); } for flag in self.flags.iter() { cmd.arg(flag); } Ok((cmd, tool.to_string())) } fn assemble(&self, lib_name: &str, dst: &Path, objs: &[Object]) -> Result<(), Error> { // Delete the destination if it exists as the `ar` tool at least on Unix // appends to it, which we don't want. let _ = fs::remove_file(&dst); let objects: Vec<_> = objs.iter().map(|obj| obj.dst.clone()).collect(); let target = self.get_target()?; if target.contains("msvc") { let (mut cmd, program) = self.get_ar()?; let mut out = OsString::from("-out:"); out.push(dst); cmd.arg(out).arg("-nologo"); for flag in self.ar_flags.iter() { cmd.arg(flag); } // Similar to https://github.com/rust-lang/rust/pull/47507 // and https://github.com/rust-lang/rust/pull/48548 let estimated_command_line_len = objects .iter() .chain(&self.objects) .map(|a| a.as_os_str().len()) .sum::<usize>(); if estimated_command_line_len > 1024 * 6 { let mut args = String::from("\u{FEFF}"); // BOM for arg in objects.iter().chain(&self.objects) { args.push('"'); for c in arg.to_str().unwrap().chars() { if c == '"' { args.push('\\') } args.push(c) } args.push('"'); args.push('\n'); } let mut utf16le = Vec::new(); for code_unit in args.encode_utf16() { utf16le.push(code_unit as u8); utf16le.push((code_unit >> 8) as u8); } let mut args_file = OsString::from(dst); args_file.push(".args"); fs::File::create(&args_file) .unwrap() .write_all(&utf16le) .unwrap(); let mut args_file_arg = OsString::from("@"); args_file_arg.push(args_file); cmd.arg(args_file_arg); } else { cmd.args(&objects).args(&self.objects); } run(&mut cmd, &program)?; // The Rust compiler will look for libfoo.a and foo.lib, but the // MSVC linker will also be passed foo.lib, so be sure that both // exist for now. 
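            // (Concretely: `dst` here is the GNU-style `lib<name>.a` archive,
            // and the MSVC-style `<name>.lib` alias is created next to it
            // below via a hard link, falling back to a copy.)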
let lib_dst = dst.with_file_name(format!("{}.lib", lib_name)); let _ = fs::remove_file(&lib_dst); match fs::hard_link(&dst, &lib_dst).or_else(|_| { // if hard-link fails, just copy (ignoring the number of bytes written) fs::copy(&dst, &lib_dst).map(|_| ()) }) { Ok(_) => (), Err(_) => { return Err(Error::new( ErrorKind::IOError, "Could not copy or create a hard-link to the generated lib file.", )); } }; } else { let (mut ar, cmd) = self.get_ar()?; // Set an environment variable to tell the OSX archiver to ensure // that all dates listed in the archive are zero, improving // determinism of builds. AFAIK there's not really official // documentation of this but there's a lot of references to it if // you search google. // // You can reproduce this locally on a mac with: // // $ touch foo.c // $ cc -c foo.c -o foo.o // // # Notice that these two checksums are different // $ ar crus libfoo1.a foo.o && sleep 2 && ar crus libfoo2.a foo.o // $ md5sum libfoo*.a // // # Notice that these two checksums are the same // $ export ZERO_AR_DATE=1 // $ ar crus libfoo1.a foo.o && sleep 2 && touch foo.o && ar crus libfoo2.a foo.o // $ md5sum libfoo*.a // // In any case if this doesn't end up getting read, it shouldn't // cause that many issues! ar.env("ZERO_AR_DATE", "1"); for flag in self.ar_flags.iter() { ar.arg(flag); } run( ar.arg("crs").arg(dst).args(&objects).args(&self.objects), &cmd, )?; } Ok(()) } fn ios_flags(&self, cmd: &mut Tool) -> Result<(), Error> { enum ArchSpec { Device(&'static str), Simulator(&'static str), } let target = self.get_target()?; let arch = target.split('-').nth(0).ok_or_else(|| { Error::new( ErrorKind::ArchitectureInvalid, "Unknown architecture for iOS target.", ) })?; let arch = match arch { "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"), "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"), "arm64e" => ArchSpec::Device("arm64e"), "arm64" | "aarch64" => ArchSpec::Device("arm64"), "i386" | "i686" => ArchSpec::Simulator("-m32"), "x86_64" => ArchSpec::Simulator("-m64"), _ => { return Err(Error::new( ErrorKind::ArchitectureInvalid, "Unknown architecture for iOS target.", )); } }; let min_version = std::env::var("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or_else(|_| "7.0".into()); let sdk = match arch { ArchSpec::Device(arch) => { cmd.args.push("-arch".into()); cmd.args.push(arch.into()); cmd.args .push(format!("-miphoneos-version-min={}", min_version).into()); "iphoneos" } ArchSpec::Simulator(arch) => { cmd.args.push(arch.into()); cmd.args .push(format!("-mios-simulator-version-min={}", min_version).into()); "iphonesimulator" } }; self.print(&format!("Detecting iOS SDK path for {}", sdk)); let sdk_path = self .cmd("xcrun") .arg("--show-sdk-path") .arg("--sdk") .arg(sdk) .stderr(Stdio::inherit()) .output()? .stdout; let sdk_path = match String::from_utf8(sdk_path) { Ok(p) => p, Err(_) => { return Err(Error::new( ErrorKind::IOError, "Unable to determine iOS SDK path.", )); } }; cmd.args.push("-isysroot".into()); cmd.args.push(sdk_path.trim().into()); cmd.args.push("-fembed-bitcode".into()); /* * TODO we probably ultimately want the -fembed-bitcode-marker flag * but can't have it now because of an issue in LLVM: * https://github.com/alexcrichton/cc-rs/issues/301 * https://github.com/rust-lang/rust/pull/48896#comment-372192660 */ /* if self.get_opt_level()? 
== "0" { cmd.args.push("-fembed-bitcode-marker".into()); } */ Ok(()) } fn cmd<P: AsRef<OsStr>>(&self, prog: P) -> Command { let mut cmd = Command::new(prog); for &(ref a, ref b) in self.env.iter() { cmd.env(a, b); } cmd } fn get_base_compiler(&self) -> Result<Tool, Error> { if let Some(ref c) = self.compiler { return Ok(Tool::new(c.clone())); } let host = self.get_host()?; let target = self.get_target()?; let (env, msvc, gnu, traditional, clang) = if self.cpp { ("CXX", "cl.exe", "g++", "c++", "clang++") } else { ("CC", "cl.exe", "gcc", "cc", "clang") }; // On historical Solaris systems, "cc" may have been Sun Studio, which // is not flag-compatible with "gcc". This history casts a long shadow, // and many modern illumos distributions today ship GCC as "gcc" without // also making it available as "cc". let default = if host.contains("solaris") || host.contains("illumos") { gnu } else { traditional }; let cl_exe = windows_registry::find_tool(&target, "cl.exe"); let tool_opt: Option<Tool> = self .env_tool(env) .map(|(tool, wrapper, args)| { // find the driver mode, if any const DRIVER_MODE: &str = "--driver-mode="; let driver_mode = args .iter() .find(|a| a.starts_with(DRIVER_MODE)) .map(|a| &a[DRIVER_MODE.len()..]); // Chop off leading/trailing whitespace to work around // semi-buggy build scripts which are shared in // makefiles/configure scripts (where spaces are far more // lenient) let mut t = Tool::with_clang_driver(PathBuf::from(tool.trim()), driver_mode); if let Some(cc_wrapper) = wrapper { t.cc_wrapper_path = Some(PathBuf::from(cc_wrapper)); } for arg in args { t.cc_wrapper_args.push(arg.into()); } t }) .or_else(|| { if target.contains("emscripten") { let tool = if self.cpp { "em++" } else { "emcc" }; // Windows uses bat file so we have to be a bit more specific if cfg!(windows) { let mut t = Tool::new(PathBuf::from("cmd")); t.args.push("/c".into()); t.args.push(format!("{}.bat", tool).into()); Some(t) } else { Some(Tool::new(PathBuf::from(tool))) } } else { None } }) .or_else(|| cl_exe.clone()); let tool = match tool_opt { Some(t) => t, None => { let compiler = if host.contains("windows") && target.contains("windows") { if target.contains("msvc") { msvc.to_string() } else { format!("{}.exe", gnu) } } else if target.contains("android") { autodetect_android_compiler(&target, &host, gnu, clang) } else if target.contains("cloudabi") { format!("{}-{}", target, traditional) } else if target == "wasm32-wasi" || target == "wasm32-unknown-wasi" || target == "wasm32-unknown-unknown" { "clang".to_string() } else if target.contains("vxworks") { "wr-c++".to_string() } else if self.get_host()? 
!= target { let prefix = self.prefix_for_target(&target); match prefix { Some(prefix) => format!("{}-{}", prefix, gnu), None => default.to_string(), } } else { default.to_string() }; let mut t = Tool::new(PathBuf::from(compiler)); if let Some(cc_wrapper) = Self::rustc_wrapper_fallback() { t.cc_wrapper_path = Some(PathBuf::from(cc_wrapper)); } t } }; let mut tool = if self.cuda { assert!( tool.args.is_empty(), "CUDA compilation currently assumes empty pre-existing args" ); let nvcc = match self.get_var("NVCC") { Err(_) => "nvcc".into(), Ok(nvcc) => nvcc, }; let mut nvcc_tool = Tool::with_features(PathBuf::from(nvcc), None, self.cuda); nvcc_tool .args .push(format!("-ccbin={}", tool.path.display()).into()); nvcc_tool.family = tool.family; nvcc_tool } else { tool }; // If we found `cl.exe` in our environment, the tool we're returning is // an MSVC-like tool, *and* no env vars were set then set env vars for // the tool that we're returning. // // Env vars are needed for things like `link.exe` being put into PATH as // well as header include paths sometimes. These paths are automatically // included by default but if the `CC` or `CXX` env vars are set these // won't be used. This'll ensure that when the env vars are used to // configure for invocations like `clang-cl` we still get a "works out // of the box" experience. if let Some(cl_exe) = cl_exe { if tool.family == (ToolFamily::Msvc { clang_cl: true }) && tool.env.len() == 0 && target.contains("msvc") { for &(ref k, ref v) in cl_exe.env.iter() { tool.env.push((k.to_owned(), v.to_owned())); } } } Ok(tool) } fn get_var(&self, var_base: &str) -> Result<String, Error> { let target = self.get_target()?; let host = self.get_host()?; let kind = if host == target { "HOST" } else { "TARGET" }; let target_u = target.replace("-", "_"); let res = self .getenv(&format!("{}_{}", var_base, target)) .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u))) .or_else(|| self.getenv(&format!("{}_{}", kind, var_base))) .or_else(|| self.getenv(var_base)); match res { Some(res) => Ok(res), None => Err(Error::new( ErrorKind::EnvVarNotFound, &format!("Could not find environment variable {}.", var_base), )), } } fn envflags(&self, name: &str) -> Vec<String> { self.get_var(name) .unwrap_or(String::new()) .split(|c: char| c.is_whitespace()) .filter(|s| !s.is_empty()) .map(|s| s.to_string()) .collect() } /// Returns a fallback `cc_compiler_wrapper` by introspecting `RUSTC_WRAPPER` fn rustc_wrapper_fallback() -> Option<String> { // No explicit CC wrapper was detected, but check if RUSTC_WRAPPER // is defined and is a build accelerator that is compatible with // C/C++ compilers (e.g. sccache) let valid_wrappers = ["sccache"]; let rustc_wrapper = std::env::var_os("RUSTC_WRAPPER")?; let wrapper_path = Path::new(&rustc_wrapper); let wrapper_stem = wrapper_path.file_stem()?; if valid_wrappers.contains(&wrapper_stem.to_str()?) { Some(rustc_wrapper.to_str()?.to_owned()) } else { None } } /// Returns compiler path, optional modifier name from whitelist, and arguments vec fn env_tool(&self, name: &str) -> Option<(String, Option<String>, Vec<String>)> { let tool = match self.get_var(name) { Ok(tool) => tool, Err(_) => return None, }; // If this is an exact path on the filesystem we don't want to do any // interpretation at all, just pass it on through. This'll hopefully get // us to support spaces-in-paths. if Path::new(&tool).exists() { return Some((tool, None, Vec::new())); } // Ok now we want to handle a couple of scenarios. 
        // We'll assume from here on out that spaces are splitting separate
        // arguments. Two major features we want to support are:
        //
        //      CC='sccache cc'
        //
        // aka using `sccache` or any other wrapper/caching-like-thing for
        // compilations. We want to know what the actual compiler is still,
        // though, because our `Tool` API supports introspection of it to see
        // what compiler is in use.
        //
        // additionally we want to support
        //
        //      CC='cc -flag'
        //
        // where the CC env var is used to also pass default flags to the C
        // compiler.
        //
        // It's true that everything here is a bit of a pain, but apparently if
        // you're not literally make or bash then you get a lot of bug reports.
        let known_wrappers = ["ccache", "distcc", "sccache", "icecc"];

        let mut parts = tool.split_whitespace();
        let maybe_wrapper = match parts.next() {
            Some(s) => s,
            None => return None,
        };

        let file_stem = Path::new(maybe_wrapper)
            .file_stem()
            .unwrap()
            .to_str()
            .unwrap();
        if known_wrappers.contains(&file_stem) {
            if let Some(compiler) = parts.next() {
                return Some((
                    compiler.to_string(),
                    Some(maybe_wrapper.to_string()),
                    parts.map(|s| s.to_string()).collect(),
                ));
            }
        }

        Some((
            maybe_wrapper.to_string(),
            Self::rustc_wrapper_fallback(),
            parts.map(|s| s.to_string()).collect(),
        ))
    }

    /// Returns the default C++ standard library for the current target: `libc++`
    /// for OS X, FreeBSD, and OpenBSD, and `libstdc++` for anything else.
    fn get_cpp_link_stdlib(&self) -> Result<Option<String>, Error> {
        match self.cpp_link_stdlib.clone() {
            Some(s) => Ok(s),
            None => {
                if let Ok(stdlib) = self.get_var("CXXSTDLIB") {
                    if stdlib.is_empty() {
                        Ok(None)
                    } else {
                        Ok(Some(stdlib))
                    }
                } else {
                    let target = self.get_target()?;
                    if target.contains("msvc") {
                        Ok(None)
                    } else if target.contains("apple") {
                        Ok(Some("c++".to_string()))
                    } else if target.contains("freebsd") {
                        Ok(Some("c++".to_string()))
                    } else if target.contains("openbsd") {
                        Ok(Some("c++".to_string()))
                    } else {
                        Ok(Some("stdc++".to_string()))
                    }
                }
            }
        }
    }

    fn get_ar(&self) -> Result<(Command, String), Error> {
        if let Some(ref p) = self.archiver {
            let name = p.file_name().and_then(|s| s.to_str()).unwrap_or("ar");
            return Ok((self.cmd(p), name.to_string()));
        }
        if let Ok(p) = self.get_var("AR") {
            return Ok((self.cmd(&p), p));
        }
        let target = self.get_target()?;
        let default_ar = "ar".to_string();
        let program = if target.contains("android") {
            format!("{}-ar", target.replace("armv7", "arm"))
        } else if target.contains("emscripten") {
            // Windows uses bat files so we have to be a bit more specific
            if cfg!(windows) {
                let mut cmd = self.cmd("cmd");
                cmd.arg("/c").arg("emar.bat");
                return Ok((cmd, "emar.bat".to_string()));
            }

            "emar".to_string()
        } else if target.contains("msvc") {
            match windows_registry::find(&target, "lib.exe") {
                Some(t) => return Ok((t, "lib.exe".to_string())),
                None => "lib.exe".to_string(),
            }
        } else if self.get_host()? != target {
            match self.prefix_for_target(&target) {
                Some(p) => {
                    let target_ar = format!("{}-ar", p);
                    if Command::new(&target_ar).output().is_ok() {
                        target_ar
                    } else {
                        default_ar
                    }
                }
                None => default_ar,
            }
        } else {
            default_ar
        };
        Ok((self.cmd(&program), program))
    }

    fn prefix_for_target(&self, target: &str) -> Option<String> {
        // CROSS_COMPILE is of the form: "arm-linux-gnueabi-"
        let cc_env = self.getenv("CROSS_COMPILE");
        let cross_compile = cc_env
            .as_ref()
            .map(|s| s.trim_end_matches('-').to_owned());
        cross_compile.or(match &target[..]
{ "aarch64-unknown-linux-gnu" => Some("aarch64-linux-gnu"), "aarch64-unknown-linux-musl" => Some("aarch64-linux-musl"), "aarch64-unknown-netbsd" => Some("aarch64--netbsd"), "arm-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), "armv4t-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), "armv5te-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), "armv5te-unknown-linux-musleabi" => Some("arm-linux-gnueabi"), "arm-frc-linux-gnueabi" => Some("arm-frc-linux-gnueabi"), "arm-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "arm-unknown-linux-musleabi" => Some("arm-linux-musleabi"), "arm-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "arm-unknown-netbsd-eabi" => Some("arm--netbsdelf-eabi"), "armv6-unknown-netbsd-eabihf" => Some("armv6--netbsdelf-eabihf"), "armv7-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), "armv7-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "armv7-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "armv7neon-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "armv7neon-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "thumbv7-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "thumbv7-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "thumbv7neon-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "thumbv7neon-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "armv7-unknown-netbsd-eabihf" => Some("armv7--netbsdelf-eabihf"), "hexagon-unknown-linux-musl" => Some("hexagon-linux-musl"), "i586-unknown-linux-musl" => Some("musl"), "i686-pc-windows-gnu" => Some("i686-w64-mingw32"), "i686-uwp-windows-gnu" => Some("i686-w64-mingw32"), "i686-unknown-linux-musl" => Some("musl"), "i686-unknown-netbsd" => Some("i486--netbsdelf"), "mips-unknown-linux-gnu" => Some("mips-linux-gnu"), "mipsel-unknown-linux-gnu" => Some("mipsel-linux-gnu"), "mips64-unknown-linux-gnuabi64" => Some("mips64-linux-gnuabi64"), "mips64el-unknown-linux-gnuabi64" => Some("mips64el-linux-gnuabi64"), "mipsisa32r6-unknown-linux-gnu" => Some("mipsisa32r6-linux-gnu"), "mipsisa32r6el-unknown-linux-gnu" => Some("mipsisa32r6el-linux-gnu"), "mipsisa64r6-unknown-linux-gnuabi64" => Some("mipsisa64r6-linux-gnuabi64"), "mipsisa64r6el-unknown-linux-gnuabi64" => Some("mipsisa64r6el-linux-gnuabi64"), "powerpc-unknown-linux-gnu" => Some("powerpc-linux-gnu"), "powerpc-unknown-linux-gnuspe" => Some("powerpc-linux-gnuspe"), "powerpc-unknown-netbsd" => Some("powerpc--netbsd"), "powerpc64-unknown-linux-gnu" => Some("powerpc-linux-gnu"), "powerpc64le-unknown-linux-gnu" => Some("powerpc64le-linux-gnu"), "riscv32i-unknown-none-elf" => self.find_working_gnu_prefix(&[ "riscv32-unknown-elf", "riscv64-unknown-elf", "riscv-none-embed", ]), "riscv32imac-unknown-none-elf" => self.find_working_gnu_prefix(&[ "riscv32-unknown-elf", "riscv64-unknown-elf", "riscv-none-embed", ]), "riscv32imc-unknown-none-elf" => self.find_working_gnu_prefix(&[ "riscv32-unknown-elf", "riscv64-unknown-elf", "riscv-none-embed", ]), "riscv64gc-unknown-none-elf" => self.find_working_gnu_prefix(&[ "riscv64-unknown-elf", "riscv32-unknown-elf", "riscv-none-embed", ]), "riscv64imac-unknown-none-elf" => self.find_working_gnu_prefix(&[ "riscv64-unknown-elf", "riscv32-unknown-elf", "riscv-none-embed", ]), "riscv64gc-unknown-linux-gnu" => Some("riscv64-linux-gnu"), "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"), "sparc-unknown-linux-gnu" => Some("sparc-linux-gnu"), "sparc64-unknown-linux-gnu" => Some("sparc64-linux-gnu"), "sparc64-unknown-netbsd" => Some("sparc64--netbsd"), 
"sparcv9-sun-solaris" => Some("sparcv9-sun-solaris"), "armv7a-none-eabi" => Some("arm-none-eabi"), "armv7a-none-eabihf" => Some("arm-none-eabi"), "armebv7r-none-eabi" => Some("arm-none-eabi"), "armebv7r-none-eabihf" => Some("arm-none-eabi"), "armv7r-none-eabi" => Some("arm-none-eabi"), "armv7r-none-eabihf" => Some("arm-none-eabi"), "thumbv6m-none-eabi" => Some("arm-none-eabi"), "thumbv7em-none-eabi" => Some("arm-none-eabi"), "thumbv7em-none-eabihf" => Some("arm-none-eabi"), "thumbv7m-none-eabi" => Some("arm-none-eabi"), "thumbv8m.base-none-eabi" => Some("arm-none-eabi"), "thumbv8m.main-none-eabi" => Some("arm-none-eabi"), "thumbv8m.main-none-eabihf" => Some("arm-none-eabi"), "x86_64-pc-windows-gnu" => Some("x86_64-w64-mingw32"), "x86_64-uwp-windows-gnu" => Some("x86_64-w64-mingw32"), "x86_64-rumprun-netbsd" => Some("x86_64-rumprun-netbsd"), "x86_64-unknown-linux-musl" => Some("musl"), "x86_64-unknown-netbsd" => Some("x86_64--netbsd"), _ => None, } .map(|x| x.to_owned())) } /// Some platforms have multiple, compatible, canonical prefixes. Look through /// each possible prefix for a compiler that exists and return it. The prefixes /// should be ordered from most-likely to least-likely. fn find_working_gnu_prefix(&self, prefixes: &[&'static str]) -> Option<&'static str> { let suffix = if self.cpp { "-g++" } else { "-gcc" }; let extension = std::env::consts::EXE_SUFFIX; // Loop through PATH entries searching for each toolchain. This ensures that we // are more likely to discover the toolchain early on, because chances are good // that the desired toolchain is in one of the higher-priority paths. env::var_os("PATH") .as_ref() .and_then(|path_entries| { env::split_paths(path_entries).find_map(|path_entry| { for prefix in prefixes { let target_compiler = format!("{}{}{}", prefix, suffix, extension); if path_entry.join(&target_compiler).exists() { return Some(prefix); } } None }) }) .map(|prefix| *prefix) .or_else(|| // If no toolchain was found, provide the first toolchain that was passed in. // This toolchain has been shown not to exist, however it will appear in the // error that is shown to the user which should make it easier to search for // where it should be obtained. 
            prefixes.first().map(|prefix| *prefix))
    }

    fn get_target(&self) -> Result<String, Error> {
        match self.target.clone() {
            Some(t) => Ok(t),
            None => Ok(self.getenv_unwrap("TARGET")?),
        }
    }

    fn get_host(&self) -> Result<String, Error> {
        match self.host.clone() {
            Some(h) => Ok(h),
            None => Ok(self.getenv_unwrap("HOST")?),
        }
    }

    fn get_opt_level(&self) -> Result<String, Error> {
        match self.opt_level.as_ref().cloned() {
            Some(ol) => Ok(ol),
            None => Ok(self.getenv_unwrap("OPT_LEVEL")?),
        }
    }

    fn get_debug(&self) -> bool {
        self.debug.unwrap_or_else(|| match self.getenv("DEBUG") {
            Some(s) => s != "false",
            None => false,
        })
    }

    fn get_force_frame_pointer(&self) -> bool {
        self.force_frame_pointer.unwrap_or_else(|| self.get_debug())
    }

    fn get_out_dir(&self) -> Result<PathBuf, Error> {
        match self.out_dir.clone() {
            Some(p) => Ok(p),
            None => Ok(env::var_os("OUT_DIR").map(PathBuf::from).ok_or_else(|| {
                Error::new(
                    ErrorKind::EnvVarNotFound,
                    "Environment variable OUT_DIR not defined.",
                )
            })?),
        }
    }

    fn getenv(&self, v: &str) -> Option<String> {
        let mut cache = self.env_cache.lock().unwrap();
        if let Some(val) = cache.get(v) {
            return val.clone();
        }
        let r = env::var(v).ok();
        self.print(&format!("{} = {:?}", v, r));
        cache.insert(v.to_string(), r.clone());
        r
    }

    fn getenv_unwrap(&self, v: &str) -> Result<String, Error> {
        match self.getenv(v) {
            Some(s) => Ok(s),
            None => Err(Error::new(
                ErrorKind::EnvVarNotFound,
                &format!("Environment variable {} not defined.", v),
            )),
        }
    }

    fn print(&self, s: &str) {
        if self.cargo_metadata {
            println!("{}", s);
        }
    }
}

impl Default for Build {
    fn default() -> Build {
        Build::new()
    }
}

impl Tool {
    fn new(path: PathBuf) -> Self {
        Tool::with_features(path, None, false)
    }

    fn with_clang_driver(path: PathBuf, clang_driver: Option<&str>) -> Self {
        Self::with_features(path, clang_driver, false)
    }

    #[cfg(windows)]
    /// Explicitly set the `ToolFamily`, skipping name-based detection.
    fn with_family(path: PathBuf, family: ToolFamily) -> Self {
        Self {
            path,
            cc_wrapper_path: None,
            cc_wrapper_args: Vec::new(),
            args: Vec::new(),
            env: Vec::new(),
            family,
            cuda: false,
            removed_args: Vec::new(),
        }
    }

    fn with_features(path: PathBuf, clang_driver: Option<&str>, cuda: bool) -> Self {
        // Try to detect the family of the tool from its name, falling back to Gnu.
        let family = if let Some(fname) = path.file_name().and_then(|p| p.to_str()) {
            if fname.contains("clang-cl") {
                ToolFamily::Msvc { clang_cl: true }
            } else if fname.contains("cl")
                && !fname.contains("cloudabi")
                && !fname.contains("uclibc")
                && !fname.contains("clang")
            {
                ToolFamily::Msvc { clang_cl: false }
            } else if fname.contains("clang") {
                match clang_driver {
                    Some("cl") => ToolFamily::Msvc { clang_cl: true },
                    _ => ToolFamily::Clang,
                }
            } else {
                ToolFamily::Gnu
            }
        } else {
            ToolFamily::Gnu
        };
        Tool {
            path,
            cc_wrapper_path: None,
            cc_wrapper_args: Vec::new(),
            args: Vec::new(),
            env: Vec::new(),
            family,
            cuda,
            removed_args: Vec::new(),
        }
    }

    /// Add an argument to be stripped from the final command arguments.
    fn remove_arg(&mut self, flag: OsString) {
        self.removed_args.push(flag);
    }

    /// Add a flag, and optionally prepend the NVCC wrapper flag "-Xcompiler".
    ///
    /// Currently this is only used for compiling CUDA sources, since NVCC only
    /// accepts a limited set of GNU-like flags, and the rest must be prefixed
    /// with a "-Xcompiler" flag to get passed to the underlying C++ compiler.
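    ///
    /// For example, when `cuda` is set, pushing `-O2` appends the pair
    /// `-Xcompiler -O2` to the argument list.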
    fn push_cc_arg(&mut self, flag: OsString) {
        if self.cuda {
            self.args.push("-Xcompiler".into());
        }
        self.args.push(flag);
    }

    fn is_duplicate_opt_arg(&self, flag: &OsString) -> bool {
        let flag = flag.to_str().unwrap();
        let mut chars = flag.chars();

        // Only duplicate-check compiler flags
        if self.is_like_msvc() {
            if chars.next() != Some('/') {
                return false;
            }
        } else if self.is_like_gnu() || self.is_like_clang() {
            if chars.next() != Some('-') {
                return false;
            }
        }

        // Check for existing optimization flags (-O, /O)
        if chars.next() == Some('O') {
            return self
                .args()
                .iter()
                .any(|ref a| a.to_str().unwrap_or("").chars().nth(1) == Some('O'));
        }

        // TODO Check for existing -m..., -m...=..., /arch:... flags
        return false;
    }

    /// Don't push an optimization arg if it conflicts with existing args.
    fn push_opt_unless_duplicate(&mut self, flag: OsString) {
        if self.is_duplicate_opt_arg(&flag) {
            println!("Info: Ignoring duplicate arg {:?}", &flag);
        } else {
            self.push_cc_arg(flag);
        }
    }

    /// Converts this compiler into a `Command` that's ready to be run.
    ///
    /// This is useful for when the compiler needs to be executed and the
    /// command returned will already have the initial arguments and environment
    /// variables configured.
    pub fn to_command(&self) -> Command {
        let mut cmd = match self.cc_wrapper_path {
            Some(ref cc_wrapper_path) => {
                let mut cmd = Command::new(&cc_wrapper_path);
                cmd.arg(&self.path);
                cmd
            }
            None => Command::new(&self.path),
        };
        cmd.args(&self.cc_wrapper_args);

        let value = self
            .args
            .iter()
            .filter(|a| !self.removed_args.contains(a))
            .collect::<Vec<_>>();
        cmd.args(&value);

        for &(ref k, ref v) in self.env.iter() {
            cmd.env(k, v);
        }
        cmd
    }

    /// Returns the path for this compiler.
    ///
    /// Note that this may not be a path to a file on the filesystem, e.g. "cc",
    /// but rather something which will be resolved when a process is spawned.
    pub fn path(&self) -> &Path {
        &self.path
    }

    /// Returns the default set of arguments to the compiler needed to produce
    /// executables for the target this compiler generates.
    pub fn args(&self) -> &[OsString] {
        &self.args
    }

    /// Returns the set of environment variables needed for this compiler to
    /// operate.
    ///
    /// This is typically only used for MSVC compilers currently.
    pub fn env(&self) -> &[(OsString, OsString)] {
        &self.env
    }

    /// Returns the compiler command in the format of the CC environment variable,
    /// or an empty string if the CC env var was not present.
    ///
    /// This is typically used by configure scripts.
    pub fn cc_env(&self) -> OsString {
        match self.cc_wrapper_path {
            Some(ref cc_wrapper_path) => {
                let mut cc_env = cc_wrapper_path.as_os_str().to_owned();
                cc_env.push(" ");
                cc_env.push(self.path.to_path_buf().into_os_string());
                for arg in self.cc_wrapper_args.iter() {
                    cc_env.push(" ");
                    cc_env.push(arg);
                }
                cc_env
            }
            None => OsString::from(""),
        }
    }

    /// Returns the compiler flags in the format of the CFLAGS environment variable.
    /// Note that these are not the CFLAGS from the environment; they are the
    /// tool's internal flags, intended to be used as CFLAGS.
    ///
    /// This is typically used by configure scripts.
    pub fn cflags_env(&self) -> OsString {
        let mut flags = OsString::new();
        for (i, arg) in self.args.iter().enumerate() {
            if i > 0 {
                flags.push(" ");
            }
            flags.push(arg);
        }
        flags
    }

    /// Whether the tool is GNU Compiler Collection-like.
    pub fn is_like_gnu(&self) -> bool {
        self.family == ToolFamily::Gnu
    }

    /// Whether the tool is Clang-like.
    pub fn is_like_clang(&self) -> bool {
        self.family == ToolFamily::Clang
    }

    /// Whether the tool is MSVC-like.
    pub fn is_like_msvc(&self) -> bool {
        match self.family {
            ToolFamily::Msvc { .. } => true,
            _ => false,
        }
    }
}

fn run(cmd: &mut Command, program: &str) -> Result<(), Error> {
    let (mut child, print) = spawn(cmd, program)?;
    let status = match child.wait() {
        Ok(s) => s,
        Err(_) => {
            return Err(Error::new(
                ErrorKind::ToolExecError,
                &format!(
                    "Failed to wait on spawned child process, command {:?} with args {:?}.",
                    cmd, program
                ),
            ));
        }
    };
    print.join().unwrap();
    println!("{}", status);

    if status.success() {
        Ok(())
    } else {
        Err(Error::new(
            ErrorKind::ToolExecError,
            &format!(
                "Command {:?} with args {:?} did not execute successfully (status code {}).",
                cmd, program, status
            ),
        ))
    }
}

fn run_output(cmd: &mut Command, program: &str) -> Result<Vec<u8>, Error> {
    cmd.stdout(Stdio::piped());
    let (mut child, print) = spawn(cmd, program)?;
    let mut stdout = vec![];
    child
        .stdout
        .take()
        .unwrap()
        .read_to_end(&mut stdout)
        .unwrap();
    let status = match child.wait() {
        Ok(s) => s,
        Err(_) => {
            return Err(Error::new(
                ErrorKind::ToolExecError,
                &format!(
                    "Failed to wait on spawned child process, command {:?} with args {:?}.",
                    cmd, program
                ),
            ));
        }
    };
    print.join().unwrap();
    println!("{}", status);

    if status.success() {
        Ok(stdout)
    } else {
        Err(Error::new(
            ErrorKind::ToolExecError,
            &format!(
                "Command {:?} with args {:?} did not execute successfully (status code {}).",
                cmd, program, status
            ),
        ))
    }
}

fn spawn(cmd: &mut Command, program: &str) -> Result<(Child, JoinHandle<()>), Error> {
    println!("running: {:?}", cmd);

    // Capture the standard error coming from these programs, and write it out
    // with cargo:warning= prefixes. Note that this is a bit wonky to avoid
    // requiring the output to be UTF-8; we instead just ship bytes from one
    // location to another.
    match cmd.stderr(Stdio::piped()).spawn() {
        Ok(mut child) => {
            let stderr = BufReader::new(child.stderr.take().unwrap());
            let print = thread::spawn(move || {
                for line in stderr.split(b'\n').filter_map(|l| l.ok()) {
                    print!("cargo:warning=");
                    std::io::stdout().write_all(&line).unwrap();
                    println!();
                }
            });
            Ok((child, print))
        }
        Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
            let extra = if cfg!(windows) {
                " (see https://github.com/alexcrichton/cc-rs#compile-time-requirements \
                 for help)"
            } else {
                ""
            };
            Err(Error::new(
                ErrorKind::ToolNotFound,
                &format!("Failed to find tool. Is `{}` installed?{}", program, extra),
            ))
        }
        Err(_) => Err(Error::new(
            ErrorKind::ToolExecError,
            &format!("Command {:?} with args {:?} failed to start.", cmd, program),
        )),
    }
}

fn fail(s: &str) -> ! {
    let _ = writeln!(io::stderr(), "\n\nerror occurred: {}\n\n", s);
    std::process::exit(1);
}

fn command_add_output_file(
    cmd: &mut Command,
    dst: &Path,
    cuda: bool,
    msvc: bool,
    clang: bool,
    is_asm: bool,
    is_arm: bool,
) {
    if msvc && !clang && !cuda && !(is_asm && is_arm) {
        let mut s = OsString::from("-Fo");
        s.push(&dst);
        cmd.arg(s);
    } else {
        cmd.arg("-o").arg(&dst);
    }
}

// By default, use the minimum available API level.
// See the note about naming here:
// https://android.googlesource.com/platform/ndk/+/refs/heads/ndk-release-r21/docs/BuildSystemMaintainers.md#Clang
static NEW_STANDALONE_ANDROID_COMPILERS: [&str; 4] = [
    "aarch64-linux-android21-clang",
    "armv7a-linux-androideabi16-clang",
    "i686-linux-android16-clang",
    "x86_64-linux-android21-clang",
];

// The new "standalone" C/C++ cross-compiler executables from the recent Android
// NDK are just shell scripts that call the main clang binary (from the Android
// NDK) with the proper `--target` argument.
//
// For example, armv7a-linux-androideabi16-clang passes
// `--target=armv7a-linux-androideabi16` to clang.
// So, to construct a proper command line, check whether the
// `--target` argument would be passed to clang internally.
fn android_clang_compiler_uses_target_arg_internally(clang_path: &Path) -> bool {
    NEW_STANDALONE_ANDROID_COMPILERS
        .iter()
        .any(|x| Some(x.as_ref()) == clang_path.file_name())
}

fn autodetect_android_compiler(target: &str, host: &str, gnu: &str, clang: &str) -> String {
    let new_clang_key = match target {
        "aarch64-linux-android" => Some("aarch64"),
        "armv7-linux-androideabi" => Some("armv7a"),
        "i686-linux-android" => Some("i686"),
        "x86_64-linux-android" => Some("x86_64"),
        _ => None,
    };

    let new_clang = new_clang_key
        .map(|key| {
            NEW_STANDALONE_ANDROID_COMPILERS
                .iter()
                .find(|x| x.starts_with(key))
        })
        .unwrap_or(None);

    if let Some(new_clang) = new_clang {
        if Command::new(new_clang).output().is_ok() {
            return (*new_clang).into();
        }
    }

    let target = target
        .replace("armv7neon", "arm")
        .replace("armv7", "arm")
        .replace("thumbv7neon", "arm")
        .replace("thumbv7", "arm");
    let gnu_compiler = format!("{}-{}", target, gnu);
    let clang_compiler = format!("{}-{}", target, clang);

    // On Windows, the Android clang compiler is provided as a `.cmd` file instead
    // of a `.exe` file. `std::process::Command` won't run `.cmd` files unless the
    // `.cmd` is explicitly appended to the command name, so we do that here.
    let clang_compiler_cmd = format!("{}-{}.cmd", target, clang);

    // Check if the GNU compiler is present;
    // if not, use clang.
    if Command::new(&gnu_compiler).output().is_ok() {
        gnu_compiler
    } else if host.contains("windows") && Command::new(&clang_compiler_cmd).output().is_ok() {
        clang_compiler_cmd
    } else {
        clang_compiler
    }
}
36.577947
141
0.519401
1e99de6a881debc992e2239f454fcd3eecf05dd9
5,347
use bundle::{format_cargo_toml, format_lib, Crate, Recipe, StdResult};
use std::{
    env, fs,
    path::{Path, PathBuf},
    process::{Command, ExitStatus, Stdio}
};

fn main() -> StdResult<()> {
    let env_recipe = env::var("LINEARF_RECIPE");
    let env_dir = env!("CARGO_MANIFEST_DIR");
    let features = {
        let mut a = env::args();
        a.next();
        a.next()
    }
    .expect("The first argument, \"features\", is required");
    println!(
        "{} {}",
        features,
        env_recipe.as_deref().ok().unwrap_or("None")
    );
    let recipe = input(env_recipe)?;
    let here = Path::new(env_dir);
    let core = here.parent().unwrap().join("core");
    let (registry_toml, registry_lib) = registry(here);
    let crates = read_crates(&recipe.crates)?;
    let stash = Stash::stash(
        &registry_toml,
        &registry_lib,
        crates.iter().map(|(_, f)| f.clone()).collect()
    )?;
    preprocess(&recipe, registry_lib, registry_toml, crates, &core)?;
    let run = build(&features);
    stash.restore()?;
    std::process::exit(run?.code().ok_or("Process terminated by signal")?);
}

fn input(env_reg: Result<String, env::VarError>) -> StdResult<Recipe> {
    match env_reg {
        Ok(s) => Ok(serde_json::from_str(&s)?),
        Err(env::VarError::NotPresent) => Ok(Recipe::default()),
        Err(e) => Err(e.into())
    }
}

fn registry(here: &Path) -> (PathBuf, PathBuf) {
    let registry = here.parent().unwrap().join("registry");
    let cargo_toml = registry.join("Cargo.toml");
    let lib = registry.join("src").join("lib.rs");
    (cargo_toml, lib)
}

fn read_crates(crates: &[Crate]) -> std::io::Result<Vec<(&Crate, F)>> {
    crates
        .iter()
        .map(|c| {
            let f = F::read(&c.dir.join("Cargo.toml"))?;
            Ok((c, f))
        })
        .collect()
}

fn preprocess(
    recipe: &Recipe,
    registry_lib: PathBuf,
    registry_toml: PathBuf,
    crates: Vec<(&Crate, F)>,
    core: &Path
) -> StdResult<()> {
    fs::write(&registry_lib, format_lib(recipe))?;
    fs::write(&registry_toml, format_cargo_toml(recipe)?)?;
    for (c, F { p, s }) in crates.into_iter() {
        let mut manifest: toml::value::Table = toml::from_str(&s)?;
        let deps = manifest
            .get_mut("dependencies")
            .ok_or_else(|| format!("{:?} has no \"dependencies\" ", &p))?
            .as_table_mut()
            .ok_or_else(|| format!("{:?} has no \"dependencies\" ", &p))?;
        deps["linearf"] = {
            let mut m = toml::map::Map::new();
            m.insert(
                "path".to_string(),
                toml::Value::from(relative(&p, core).display().to_string())
            );
            toml::Value::Table(m)
        };
        let pac = manifest
            .get_mut("package")
            .ok_or_else(|| format!("{:?} has no \"package\" ", &p))?
            .as_table_mut()
            .ok_or_else(|| format!("{:?} has no \"package\" ", &p))?;
        pac["name"] = toml::Value::String(c.name.clone());
        fs::write(p, toml::to_string(&toml::Value::Table(manifest))?)?;
    }
    Ok(())
}

fn build(features: &str) -> std::io::Result<ExitStatus> {
    Command::new("cargo")
        .args(["fmt", "-p", "registry"])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .status()
        .ok();
    // Build only the library target; `--lib` takes no value.
    Command::new("cargo")
        .args(["build", "--features", features, "--release", "--lib"])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .status()
}

pub struct Stash {
    files: Vec<F>
}

#[derive(Clone)]
struct F {
    p: PathBuf,
    s: String
}

impl F {
    fn read(p: &Path) -> std::io::Result<Self> {
        Ok(Self {
            p: p.into(),
            s: fs::read_to_string(p)?
}) } } impl Stash { #[allow(clippy::self_named_constructors)] fn stash(cargo_toml: &Path, lib: &Path, crates: Vec<F>) -> StdResult<Self> { let mut files = crates.to_vec(); files.push(F::read(cargo_toml)?); files.push(F::read(lib)?); Ok(Self { files }) } fn restore(self) -> StdResult<()> { for F { p, s } in self.files { fs::write(p, s)?; } Ok(()) } } fn relative(root: &Path, file: &Path) -> PathBuf { let l: Vec<_> = root.components().into_iter().collect(); let r: Vec<_> = file.components().into_iter().collect(); let (cnt, _) = root .components() .zip(file.components()) .map(|(a, b)| a == b) .fold((0, true), |(cnt, success), same| { let success = success && same; let cnt = if success { cnt + 1 } else { cnt }; (cnt, success) }); use std::borrow::Cow; let prefix = if l.len() - cnt <= 1 { Cow::Borrowed("") } else { Cow::Owned((0..(l.len() - cnt - 1)).map(|_| "../").collect::<String>()) }; Path::new(&*prefix).join(r[cnt..].iter().collect::<PathBuf>()) } #[test] fn can_relative() { assert_eq!( relative(Path::new("/foo.ts"), Path::new("/bar.ts")), Path::new("bar.ts") ); assert_eq!( relative(Path::new("/foo.ts"), Path::new("/foo/bar.ts")), Path::new("foo/bar.ts") ); assert_eq!( relative(Path::new("/foo/bar.ts"), Path::new("/bar.ts")), Path::new("../bar.ts") ); }
29.059783
80
0.518609
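The `Stash` type above implements a small but reusable pattern: snapshot files before a fallible, file-mutating step and write the originals back afterwards. A hedged std-only sketch of the same idea, with a hypothetical temp file instead of the build script's manifests:

use std::{fs, io, path::PathBuf};

struct Stash {
    files: Vec<(PathBuf, String)>,
}

impl Stash {
    // Read every file up front so we can restore it later.
    fn stash(paths: &[PathBuf]) -> io::Result<Self> {
        let mut files = Vec::new();
        for p in paths {
            files.push((p.clone(), fs::read_to_string(p)?));
        }
        Ok(Self { files })
    }

    // Write the saved contents back, consuming the stash.
    fn restore(self) -> io::Result<()> {
        for (p, s) in self.files {
            fs::write(p, s)?;
        }
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let path = std::env::temp_dir().join("stash-demo.txt");
    fs::write(&path, "original")?;

    let stash = Stash::stash(&[path.clone()])?;
    fs::write(&path, "temporarily rewritten")?; // the "preprocess" step
    stash.restore()?; // put the original back

    assert_eq!(fs::read_to_string(&path)?, "original");
    Ok(())
}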
bf075716ad2e2109fad5f4e46036489bf93ef9f3
12,347
use crate::serialization_ast::datamodel_ast::{ Datamodel, Enum, EnumValue, Field, Function, Model, PrimaryKey, UniqueIndex, }; use bigdecimal::ToPrimitive; use datamodel::dml::{self, CompositeTypeFieldType, FieldType, Ignorable, PrismaValue, ScalarType}; pub fn schema_to_dmmf(schema: &dml::Datamodel) -> Datamodel { let mut datamodel = Datamodel { models: vec![], enums: vec![], types: Vec::with_capacity(schema.composite_types.len()), }; for enum_model in schema.enums() { datamodel.enums.push(enum_to_dmmf(enum_model)); } for model in schema.models().filter(|model| !model.is_ignored) { datamodel.models.push(model_to_dmmf(model)); } for ct in schema.composite_types() { datamodel.types.push(composite_type_to_dmmf(ct)) } datamodel } fn enum_to_dmmf(en: &dml::Enum) -> Enum { let mut enm = Enum { name: en.name.clone(), values: vec![], db_name: en.database_name.clone(), documentation: en.documentation.clone(), }; for enum_value in en.values() { enm.values.push(enum_value_to_dmmf(enum_value)); } enm } fn enum_value_to_dmmf(en: &dml::EnumValue) -> EnumValue { EnumValue { name: en.name.clone(), db_name: en.database_name.clone(), } } fn composite_type_to_dmmf(ct: &dml::CompositeType) -> Model { Model { name: ct.name.clone(), db_name: None, fields: ct .fields .iter() .filter(|field| !matches!(&field.r#type, CompositeTypeFieldType::Unsupported(_))) .map(composite_type_field_to_dmmf) .collect(), is_generated: None, documentation: None, primary_key: None, unique_fields: Vec::new(), unique_indexes: Vec::new(), } } fn composite_type_field_to_dmmf(field: &dml::CompositeTypeField) -> Field { Field { name: field.name.clone(), kind: match field.r#type { CompositeTypeFieldType::CompositeType(_) => String::from("object"), CompositeTypeFieldType::Enum(_) => String::from("enum"), CompositeTypeFieldType::Scalar(_, _, _) => String::from("scalar"), CompositeTypeFieldType::Unsupported(_) => String::from("unsupported"), }, is_required: field.arity == dml::FieldArity::Required || field.arity == dml::FieldArity::List, is_list: field.arity == dml::FieldArity::List, is_id: false, is_read_only: false, has_default_value: field.default_value.is_some(), default: default_value_to_serde(&field.default_value), is_unique: false, relation_name: None, relation_from_fields: None, relation_to_fields: None, relation_on_delete: None, field_type: match &field.r#type { CompositeTypeFieldType::CompositeType(t) => t.clone(), CompositeTypeFieldType::Enum(t) => t.clone(), CompositeTypeFieldType::Unsupported(t) => t.clone(), CompositeTypeFieldType::Scalar(t, _, _) => type_to_string(t), }, is_generated: None, is_updated_at: None, documentation: None, } } fn model_to_dmmf(model: &dml::Model) -> Model { let primary_key = if let Some(pk) = &model.primary_key { (!pk.defined_on_field).then(|| PrimaryKey { name: pk.name.clone(), //TODO(extended indices) add field options here fields: pk.fields.clone().into_iter().map(|f| f.name).collect(), }) } else { None }; Model { name: model.name.clone(), db_name: model.database_name.clone(), fields: model .fields() .filter(|field| !field.is_ignored() && !matches!(field.field_type(), FieldType::Unsupported(_))) .map(|f| field_to_dmmf(model, f)) .collect(), is_generated: Some(model.is_generated), documentation: model.documentation.clone(), primary_key, unique_fields: model .indices .iter() .filter_map(|i| { (i.is_unique() && !i.defined_on_field).then(|| { i.fields .clone() .into_iter() .map(|f| f.path.into_iter().map(|(field, _)| field).collect::<Vec<_>>().join(".")) .collect() }) }) .collect(), unique_indexes: 
model .indices .iter() .filter_map(|i| { (i.is_unique() && !i.defined_on_field).then(|| UniqueIndex { name: i.name.clone(), //TODO(extended indices) add field options here fields: i .fields .clone() .into_iter() .map(|f| f.path.into_iter().map(|(field, _)| field).collect::<Vec<_>>().join(".")) .collect(), }) }) .collect(), } } fn field_to_dmmf(model: &dml::Model, field: &dml::Field) -> Field { let a_relation_field_is_based_on_this_field: bool = model .relation_fields() .any(|f| f.relation_info.fields.iter().any(|f| f == field.name())); Field { name: field.name().to_string(), kind: get_field_kind(field), is_required: *field.arity() == dml::FieldArity::Required || *field.arity() == dml::FieldArity::List, is_list: *field.arity() == dml::FieldArity::List, is_id: model.field_is_primary(field.name()), is_read_only: a_relation_field_is_based_on_this_field, has_default_value: field.default_value().is_some(), default: default_value_to_serde(&field.default_value().cloned()), is_unique: model.field_is_unique(field.name()), relation_name: get_relation_name(field), relation_from_fields: get_relation_from_fields(field), relation_to_fields: get_relation_to_fields(field), relation_on_delete: get_relation_delete_strategy(field), field_type: get_field_type(field), is_generated: Some(field.is_generated()), is_updated_at: Some(field.is_updated_at()), documentation: field.documentation().map(|v| v.to_owned()), } } fn get_field_kind(field: &dml::Field) -> String { match field.field_type() { dml::FieldType::CompositeType(_) => String::from("object"), dml::FieldType::Relation(_) => String::from("object"), dml::FieldType::Enum(_) => String::from("enum"), dml::FieldType::Scalar(_, _, _) => String::from("scalar"), dml::FieldType::Unsupported(_) => String::from("unsupported"), } } fn default_value_to_serde(dv_opt: &Option<dml::DefaultValue>) -> Option<serde_json::Value> { dv_opt.as_ref().map(|dv| match dv.kind() { dml::DefaultKind::Single(value) => prisma_value_to_serde(&value.clone()), dml::DefaultKind::Expression(vg) => { let args: Vec<_> = vg.args().iter().map(|(_, v)| v.clone()).collect(); function_to_serde(vg.name(), &args) } }) } fn prisma_value_to_serde(value: &PrismaValue) -> serde_json::Value { match value { PrismaValue::Boolean(val) => serde_json::Value::Bool(*val), PrismaValue::String(val) => serde_json::Value::String(val.clone()), PrismaValue::Enum(val) => serde_json::Value::String(val.clone()), PrismaValue::Float(val) => { serde_json::Value::Number(serde_json::Number::from_f64(val.to_f64().unwrap()).unwrap()) } PrismaValue::Int(val) => serde_json::Value::Number(serde_json::Number::from_f64(*val as f64).unwrap()), PrismaValue::BigInt(val) => serde_json::Value::String(val.to_string()), PrismaValue::DateTime(val) => serde_json::Value::String(val.to_rfc3339()), PrismaValue::Null => serde_json::Value::Null, PrismaValue::Uuid(val) => serde_json::Value::String(val.to_string()), PrismaValue::Json(val) => serde_json::Value::String(val.to_string()), PrismaValue::Xml(val) => serde_json::Value::String(val.to_string()), PrismaValue::List(value_vec) => serde_json::Value::Array(value_vec.iter().map(prisma_value_to_serde).collect()), PrismaValue::Bytes(b) => serde_json::Value::String(dml::prisma_value::encode_bytes(b)), PrismaValue::Object(pairs) => { let mut map = serde_json::Map::with_capacity(pairs.len()); pairs.iter().for_each(|(key, value)| { map.insert(key.clone(), prisma_value_to_serde(value)); }); serde_json::Value::Object(map) } } } fn function_to_serde(name: &str, args: &[PrismaValue]) -> serde_json::Value { 
let func = Function { name: String::from(name), args: args.iter().map(prisma_value_to_serde).collect(), }; serde_json::to_value(&func).expect("Failed to render function JSON") } fn get_field_type(field: &dml::Field) -> String { match &field.field_type() { dml::FieldType::CompositeType(t) => t.clone(), dml::FieldType::Relation(relation_info) => relation_info.to.clone(), dml::FieldType::Enum(t) => t.clone(), dml::FieldType::Unsupported(t) => t.clone(), dml::FieldType::Scalar(t, _, _) => type_to_string(t), } } fn type_to_string(scalar: &ScalarType) -> String { scalar.to_string() } fn get_relation_name(field: &dml::Field) -> Option<String> { match &field { dml::Field::RelationField(rf) => Some(rf.relation_info.name.clone()), _ => None, } } fn get_relation_from_fields(field: &dml::Field) -> Option<Vec<String>> { match &field { dml::Field::RelationField(rf) => Some(rf.relation_info.fields.clone()), _ => None, } } fn get_relation_to_fields(field: &dml::Field) -> Option<Vec<String>> { match &field { dml::Field::RelationField(rf) => Some(rf.relation_info.references.clone()), _ => None, } } fn get_relation_delete_strategy(field: &dml::Field) -> Option<String> { match &field { dml::Field::RelationField(rf) => rf.relation_info.on_delete.map(|ri| ri.to_string()), _ => None, } } #[cfg(test)] mod tests { use super::schema_to_dmmf; use datamodel::dml::Datamodel; use pretty_assertions::assert_eq; use std::fs; pub(crate) fn parse(datamodel_string: &str) -> Datamodel { match datamodel::parse_datamodel(datamodel_string) { Ok(s) => s.subject, Err(errs) => { panic!( "Datamodel parsing failed\n\n{}", errs.to_pretty_string("", datamodel_string) ) } } } fn render_to_dmmf(schema: &datamodel::dml::Datamodel) -> String { let dmmf = schema_to_dmmf(schema); serde_json::to_string_pretty(&dmmf).expect("Failed to render JSON") } #[test] fn test_dmmf_rendering() { let test_cases = vec![ "general", "functions", "source", "source_with_comments", "source_with_generator", "without_relation_name", "ignore", ]; for test_case in test_cases { println!("TESTING: {}", test_case); let datamodel_string = load_from_file(format!("{}.prisma", test_case).as_str()); let dml = parse(&datamodel_string); let dmmf_string = render_to_dmmf(&dml); assert_eq_json( &dmmf_string, &load_from_file(format!("{}.json", test_case).as_str()), test_case, ); } } #[track_caller] fn assert_eq_json(a: &str, b: &str, msg: &str) { let json_a: serde_json::Value = serde_json::from_str(a).expect("The String a was not valid JSON."); let json_b: serde_json::Value = serde_json::from_str(b).expect("The String b was not valid JSON."); assert_eq!(json_a, json_b, "{}", msg); } fn load_from_file(file: &str) -> String { let samples_folder_path = concat!(env!("CARGO_MANIFEST_DIR"), "/test_files"); fs::read_to_string(format!("{}/{}", samples_folder_path, file)).unwrap() } }
35.788406
120
0.584029
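A hypothetical driver for the conversion above, mirroring the file's own test helpers; it assumes the same `datamodel` and `serde_json` versions this crate builds against, and that `schema_to_dmmf` is in scope. The schema text is made up for illustration.

fn main() {
    let schema = r#"
        model User {
          id   Int    @id
          name String
        }
    "#;
    // Parse the Prisma schema into the dml AST, as the file's tests do.
    let parsed = datamodel::parse_datamodel(schema)
        .expect("schema should parse")
        .subject;
    // Convert to the DMMF serialization AST and print it as JSON.
    let dmmf = schema_to_dmmf(&parsed);
    println!("{}", serde_json::to_string_pretty(&dmmf).expect("DMMF serializes"));
}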
e8f1a47d10d92aae76764515ab29717bba4bc5e3
10,278
use crate::context::{Context, Tokens};
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::spanned::Spanned as _;

struct Expander {
    ctx: Context,
    tokens: Tokens,
}

impl Expander {
    /// Expand on a struct.
    fn expand_struct(
        &mut self,
        input: &syn::DeriveInput,
        st: &syn::DataStruct,
    ) -> Option<TokenStream> {
        let (expanded, expected) = match &st.fields {
            syn::Fields::Unit => {
                let value = &self.tokens.value;

                let expanded = quote_spanned! {
                    input.span() =>
                    #value::Unit => {
                        Ok(Self)
                    }
                    #value::UnitStruct(..) => {
                        Ok(Self)
                    }
                };

                (expanded, &self.tokens.unit_struct)
            }
            syn::Fields::Unnamed(unnamed) => {
                let expanded = &self.expand_unnamed(unnamed)?;
                let value = &self.tokens.value;

                let expanded = quote_spanned! {
                    unnamed.span() =>
                    #value::Tuple(tuple) => {
                        let tuple = tuple.borrow_ref()?;
                        Ok(Self(#expanded))
                    }
                    #value::TupleStruct(tuple) => {
                        let tuple = tuple.borrow_ref()?;
                        Ok(Self(#expanded))
                    }
                };

                (expanded, &self.tokens.tuple)
            }
            syn::Fields::Named(named) => {
                let expanded = &self.expand_named(named)?;
                let value = &self.tokens.value;

                let expanded = quote_spanned! {
                    named.span() =>
                    #value::Object(object) => {
                        let object = object.borrow_ref()?;
                        Ok(Self { #expanded })
                    }
                    #value::Struct(object) => {
                        let object = object.borrow_ref()?;
                        Ok(Self { #expanded })
                    }
                };

                (expanded, &self.tokens.object)
            }
        };

        let ident = &input.ident;
        let value = &self.tokens.value;
        let vm_error = &self.tokens.vm_error;
        let from_value = &self.tokens.from_value;

        Some(quote! {
            impl #from_value for #ident {
                fn from_value(value: #value) -> ::std::result::Result<Self, #vm_error> {
                    match value {
                        #expanded
                        actual => {
                            Err(#vm_error::expected::<#expected>(actual.type_info()?))
                        }
                    }
                }
            }
        })
    }

    /// Expand on an enum.
    fn expand_enum(&mut self, input: &syn::DeriveInput, en: &syn::DataEnum) -> Option<TokenStream> {
        let mut unit_matches = Vec::new();
        let mut unnamed_matches = Vec::new();
        let mut named_matches = Vec::new();

        for variant in &en.variants {
            let ident = &variant.ident;
            let lit_str = syn::LitStr::new(&ident.to_string(), variant.span());

            match &variant.fields {
                syn::Fields::Unit => {
                    unit_matches.push(quote_spanned! { variant.span() =>
                        #lit_str => Ok(Self::#ident)
                    });
                }
                syn::Fields::Unnamed(named) => {
                    let expanded = self.expand_unnamed(named)?;

                    unnamed_matches.push(quote_spanned! { variant.span() =>
                        #lit_str => {
                            Ok( Self::#ident ( #expanded ) )
                        }
                    });
                }
                syn::Fields::Named(named) => {
                    let expanded = self.expand_named(named)?;

                    named_matches.push(quote_spanned! { variant.span() =>
                        #lit_str => {
                            Ok( Self::#ident { #expanded } )
                        }
                    });
                }
            }
        }

        let from_value = &self.tokens.from_value;
        let variant_data = &self.tokens.variant_data;
        let ident = &input.ident;
        let value = &self.tokens.value;
        let vm_error = &self.tokens.vm_error;
        let vm_error_kind = &self.tokens.vm_error_kind;

        let variant = quote_spanned!
{ input.span() => #value::Variant(variant) => { let variant = variant.borrow_ref()?; let mut it = variant.rtti().item.iter(); let name = match it.next_back_str() { Some(name) => name, None => return Err(#vm_error::from(#vm_error_kind::MissingVariantName)), }; match variant.data() { #variant_data::Unit => match name { #(#unit_matches,)* name => { return Err(#vm_error::from(#vm_error_kind::MissingVariant { name: name.into() })) } }, #variant_data::Tuple(tuple) => match name { #(#unnamed_matches)* name => { return Err(#vm_error::from(#vm_error_kind::MissingVariant { name: name.into() })) } }, #variant_data::Struct(object) => match name { #(#named_matches)* name => { return Err(#vm_error::from(#vm_error_kind::MissingVariant { name: name.into() })) } }, } } }; Some(quote_spanned! { input.span() => impl #from_value for #ident { fn from_value(value: #value) -> ::std::result::Result<Self, #vm_error> { match value { #variant, actual => { Err(#vm_error::from(#vm_error_kind::ExpectedVariant { actual: actual.type_info()?, })) } } } } }) } /// Get a field identifier. fn field_ident<'a>(&mut self, field: &'a syn::Field) -> Option<&'a syn::Ident> { match &field.ident { Some(ident) => Some(ident), None => { self.ctx.errors.push(syn::Error::new_spanned( field, "unnamed fields are not supported", )); None } } } /// Expand unnamed fields. fn expand_unnamed(&mut self, unnamed: &syn::FieldsUnnamed) -> Option<TokenStream> { let mut from_values = Vec::new(); for (index, field) in unnamed.unnamed.iter().enumerate() { let _ = self.ctx.field_attrs(&field.attrs)?; let from_value = &self.tokens.from_value; let vm_error = &self.tokens.vm_error; let vm_error_kind = &self.tokens.vm_error_kind; let from_value = quote_spanned! { field.span() => #from_value::from_value(value.clone())? }; from_values.push(quote_spanned! { field.span() => match tuple.get(#index) { Some(value) => #from_value, None => { return Err(#vm_error::from(#vm_error_kind::MissingTupleIndex { target: std::any::type_name::<Self>(), index: #index, })); } } }); } Some(quote_spanned!(unnamed.span() => #(#from_values),*)) } /// Expand named fields. fn expand_named(&mut self, named: &syn::FieldsNamed) -> Option<TokenStream> { let mut from_values = Vec::new(); for field in &named.named { let ident = self.field_ident(field)?; let _ = self.ctx.field_attrs(&field.attrs)?; let name = &syn::LitStr::new(&ident.to_string(), ident.span()); let from_value = &self.tokens.from_value; let vm_error = &self.tokens.vm_error; let vm_error_kind = &self.tokens.vm_error_kind; let from_value = quote_spanned! { field.span() => #from_value::from_value(value.clone())? }; from_values.push(quote_spanned! 
{ field.span() => #ident: match object.get(#name) { Some(value) => #from_value, None => { return Err(#vm_error::from(#vm_error_kind::MissingStructField { target: std::any::type_name::<Self>(), name: #name, })); } } }); } Some(quote_spanned!(named.span() => #(#from_values),* )) } } pub(super) fn expand(input: &syn::DeriveInput) -> Result<TokenStream, Vec<syn::Error>> { let mut ctx = Context::new(); let attrs = match ctx.type_attrs(&input.attrs) { Some(attrs) => attrs, None => { return Err(ctx.errors); } }; let tokens = ctx.tokens_with_module(attrs.module.as_ref()); let mut expander = Expander { ctx, tokens }; match &input.data { syn::Data::Struct(st) => { if let Some(expanded) = expander.expand_struct(input, st) { return Ok(expanded); } } syn::Data::Enum(en) => { if let Some(expanded) = expander.expand_enum(input, en) { return Ok(expanded); } } syn::Data::Union(un) => { expander.ctx.errors.push(syn::Error::new_spanned( un.union_token, "not supported on unions", )); } } Err(expander.ctx.errors) }
34.146179
109
0.43238
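The expansion above is easiest to read next to the code shape it produces. This standalone mock (made-up `Value` and `VmError` types, not the real rune API) shows that shape for a tuple struct: match on the dynamic value's variant and either rebuild `Self` field by field or report what was expected.

#[derive(Debug)]
enum Value {
    Unit,
    Tuple(Vec<i64>),
}

#[derive(Debug, PartialEq)]
struct Pair(i64, i64);

#[derive(Debug, PartialEq)]
enum VmError {
    Expected(&'static str),
    MissingTupleIndex(usize),
}

impl Pair {
    // Roughly what the derive generates for `struct Pair(i64, i64)`.
    fn from_value(value: Value) -> Result<Self, VmError> {
        match value {
            Value::Tuple(t) => Ok(Pair(
                *t.get(0).ok_or(VmError::MissingTupleIndex(0))?,
                *t.get(1).ok_or(VmError::MissingTupleIndex(1))?,
            )),
            _ => Err(VmError::Expected("tuple")),
        }
    }
}

fn main() {
    assert_eq!(Pair::from_value(Value::Tuple(vec![1, 2])), Ok(Pair(1, 2)));
    assert_eq!(Pair::from_value(Value::Unit), Err(VmError::Expected("tuple")));
}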
640c9f3636d4b89262cf19045c0c9d346504ca53
11,005
#![allow(missing_docs, nonstandard_style)]

use crate::ffi::{OsStr, OsString};
use crate::io::ErrorKind;
use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::path::PathBuf;
use crate::ptr;
use crate::time::Duration;

pub use self::rand::hashmap_random_keys;
pub use libc::strlen;

#[macro_use]
pub mod compat;

pub mod alloc;
pub mod args;
pub mod c;
pub mod cmath;
pub mod condvar;
pub mod env;
pub mod ext;
pub mod fast_thread_local;
pub mod fs;
pub mod handle;
pub mod io;
pub mod memchr;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
pub mod rwlock;
pub mod thread;
pub mod thread_local;
pub mod time;
cfg_if::cfg_if! {
    if #[cfg(not(target_vendor = "uwp"))] {
        pub mod stdio;
        pub mod stack_overflow;
    } else {
        pub mod stdio_uwp;
        pub mod stack_overflow_uwp;
        pub use self::stdio_uwp as stdio;
        pub use self::stack_overflow_uwp as stack_overflow;
    }
}

#[cfg(not(test))]
pub fn init() {}

pub fn decode_error_kind(errno: i32) -> ErrorKind {
    match errno as c::DWORD {
        c::ERROR_ACCESS_DENIED => return ErrorKind::PermissionDenied,
        c::ERROR_ALREADY_EXISTS => return ErrorKind::AlreadyExists,
        c::ERROR_FILE_EXISTS => return ErrorKind::AlreadyExists,
        c::ERROR_BROKEN_PIPE => return ErrorKind::BrokenPipe,
        c::ERROR_FILE_NOT_FOUND => return ErrorKind::NotFound,
        c::ERROR_PATH_NOT_FOUND => return ErrorKind::NotFound,
        c::ERROR_NO_DATA => return ErrorKind::BrokenPipe,
        c::ERROR_SEM_TIMEOUT
        | c::WAIT_TIMEOUT
        | c::ERROR_DRIVER_CANCEL_TIMEOUT
        | c::ERROR_OPERATION_ABORTED
        | c::ERROR_SERVICE_REQUEST_TIMEOUT
        | c::ERROR_COUNTER_TIMEOUT
        | c::ERROR_TIMEOUT
        | c::ERROR_RESOURCE_CALL_TIMED_OUT
        | c::ERROR_CTX_MODEM_RESPONSE_TIMEOUT
        | c::ERROR_CTX_CLIENT_QUERY_TIMEOUT
        | c::FRS_ERR_SYSVOL_POPULATE_TIMEOUT
        | c::ERROR_DS_TIMELIMIT_EXCEEDED
        | c::DNS_ERROR_RECORD_TIMED_OUT
        | c::ERROR_IPSEC_IKE_TIMED_OUT
        | c::ERROR_RUNLEVEL_SWITCH_TIMEOUT
        | c::ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT => return ErrorKind::TimedOut,
        _ => {}
    }

    match errno {
        c::WSAEACCES => ErrorKind::PermissionDenied,
        c::WSAEADDRINUSE => ErrorKind::AddrInUse,
        c::WSAEADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
        c::WSAECONNABORTED => ErrorKind::ConnectionAborted,
        c::WSAECONNREFUSED => ErrorKind::ConnectionRefused,
        c::WSAECONNRESET => ErrorKind::ConnectionReset,
        c::WSAEINVAL => ErrorKind::InvalidInput,
        c::WSAENOTCONN => ErrorKind::NotConnected,
        c::WSAEWOULDBLOCK => ErrorKind::WouldBlock,
        c::WSAETIMEDOUT => ErrorKind::TimedOut,
        _ => ErrorKind::Other,
    }
}

pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
    let ptr = haystack.as_ptr();
    let mut len = haystack.len();
    let mut start = &haystack[..];

    // For performance reasons unroll the loop eight times.
    while len >= 8 {
        if start[0] == needle {
            return Some((start.as_ptr() as usize - ptr as usize) / 2);
        }
        if start[1] == needle {
            return Some((start[1..].as_ptr() as usize - ptr as usize) / 2);
        }
        if start[2] == needle {
            return Some((start[2..].as_ptr() as usize - ptr as usize) / 2);
        }
        if start[3] == needle {
            return Some((start[3..].as_ptr() as usize - ptr as usize) / 2);
        }
        if start[4] == needle {
            return Some((start[4..].as_ptr() as usize - ptr as usize) / 2);
        }
        if start[5] == needle {
            return Some((start[5..].as_ptr() as usize - ptr as usize) / 2);
        }
        if start[6] == needle {
            return Some((start[6..].as_ptr() as usize - ptr as usize) / 2);
        }
        if start[7] == needle {
            return Some((start[7..].as_ptr() as usize - ptr as usize) / 2);
        }

        start = &start[8..];
        len -= 8;
    }

    for (i, c) in start.iter().enumerate() {
        if *c == needle {
            return Some((start.as_ptr() as usize - ptr as usize) / 2 + i);
        }
    }
    None
}

pub fn to_u16s<S: AsRef<OsStr>>(s: S) -> crate::io::Result<Vec<u16>> {
    fn inner(s: &OsStr) -> crate::io::Result<Vec<u16>> {
        let mut maybe_result: Vec<u16> = s.encode_wide().collect();
        if unrolled_find_u16s(0, &maybe_result).is_some() {
            return Err(crate::io::Error::new(
                ErrorKind::InvalidInput,
                "strings passed to WinAPI cannot contain NULs",
            ));
        }
        maybe_result.push(0);
        Ok(maybe_result)
    }
    inner(s.as_ref())
}

// Many Windows APIs follow a pattern where we hand them a buffer and they then
// report back to us how large the buffer should be or how many bytes
// currently reside in the buffer. This function is an abstraction over such
// functions that makes them easier to call.
//
// The first callback, `f1`, is yielded a (pointer, len) pair which can be
// passed to a syscall. The `ptr` is valid for `len` items (u16 in this case).
// The closure is expected to return what the syscall returns which will be
// interpreted by this function to determine if the syscall needs to be invoked
// again (with more buffer space).
//
// Once the syscall has completed (errors bail out early) the second closure is
// yielded the data which has been read from the syscall. The return value
// from this closure is then the return value of the function.
fn fill_utf16_buf<F1, F2, T>(mut f1: F1, f2: F2) -> crate::io::Result<T>
where
    F1: FnMut(*mut u16, c::DWORD) -> c::DWORD,
    F2: FnOnce(&[u16]) -> T,
{
    // Start off with a stack buf but then spill over to the heap if we end up
    // needing more space.
    let mut stack_buf = [0u16; 512];
    let mut heap_buf = Vec::new();
    unsafe {
        let mut n = stack_buf.len();
        loop {
            let buf = if n <= stack_buf.len() {
                &mut stack_buf[..]
            } else {
                let extra = n - heap_buf.len();
                heap_buf.reserve(extra);
                heap_buf.set_len(n);
                &mut heap_buf[..]
            };

            // This function is typically called on windows API functions which
            // will return the correct length of the string, but these functions
            // also return `0` on error. In some cases, however, the
            // returned "correct length" may actually be 0!
            //
            // To handle this case we call `SetLastError` to reset it to 0 and
            // then check it again if we get the "0 error value". If the "last
            // error" is still 0 then we interpret it as a 0 length buffer and
            // not an actual error.
c::SetLastError(0); let k = match f1(buf.as_mut_ptr(), n as c::DWORD) { 0 if c::GetLastError() == 0 => 0, 0 => return Err(crate::io::Error::last_os_error()), n => n, } as usize; if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER { n *= 2; } else if k >= n { n = k; } else { return Ok(f2(&buf[..k])); } } } } fn os2path(s: &[u16]) -> PathBuf { PathBuf::from(OsString::from_wide(s)) } #[allow(dead_code)] // Only used in backtrace::gnu::get_executable_filename() fn wide_char_to_multi_byte( code_page: u32, flags: u32, s: &[u16], no_default_char: bool, ) -> crate::io::Result<Vec<i8>> { unsafe { let mut size = c::WideCharToMultiByte( code_page, flags, s.as_ptr(), s.len() as i32, ptr::null_mut(), 0, ptr::null(), ptr::null_mut(), ); if size == 0 { return Err(crate::io::Error::last_os_error()); } let mut buf = Vec::with_capacity(size as usize); buf.set_len(size as usize); let mut used_default_char = c::FALSE; size = c::WideCharToMultiByte( code_page, flags, s.as_ptr(), s.len() as i32, buf.as_mut_ptr(), buf.len() as i32, ptr::null(), if no_default_char { &mut used_default_char } else { ptr::null_mut() }, ); if size == 0 { return Err(crate::io::Error::last_os_error()); } if no_default_char && used_default_char == c::TRUE { return Err(crate::io::Error::new( crate::io::ErrorKind::InvalidData, "string cannot be converted to requested code page", )); } buf.set_len(size as usize); Ok(buf) } } pub fn truncate_utf16_at_nul(v: &[u16]) -> &[u16] { match unrolled_find_u16s(0, v) { // don't include the 0 Some(i) => &v[..i], None => v, } } pub trait IsZero { fn is_zero(&self) -> bool; } macro_rules! impl_is_zero { ($($t:ident)*) => ($(impl IsZero for $t { fn is_zero(&self) -> bool { *self == 0 } })*) } impl_is_zero! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize } pub fn cvt<I: IsZero>(i: I) -> crate::io::Result<I> { if i.is_zero() { Err(crate::io::Error::last_os_error()) } else { Ok(i) } } pub fn dur2timeout(dur: Duration) -> c::DWORD { // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the // timeouts in windows APIs are typically u32 milliseconds. To translate, we // have two pieces to take care of: // // * Nanosecond precision is rounded up // * Greater than u32::MAX milliseconds (50 days) is rounded up to INFINITE // (never time out). dur.as_secs() .checked_mul(1000) .and_then(|ms| ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000)) .and_then(|ms| ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 { 1 } else { 0 })) .map(|ms| if ms > <c::DWORD>::MAX as u64 { c::INFINITE } else { ms as c::DWORD }) .unwrap_or(c::INFINITE) } // On Windows, use the processor-specific __fastfail mechanism. In Windows 8 // and later, this will terminate the process immediately without running any // in-process exception handlers. In earlier versions of Windows, this // sequence of instructions will be treated as an access violation, // terminating the process but without necessarily bypassing all exception // handlers. // // https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail #[allow(unreachable_code)] pub fn abort_internal() -> ! { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe { llvm_asm!("int $$0x29" :: "{ecx}"(7) ::: volatile); // 7 is FAST_FAIL_FATAL_APP_EXIT crate::intrinsics::unreachable(); } crate::intrinsics::abort(); }
32.949102
94
0.584462
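The grow-and-retry contract that `fill_utf16_buf` wraps is worth seeing in isolation. This portable mock keeps only the essential protocol (the callee reports the size it needs, the caller enlarges the buffer and retries); `fake_syscall` is invented and no real Windows API is involved.

// Reports the required length when the buffer is too small; otherwise writes.
fn fake_syscall(data: &[u16], buf: &mut [u16]) -> usize {
    if buf.len() < data.len() {
        return data.len(); // required size, nothing written
    }
    buf[..data.len()].copy_from_slice(data);
    data.len()
}

fn fill_buf(data: &[u16]) -> Vec<u16> {
    let mut buf = vec![0u16; 4]; // deliberately too small at first
    loop {
        let n = fake_syscall(data, &mut buf);
        if n <= buf.len() {
            buf.truncate(n);
            return buf;
        }
        buf.resize(n, 0); // grow to the reported size and retry
    }
}

fn main() {
    let wide: Vec<u16> = "C:\\a\\long\\path".encode_utf16().collect();
    assert_eq!(fill_buf(&wide), wide);
}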
22b0d723aad8c8c7ff13434929db5029a6f63508
281
// compile-flags: -Zmiri-disable-isolation extern { pub fn chdir(dir: *const u8) -> i32; } fn main() { let path = vec![0xc3u8, 0x28, 0]; // test that `chdir` errors with invalid utf-8 path unsafe { chdir(path.as_ptr()) }; //~ ERROR is not a valid utf-8 string }
23.416667
75
0.619217
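Why those particular bytes are invalid: 0xC3 opens a two-byte UTF-8 sequence, and 0x28 ('(') is not a valid continuation byte. A one-liner confirms it:

fn main() {
    // 0xC3 expects a continuation byte in 0x80..=0xBF; 0x28 is not one.
    assert!(std::str::from_utf8(&[0xc3, 0x28]).is_err());
}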
165ff82622f7e64581e03ce0a79d35062b5cc5a6
3,773
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. pub mod fvm; pub mod operator; use { fidl_fuchsia_hardware_block_partition::Guid, fuchsia_async::{Task, TimeoutExt}, fuchsia_zircon::Vmo, futures::{future::join_all, SinkExt}, fvm::Volume, log::debug, operator::VolumeOperator, rand::{rngs::SmallRng, Rng, SeedableRng}, std::{thread::sleep, time::Duration}, stress_test_utils::TestInstance, }; // All partitions in this test have their type set to this arbitrary GUID. const TYPE_GUID: Guid = Guid { value: [0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf], }; pub async fn run_test( mut rng: SmallRng, ramdisk_block_count: u64, fvm_slice_size: u64, ramdisk_block_size: u64, num_volumes: u64, max_slices_in_extend: u64, max_vslice_count: u64, disconnect_secs: u64, rebind_probability: f64, time_limit_secs: Option<u64>, num_operations: Option<u64>, ) { let vmo_size = ramdisk_block_count * ramdisk_block_size; // Create the VMO that the ramdisk is backed by let vmo = Vmo::create(vmo_size).unwrap(); // Initialize the ramdisk and setup FVM. let mut instance = TestInstance::init(&vmo, fvm_slice_size, ramdisk_block_size).await; let mut tasks = vec![]; let mut senders = vec![]; for i in 0..num_volumes { // Make a new RNG for this volume let volume_rng_seed: u128 = rng.gen(); let volume_rng = SmallRng::from_seed(volume_rng_seed.to_le_bytes()); // Create the new volume let volume_name = format!("testpart-{}", i); let instance_guid = instance.new_volume(&volume_name, TYPE_GUID).await; // Connect to the volume let (volume, sender) = Volume::new(instance_guid, fvm_slice_size).await; // Create the operator let operator = VolumeOperator::new(volume, volume_rng, max_slices_in_extend, max_vslice_count); // Start the operator let task = operator.run(num_operations.unwrap_or(u64::MAX)); tasks.push(task); senders.push(sender); } // Send the initial block path to all operators for sender in senders.iter_mut() { let _ = sender.send(instance.block_path()).await; } // Create the disconnection task in a new thread if disconnect_secs > 0 { Task::blocking(async move { loop { sleep(Duration::from_secs(disconnect_secs)); if rng.gen_bool(rebind_probability) { debug!("Rebinding FVM driver"); instance.rebind_fvm_driver().await; } else { // Crash the old instance and replace it with a new instance. // This will cause the component tree to be taken down abruptly. debug!("Killing component manager"); instance.kill_component_manager(); instance = TestInstance::existing(&vmo, ramdisk_block_size).await; } // Give the new block path to the operators. // Ignore the result because some operators may have completed. let path = instance.block_path(); for sender in senders.iter_mut() { let _ = sender.send(path.clone()).await; } } }) .detach(); } let operator_tasks = join_all(tasks); if let Some(time_limit_secs) = time_limit_secs { operator_tasks.on_timeout(Duration::from_secs(time_limit_secs), || vec![]).await; } else { operator_tasks.await; }; }
33.096491
92
0.618606
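The per-volume RNG fan-out above (one root seed, one derived seed per worker) is a reusable trick for reproducible stress tests. A sketch with the `rand` crate, using `seed_from_u64` rather than the 16-byte seed the file builds, since the seed width varies between `SmallRng` backends:

use rand::{rngs::SmallRng, Rng, SeedableRng};

fn main() {
    // One root seed makes every worker's stream reproducible.
    let mut root = SmallRng::seed_from_u64(42);
    for worker in 0..3u32 {
        // Draw a fresh seed per worker so the streams are independent.
        let child_seed: u64 = root.gen();
        let mut child = SmallRng::seed_from_u64(child_seed);
        println!("worker {}: first draw = {}", worker, child.gen::<u32>());
    }
}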
48632e121a5bd5d9797aaec39dec041a55b7ccd3
6,556
use super::DataFrame;
use super::Series;
use std::collections::HashMap;
use wasm_bindgen::prelude::*;

macro_rules! operation {
    ($fn_name: ident) => {
        #[wasm_bindgen]
        impl DataFrame {
            /// Returns the requested statistic for the given column
            pub fn $fn_name(&self, col: JsValue) -> f64 {
                let col_name: String = serde_wasm_bindgen::from_value(col).unwrap();
                if self.data.contains_key(&col_name) {
                    match &self.data[&col_name] {
                        Series::Floats(value) => {
                            return value.$fn_name();
                        }
                        Series::Integers(value) => {
                            return value.$fn_name() as f64;
                        }
                        Series::Strings(_value) => {
                            panic!(
                                "{} function not supported for strings",
                                stringify!($fn_name)
                            );
                        }
                    }
                } else {
                    panic!("Column name {} not found", col_name);
                }
            }
        }
    };
}

operation!(min);
operation!(max);
operation!(mean);
operation!(median);

#[wasm_bindgen]
impl DataFrame {
    /// Returns the minimum of all columns
    #[wasm_bindgen(js_name = minColumns)]
    pub fn min_columns(&self) -> JsValue {
        let mut res: HashMap<String, f64> = HashMap::new();
        for (name, ser) in &self.data {
            match ser {
                Series::Floats(value) => {
                    res.entry(name.clone()).or_insert(value.min());
                }
                Series::Integers(value) => {
                    res.entry(name.clone()).or_insert(value.min() as f64);
                }
                _ => {}
            }
        }
        serde_wasm_bindgen::to_value(&res).unwrap()
    }

    /// Returns the maximum of all columns
    #[wasm_bindgen(js_name = maxColumns)]
    pub fn max_columns(&self) -> JsValue {
        let mut res: HashMap<String, f64> = HashMap::new();
        for (name, ser) in &self.data {
            match &ser {
                Series::Floats(value) => {
                    res.entry(name.clone()).or_insert(value.max());
                }
                Series::Integers(value) => {
                    res.entry(name.clone()).or_insert(value.max() as f64);
                }
                _ => {}
            }
        }
        serde_wasm_bindgen::to_value(&res).unwrap()
    }

    /// Returns the mean of all columns
    #[wasm_bindgen(js_name = meanColumns)]
    pub fn mean_columns(&self) -> JsValue {
        let mut res: HashMap<String, f64> = HashMap::new();
        for (name, ser) in &self.data {
            match ser {
                Series::Floats(value) => {
                    res.entry(name.clone()).or_insert(value.mean());
                }
                Series::Integers(value) => {
                    res.entry(name.clone()).or_insert(value.mean());
                }
                _ => {}
            }
        }
        serde_wasm_bindgen::to_value(&res).unwrap()
    }

    /// Returns the median of all columns
    #[wasm_bindgen(js_name = medianColumns)]
    pub fn median_columns(&self) -> JsValue {
        let mut res: HashMap<String, f64> = HashMap::new();
        for (name, ser) in &self.data {
            match &ser {
                Series::Floats(value) => {
                    res.entry(name.clone()).or_insert(value.median());
                }
                Series::Integers(value) => {
                    res.entry(name.clone()).or_insert(value.median());
                }
                _ => {}
            }
        }
        serde_wasm_bindgen::to_value(&res).unwrap()
    }

    /// Returns the variance of the given column
    pub fn variance(&self, col: JsValue, degree_of_freedom: f64) -> f64 {
        let col_name: String = serde_wasm_bindgen::from_value(col).unwrap();
        if self.data.contains_key(&col_name) {
            match &self.data[&col_name] {
                Series::Floats(value) => return value.variance(degree_of_freedom),
                Series::Integers(value) => return value.variance(degree_of_freedom),
                Series::Strings(_value) => {
                    panic!("Variance not supported for strings");
                }
            }
        }
        panic!("Column name {} not found", col_name)
    }

    /// Returns the variance of all columns
    #[wasm_bindgen(js_name = varianceColumns)]
    pub fn variance_columns(&self, degree_of_freedom: f64) -> JsValue {
        let mut res: HashMap<String, f64> = HashMap::new();
        for (name, ser) in &self.data {
            match &ser {
                Series::Floats(value) => {
                    res.entry(name.clone())
                        .or_insert(value.variance(degree_of_freedom));
                }
                Series::Integers(value) => {
                    res.entry(name.clone())
                        .or_insert(value.variance(degree_of_freedom));
                }
                _ => {}
            }
        }
        serde_wasm_bindgen::to_value(&res).unwrap()
    }

    /// Returns the standard deviation of the given
column #[wasm_bindgen(js_name = standardDeviation)] pub fn std_dev(&self, col: JsValue, degree_of_freedom: f64) -> f64 { let col_name: String = serde_wasm_bindgen::from_value(col).unwrap(); if self.data.contains_key(&col_name) { match &self.data[&col_name] { Series::Floats(value) => return value.std_dev(degree_of_freedom), Series::Integers(value) => return value.std_dev(degree_of_freedom), Series::Strings(_value) => { panic!("Standard deviation not supported for strings"); } } } panic!("Column name {} not found", col_name) } #[wasm_bindgen(js_name = standardDeviationColumns)] pub fn std_dev_columns(&self, degree_of_freedom: f64) -> JsValue { let mut res: HashMap<String, f64> = HashMap::new(); for (name, ser) in &self.data { match &ser { Series::Floats(value) => { res.entry(name.clone()) .or_insert(value.std_dev(degree_of_freedom)); } Series::Integers(value) => { res.entry(name.clone()) .or_insert(value.std_dev(degree_of_freedom)); } _ => {} } } serde_wasm_bindgen::to_value(&res).unwrap() } }
33.111111
84
0.489933
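The `operation!` macro above stamps out one near-identical method per statistic; the pattern is worth isolating because it also shows how `stringify!` keeps panic messages in sync with the generated name (the original hardcoded "min", fixed above). A standalone sketch without wasm-bindgen, using made-up `Series`/`Frame` types:

use std::collections::HashMap;

struct Series(Vec<f64>);

impl Series {
    fn min(&self) -> f64 {
        self.0.iter().cloned().fold(f64::INFINITY, f64::min)
    }
    fn max(&self) -> f64 {
        self.0.iter().cloned().fold(f64::NEG_INFINITY, f64::max)
    }
}

struct Frame(HashMap<String, Series>);

macro_rules! operation {
    ($fn_name:ident) => {
        impl Frame {
            // One forwarding method per statistic, stamped out by the macro.
            fn $fn_name(&self, col: &str) -> f64 {
                self.0
                    .get(col)
                    .unwrap_or_else(|| {
                        panic!("{}: column {} not found", stringify!($fn_name), col)
                    })
                    .$fn_name()
            }
        }
    };
}

operation!(min);
operation!(max);

fn main() {
    let mut data = HashMap::new();
    data.insert("a".to_string(), Series(vec![3.0, 1.0, 2.0]));
    let df = Frame(data);
    assert_eq!(df.min("a"), 1.0);
    assert_eq!(df.max("a"), 3.0);
}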
e69319bd1a0cd7619ffd82d49051af9c46ef3dcc
13,225
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::UPRST { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct PEN0R { bits: bool, } impl PEN0R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN1R { bits: bool, } impl PEN1R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN2R { bits: bool, } impl PEN2R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN3R { bits: bool, } impl PEN3R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN4R { bits: bool, } impl PEN4R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN5R { bits: bool, } impl PEN5R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN6R { bits: bool, } impl PEN6R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] 
#[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PEN7R { bits: bool, } impl PEN7R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Proxy"] pub struct _PEN0W<'a> { w: &'a mut W, } impl<'a> _PEN0W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN1W<'a> { w: &'a mut W, } impl<'a> _PEN1W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN2W<'a> { w: &'a mut W, } impl<'a> _PEN2W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN3W<'a> { w: &'a mut W, } impl<'a> _PEN3W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN4W<'a> { w: &'a mut W, } impl<'a> _PEN4W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN5W<'a> { w: &'a mut W, } impl<'a> _PEN5W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN6W<'a> { w: &'a mut W, } impl<'a> 
_PEN6W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PEN7W<'a> { w: &'a mut W, } impl<'a> _PEN7W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - Pipe0 Enable"] #[inline] pub fn pen0(&self) -> PEN0R { let bits = { const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN0R { bits } } #[doc = "Bit 1 - Pipe1 Enable"] #[inline] pub fn pen1(&self) -> PEN1R { let bits = { const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN1R { bits } } #[doc = "Bit 2 - Pipe2 Enable"] #[inline] pub fn pen2(&self) -> PEN2R { let bits = { const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN2R { bits } } #[doc = "Bit 3 - Pipe3 Enable"] #[inline] pub fn pen3(&self) -> PEN3R { let bits = { const MASK: bool = true; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN3R { bits } } #[doc = "Bit 4 - Pipe4 Enable"] #[inline] pub fn pen4(&self) -> PEN4R { let bits = { const MASK: bool = true; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN4R { bits } } #[doc = "Bit 5 - Pipe5 Enable"] #[inline] pub fn pen5(&self) -> PEN5R { let bits = { const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN5R { bits } } #[doc = "Bit 6 - Pipe6 Enable"] #[inline] pub fn pen6(&self) -> PEN6R { let bits = { const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN6R { bits } } #[doc = "Bit 7 - Pipe7 Enable"] #[inline] pub fn pen7(&self) -> PEN7R { let bits = { const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PEN7R { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - Pipe0 Enable"] #[inline] pub fn pen0(&mut self) -> _PEN0W { _PEN0W { w: self } } #[doc = "Bit 1 - Pipe1 Enable"] #[inline] pub fn pen1(&mut self) -> _PEN1W { _PEN1W { w: self } } #[doc = "Bit 2 - Pipe2 Enable"] #[inline] pub fn pen2(&mut self) -> _PEN2W { _PEN2W { w: self } } #[doc = "Bit 3 - Pipe3 Enable"] #[inline] pub fn pen3(&mut self) -> _PEN3W { _PEN3W { w: self } } #[doc = "Bit 4 - Pipe4 Enable"] #[inline] pub fn pen4(&mut self) -> _PEN4W { _PEN4W { w: self } } #[doc = "Bit 5 - Pipe5 Enable"] #[inline] pub fn pen5(&mut self) -> _PEN5W { _PEN5W { w: self } } #[doc = "Bit 6 - Pipe6 Enable"] #[inline] pub fn pen6(&mut self) -> _PEN6W { _PEN6W { w: self } } #[doc = "Bit 7 - 
Pipe7 Enable"] #[inline] pub fn pen7(&mut self) -> _PEN7W { _PEN7W { w: self } } }
24.627561
59
0.484991
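The generated API above is the usual svd2rust read-modify-write scheme: `modify` reads the register once, lets a closure flip fields through typed proxies, and writes the result back (a real call would look like `UPRST.modify(|_, w| w.pen0().set_bit())`). This self-contained mock, with a `Cell<u32>` standing in for the memory-mapped register and a simplified closure signature, shows how the proxies chain:

use core::cell::Cell;

struct W {
    bits: u32,
}

struct PenW<'a> {
    w: &'a mut W,
    offset: u8,
}

impl<'a> PenW<'a> {
    fn set_bit(self) -> &'a mut W {
        self.w.bits |= 1 << self.offset;
        self.w
    }
}

impl W {
    fn pen0(&mut self) -> PenW<'_> {
        PenW { w: self, offset: 0 }
    }
    fn pen3(&mut self) -> PenW<'_> {
        PenW { w: self, offset: 3 }
    }
}

struct Reg {
    cell: Cell<u32>,
}

impl Reg {
    fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&'w mut W) -> &'w mut W,
    {
        let mut w = W { bits: self.cell.get() }; // read
        f(&mut w); // run the caller's field edits through the proxies
        self.cell.set(w.bits); // write back
    }
}

fn main() {
    let uprst = Reg { cell: Cell::new(0) };
    uprst.modify(|w| w.pen0().set_bit().pen3().set_bit());
    assert_eq!(uprst.cell.get(), 0b1001);
}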
e464b77f36c2ead21a73be6a2e3f0e160ce3c331
474
use image::Rgba;
use nalgebra::Matrix4;

use super::vertex::Vertex;

pub struct Triangle {
    pub a: Vertex,
    pub b: Vertex,
    pub c: Vertex,
    pub colour: Rgba<u8>,
}

impl Triangle {
    /// Applies the given homogeneous transform to each vertex, returning a
    /// new triangle with the same colour.
    pub fn transform(&self, transform: &Matrix4<f32>) -> Triangle {
        Triangle {
            a: self.a.transform(transform),
            b: self.b.transform(transform),
            c: self.c.transform(transform),
            colour: self.colour,
        }
    }
}
19.75
67
0.559072
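For a feel of what `transform` does per vertex, here is a hedged sketch applying one homogeneous matrix to three corner points with nalgebra directly; the record's `Vertex::transform` presumably wraps the same operation, and the sketch assumes a nalgebra version that provides `Matrix4::new_translation` and `transform_point`.

use nalgebra::{Matrix4, Point3, Vector3};

fn main() {
    // A homogeneous translation by (1, 2, 3).
    let translate = Matrix4::new_translation(&Vector3::new(1.0_f32, 2.0, 3.0));
    let corners = [
        Point3::origin(),
        Point3::new(1.0, 0.0, 0.0),
        Point3::new(0.0, 1.0, 0.0),
    ];
    for p in &corners {
        println!("{} -> {}", p, translate.transform_point(p));
    }
}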
d97788e97b2ed46ae7d386d78581618ae3370ac4
5,380
//! Lock disabling IRQs while held
//!
//! See the [sync] module documentation.
//!
//! [sync]: crate::sync

use crate::i386::instructions::interrupts;
use spin::{Mutex as SpinLock, MutexGuard as SpinLockGuard};
use core::fmt;
use core::mem::ManuallyDrop;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::Ordering;
use super::INTERRUPT_DISARM;

/// Permanently disables the interrupts. Forever.
///
/// Only used by the panic handlers!
///
/// Simply sets [INTERRUPT_DISARM].
pub unsafe fn permanently_disable_interrupts() {
    INTERRUPT_DISARM.store(true, Ordering::SeqCst);
    unsafe { interrupts::cli() }
}

/// SpinLock that disables IRQ.
///
/// # Description
///
/// This type behaves like a spinlock from the Linux kernel, chosen for
/// simplicity of use and implementation. The mapping is as follows:
///
/// - `lock` behaves like a `spinlock_irqsave`. It returns a guard.
/// - Dropping the guard behaves like `spinlock_irqrestore`
///
/// This means that locking a spinlock disables interrupts until all spinlock
/// guards have been dropped.
///
/// A note on reordering: reordering lock drops is prohibited and doing so will
/// result in UB.
//
// TODO: Find sane design for SpinLockIRQ safety
// BODY: Currently, SpinLockIRQ API is unsound. If the guards are dropped in
// BODY: the wrong order, it may cause IF to be reset too early.
// BODY:
// BODY: Ideally, we would need a way to prevent the guard variable from being
// BODY: reassigned. AKA: prevent moving. Note that this is different from what
// BODY: the Pin API solves. The Pin API is about locking a variable in one
// BODY: memory location, but its binding may still be moved and dropped.
// BODY: Unfortunately, Rust does not have a way to express that a value cannot
// BODY: be reassigned.
// BODY:
// BODY: Another possibility would be to switch to a callback API. This would
// BODY: solve the problem, but the scheduler would be unable to consume such
// BODY: locks. Maybe we could have an unsafe "scheduler_relock" function that
// BODY: may only be called from the scheduler?
pub struct SpinLockIRQ<T: ?Sized> {
    /// SpinLock we wrap.
    internal: SpinLock<T>
}

impl<T> SpinLockIRQ<T> {
    /// Creates a new SpinLockIRQ wrapping the supplied data.
    pub const fn new(internal: T) -> SpinLockIRQ<T> {
        SpinLockIRQ {
            internal: SpinLock::new(internal)
        }
    }

    /// Consumes this SpinLockIRQ, returning the underlying data.
    pub fn into_inner(self) -> T {
        self.internal.into_inner()
    }
}

impl<T: ?Sized> SpinLockIRQ<T> {
    /// Disables interrupts and locks the mutex.
    pub fn lock(&self) -> SpinLockIRQGuard<T> {
        if INTERRUPT_DISARM.load(Ordering::SeqCst) {
            let internalguard = self.internal.lock();
            SpinLockIRQGuard(ManuallyDrop::new(internalguard), false)
        } else {
            // Save current interrupt state.
            let saved_intpt_flag = interrupts::are_enabled();

            // Disable interruptions
            unsafe { interrupts::cli(); }

            let internalguard = self.internal.lock();
            SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag)
        }
    }

    /// Disables interrupts and tries to lock the mutex.
    pub fn try_lock(&self) -> Option<SpinLockIRQGuard<T>> {
        if INTERRUPT_DISARM.load(Ordering::SeqCst) {
            self.internal.try_lock()
                .map(|v| SpinLockIRQGuard(ManuallyDrop::new(v), false))
        } else {
            // Save current interrupt state.
            let saved_intpt_flag = interrupts::are_enabled();

            // Disable interruptions
            unsafe { interrupts::cli(); }

            // Lock spinlock
            let internalguard = self.internal.try_lock();

            if let Some(internalguard) = internalguard {
                // If the lock was successful, return the guard.
Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag)) } else { // Else, restore interrupt state if saved_intpt_flag { unsafe { interrupts::sti(); } } None } } } /// Force unlocks the lock. pub unsafe fn force_unlock(&self) { self.internal.force_unlock() } } impl<T: fmt::Debug> fmt::Debug for SpinLockIRQ<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(v) = self.try_lock() { f.debug_struct("SpinLockIRQ") .field("data", &v) .finish() } else { write!(f, "SpinLockIRQ {{ <locked> }}") } } } /// The SpinLockIrq lock guard. #[derive(Debug)] pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop<SpinLockGuard<'a, T>>, bool); impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { fn drop(&mut self) { // TODO: Spin release // unlock unsafe { ManuallyDrop::drop(&mut self.0); } // Restore irq if self.1 { unsafe { interrupts::sti(); } } // TODO: Enable preempt } } impl<'a, T: ?Sized + 'a> Deref for SpinLockIRQGuard<'a, T> { type Target = T; fn deref(&self) -> &T { &*self.0 } } impl<'a, T: ?Sized + 'a> DerefMut for SpinLockIRQGuard<'a, T> { fn deref_mut(&mut self) -> &mut T { &mut *self.0 } }
31.461988
90
0.618401
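A userspace mock of the save/disable/restore choreography documented above: an `AtomicBool` stands in for the CPU interrupt flag, and `ManuallyDrop` preserves the record's unlock-before-restore ordering. All types here are invented for illustration; nothing touches real IRQs.

use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Mutex, MutexGuard};

static INTERRUPTS_ENABLED: AtomicBool = AtomicBool::new(true);

struct IrqLock<T>(Mutex<T>);
struct IrqGuard<'a, T>(ManuallyDrop<MutexGuard<'a, T>>, bool);

impl<T> IrqLock<T> {
    fn lock(&self) -> IrqGuard<'_, T> {
        // "spinlock_irqsave": remember the flag, then pretend to `cli`.
        let saved = INTERRUPTS_ENABLED.swap(false, Ordering::SeqCst);
        IrqGuard(ManuallyDrop::new(self.0.lock().unwrap()), saved)
    }
}

impl<T> Drop for IrqGuard<'_, T> {
    fn drop(&mut self) {
        // Unlock first, then "spinlock_irqrestore" the saved flag.
        unsafe { ManuallyDrop::drop(&mut self.0) }
        if self.1 {
            INTERRUPTS_ENABLED.store(true, Ordering::SeqCst);
        }
    }
}

fn main() {
    let lock = IrqLock(Mutex::new(0u32));
    {
        let _guard = lock.lock();
        assert!(!INTERRUPTS_ENABLED.load(Ordering::SeqCst)); // "IRQs off" while held
    }
    assert!(INTERRUPTS_ENABLED.load(Ordering::SeqCst)); // restored on drop
}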
5bdfcaaa516c1865060286f8d7f5efe98a3ef766
3,547
#![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! COMP1
//!
//! Used by: stm32h735, stm32h743, stm32h743v, stm32h747cm4, stm32h747cm7, stm32h753, stm32h753v

#[cfg(not(feature = "nosync"))]
pub use crate::stm32h7::peripherals::comp1::Instance;
pub use crate::stm32h7::peripherals::comp1::{RegisterBlock, ResetValues};
pub use crate::stm32h7::peripherals::comp1::{CFGR1, CFGR2, ICFR, OR, SR};

/// Access functions for the COMP1 peripheral instance
pub mod COMP1 {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0x58003800,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in COMP1
    pub const reset: ResetValues = ResetValues {
        SR: 0x00000000,
        ICFR: 0x00000000,
        OR: 0x00000000,
        CFGR1: 0x00000000,
        CFGR2: 0x00000000,
    };

    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut COMP1_TAKEN: bool = false;

    /// Safe access to COMP1
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        external_cortex_m::interrupt::free(|_| unsafe {
            if COMP1_TAKEN {
                None
            } else {
                COMP1_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to COMP1
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if COMP1_TAKEN && inst.addr == INSTANCE.addr {
                COMP1_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal COMP1
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
    /// state.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        COMP1_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to COMP1
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const COMP1: *const RegisterBlock = 0x58003800 as *const _;
33.462264
96
0.623344
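The take/release doc comments above describe a runtime singleton. A minimal portable mock of the same idea, with an `AtomicBool` in place of the `interrupt::free` critical section used on hardware; the types and address are illustrative only.

use std::sync::atomic::{AtomicBool, Ordering};

static TAKEN: AtomicBool = AtomicBool::new(false);

#[derive(Debug)]
struct Instance {
    addr: u32,
}

fn take() -> Option<Instance> {
    // swap returns the previous value: true means someone already holds it.
    if TAKEN.swap(true, Ordering::AcqRel) {
        None
    } else {
        Some(Instance { addr: 0x5800_3800 })
    }
}

fn release(_inst: Instance) {
    TAKEN.store(false, Ordering::Release);
}

fn main() {
    let comp1 = take().expect("first take succeeds");
    assert!(take().is_none()); // second take is refused
    release(comp1);
    assert!(take().is_some()); // available again after release
}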
91505a6ec42b580171dec674b6f15a02cba5162f
976
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! Contains record-based API for reading Parquet files. pub mod reader; mod api; mod triplet; pub use self::api::{Row, RowAccessor, List, ListAccessor, Map, MapAccessor};
39.04
76
0.754098
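A hypothetical reader using the re-exported row API, assuming a contemporary version of the `parquet` crate and a made-up `data.parquet` file; `get_long(0)` only succeeds if the first column holds 64-bit integers.

use std::fs::File;

use parquet::file::reader::{FileReader, SerializedFileReader};
use parquet::record::RowAccessor;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let file = File::open("data.parquet")?;
    let reader = SerializedFileReader::new(file)?;
    // Iterate fully-assembled rows; None means "no projection", read all columns.
    for row in reader.get_row_iter(None)? {
        println!("first column as i64: {}", row.get_long(0)?);
    }
    Ok(())
}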
ac94b5f99d988b257491d5e4b19755b21bb18ec2
43,071
// Generated from definition io.k8s.api.rbac.v1beta1.Role /// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. #[derive(Clone, Debug, Default, PartialEq)] pub struct Role { /// Standard object's metadata. pub metadata: Option<crate::v1_8::apimachinery::pkg::apis::meta::v1::ObjectMeta>, /// Rules holds all the PolicyRules for this Role pub rules: Vec<crate::v1_8::api::rbac::v1beta1::PolicyRule>, } // Begin rbac.authorization.k8s.io/v1beta1/Role // Generated from operation createRbacAuthorizationV1beta1NamespacedRole impl Role { /// create a Role /// /// Use the returned [`crate::ResponseBody`]`<`[`CreateNamespacedRoleResponse`]`>` constructor, or [`CreateNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. #[cfg(feature = "api")] pub fn create_namespaced_role( namespace: &str, body: &crate::v1_8::api::rbac::v1beta1::Role, optional: CreateNamespacedRoleOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateNamespacedRoleResponse>), crate::RequestError> { let CreateNamespacedRoleOptional { pretty, } = optional; let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let mut __request = http::Request::post(__url); let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`Role::create_namespaced_role`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct CreateNamespacedRoleOptional<'a> { /// If 'true', then the output is pretty printed. 
pub pretty: Option<&'a str>, } /// Use `<CreateNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::create_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum CreateNamespacedRoleResponse { Ok(crate::v1_8::api::rbac::v1beta1::Role), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for CreateNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((CreateNamespacedRoleResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((CreateNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation deleteRbacAuthorizationV1beta1CollectionNamespacedRole impl Role { /// delete collection of Role /// /// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionNamespacedRoleResponse`]`>` constructor, or [`DeleteCollectionNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `delete_optional` /// /// Delete options. Use `Default::default()` to not pass any. /// /// * `list_optional` /// /// List options. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn delete_collection_namespaced_role( namespace: &str, delete_optional: crate::v1_8::DeleteOptional<'_>, list_optional: crate::v1_8::ListOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionNamespacedRoleResponse>), crate::RequestError> { let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); list_optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::delete(__url); let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<DeleteCollectionNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::delete_collection_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum DeleteCollectionNamespacedRoleResponse { OkStatus(crate::v1_8::apimachinery::pkg::apis::meta::v1::Status), OkValue(crate::v1_8::api::rbac::v1beta1::RoleList), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for DeleteCollectionNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; let is_status = match result.get("kind") { Some(serde_json::Value::String(s)) if s == "Status" => true, _ => false, }; if is_status { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteCollectionNamespacedRoleResponse::OkStatus(result), buf.len())) } else { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteCollectionNamespacedRoleResponse::OkValue(result), buf.len())) } }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((DeleteCollectionNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation deleteRbacAuthorizationV1beta1NamespacedRole impl Role { /// delete a Role /// /// Use the returned [`crate::ResponseBody`]`<`[`DeleteNamespacedRoleResponse`]`>` constructor, or [`DeleteNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the Role /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn delete_namespaced_role( name: &str, namespace: &str, optional: crate::v1_8::DeleteOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteNamespacedRoleResponse>), crate::RequestError> { let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles/{name}", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __request = http::Request::delete(__url); let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<DeleteNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::delete_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum DeleteNamespacedRoleResponse { OkStatus(crate::v1_8::apimachinery::pkg::apis::meta::v1::Status), OkValue(crate::v1_8::api::rbac::v1beta1::Role), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for DeleteNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; let is_status = match result.get("kind") { Some(serde_json::Value::String(s)) if s == "Status" => true, _ => false, }; if is_status { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteNamespacedRoleResponse::OkStatus(result), buf.len())) } else { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteNamespacedRoleResponse::OkValue(result), buf.len())) } }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((DeleteNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation listRbacAuthorizationV1beta1NamespacedRole impl Role { /// list or watch objects of kind Role /// /// This operation only supports listing all items of this type. /// /// Use the returned [`crate::ResponseBody`]`<`[`ListNamespacedRoleResponse`]`>` constructor, or [`ListNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn list_namespaced_role( namespace: &str, optional: crate::v1_8::ListOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNamespacedRoleResponse>), crate::RequestError> { let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<ListNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::list_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ListNamespacedRoleResponse { Ok(crate::v1_8::api::rbac::v1beta1::RoleList), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ListNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ListNamespacedRoleResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ListNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation listRbacAuthorizationV1beta1RoleForAllNamespaces impl Role { /// list or watch objects of kind Role /// /// This operation only supports listing all items of this type. /// /// Use the returned [`crate::ResponseBody`]`<`[`ListRoleForAllNamespacesResponse`]`>` constructor, or [`ListRoleForAllNamespacesResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn list_role_for_all_namespaces( optional: crate::v1_8::ListOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListRoleForAllNamespacesResponse>), crate::RequestError> { let __url = "/apis/rbac.authorization.k8s.io/v1beta1/roles?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<ListRoleForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::list_role_for_all_namespaces`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ListRoleForAllNamespacesResponse { Ok(crate::v1_8::api::rbac::v1beta1::RoleList), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ListRoleForAllNamespacesResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ListRoleForAllNamespacesResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ListRoleForAllNamespacesResponse::Other(result), read)) }, } } } // Generated from operation patchRbacAuthorizationV1beta1NamespacedRole impl Role { /// partially update the specified Role /// /// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedRoleResponse`]`>` constructor, or [`PatchNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the Role /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn patch_namespaced_role( name: &str, namespace: &str, body: &crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch, optional: crate::v1_8::PatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedRoleResponse>), crate::RequestError> { let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles/{name}?", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::patch(__url); let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body { crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json", crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json", crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json", })); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<PatchNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::patch_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum PatchNamespacedRoleResponse { Ok(crate::v1_8::api::rbac::v1beta1::Role), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for PatchNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((PatchNamespacedRoleResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((PatchNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation readRbacAuthorizationV1beta1NamespacedRole impl Role { /// read the specified Role /// /// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedRoleResponse`]`>` constructor, or [`ReadNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the Role /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn read_namespaced_role( name: &str, namespace: &str, optional: ReadNamespacedRoleOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedRoleResponse>), crate::RequestError> { let ReadNamespacedRoleOptional { pretty, } = optional; let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles/{name}?", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`Role::read_namespaced_role`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct ReadNamespacedRoleOptional<'a> { /// If 'true', then the output is pretty printed. pub pretty: Option<&'a str>, } /// Use `<ReadNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::read_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ReadNamespacedRoleResponse { Ok(crate::v1_8::api::rbac::v1beta1::Role), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ReadNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ReadNamespacedRoleResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ReadNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation replaceRbacAuthorizationV1beta1NamespacedRole impl Role { /// replace the specified Role /// /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedRoleResponse`]`>` constructor, or [`ReplaceNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the Role /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn replace_namespaced_role( name: &str, namespace: &str, body: &crate::v1_8::api::rbac::v1beta1::Role, optional: ReplaceNamespacedRoleOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedRoleResponse>), crate::RequestError> { let ReplaceNamespacedRoleOptional { pretty, } = optional; let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles/{name}?", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let mut __request = http::Request::put(__url); let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`Role::replace_namespaced_role`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct ReplaceNamespacedRoleOptional<'a> { /// If 'true', then the output is pretty printed. pub pretty: Option<&'a str>, } /// Use `<ReplaceNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::replace_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ReplaceNamespacedRoleResponse { Ok(crate::v1_8::api::rbac::v1beta1::Role), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ReplaceNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ReplaceNamespacedRoleResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ReplaceNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation watchRbacAuthorizationV1beta1NamespacedRole impl Role { /// list or watch objects of kind Role /// /// This operation only supports watching one item, or a list of items, of this type for changes. /// /// Use the returned [`crate::ResponseBody`]`<`[`WatchNamespacedRoleResponse`]`>` constructor, or [`WatchNamespacedRoleResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn watch_namespaced_role( namespace: &str, optional: crate::v1_8::WatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNamespacedRoleResponse>), crate::RequestError> { let __url = format!("/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<WatchNamespacedRoleResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::watch_namespaced_role`] #[cfg(feature = "api")] #[derive(Debug)] pub enum WatchNamespacedRoleResponse { Ok(crate::v1_8::apimachinery::pkg::apis::meta::v1::WatchEvent<Role>), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for WatchNamespacedRoleResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter(); let (result, byte_offset) = match deserializer.next() { Some(Ok(value)) => (value, deserializer.byte_offset()), Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Some(Err(err)) => return Err(crate::ResponseError::Json(err)), None => return Err(crate::ResponseError::NeedMoreData), }; Ok((WatchNamespacedRoleResponse::Ok(result), byte_offset)) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((WatchNamespacedRoleResponse::Other(result), read)) }, } } } // Generated from operation watchRbacAuthorizationV1beta1RoleForAllNamespaces impl Role { /// list or watch objects of kind Role /// /// This operation only supports watching one item, or a list of items, of this type for changes. /// /// Use the returned [`crate::ResponseBody`]`<`[`WatchRoleForAllNamespacesResponse`]`>` constructor, or [`WatchRoleForAllNamespacesResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn watch_role_for_all_namespaces( optional: crate::v1_8::WatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchRoleForAllNamespacesResponse>), crate::RequestError> { let __url = "/apis/rbac.authorization.k8s.io/v1beta1/roles?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<WatchRoleForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`Role::watch_role_for_all_namespaces`] #[cfg(feature = "api")] #[derive(Debug)] pub enum WatchRoleForAllNamespacesResponse { Ok(crate::v1_8::apimachinery::pkg::apis::meta::v1::WatchEvent<Role>), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for WatchRoleForAllNamespacesResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter(); let (result, byte_offset) = match deserializer.next() { Some(Ok(value)) => (value, deserializer.byte_offset()), Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Some(Err(err)) => return Err(crate::ResponseError::Json(err)), None => return Err(crate::ResponseError::NeedMoreData), }; Ok((WatchRoleForAllNamespacesResponse::Ok(result), byte_offset)) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((WatchRoleForAllNamespacesResponse::Other(result), read)) }, } } } // End rbac.authorization.k8s.io/v1beta1/Role impl crate::Resource for Role { fn api_version() -> &'static str { "rbac.authorization.k8s.io/v1beta1" } fn group() -> &'static str { "rbac.authorization.k8s.io" } fn kind() -> &'static str { "Role" } fn version() -> &'static str { "v1beta1" } } impl crate::Metadata for Role { type Ty = crate::v1_8::apimachinery::pkg::apis::meta::v1::ObjectMeta; fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> { self.metadata.as_ref() } } impl<'de> serde::Deserialize<'de> for Role { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_metadata, Key_rules, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "metadata" => Field::Key_metadata, "rules" => Field::Key_rules, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Role; fn 
expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "struct Role") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> { let mut value_metadata: Option<crate::v1_8::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None; let mut value_rules: Option<Vec<crate::v1_8::api::rbac::v1beta1::PolicyRule>> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_api_version => { let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::api_version() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version())); } }, Field::Key_kind => { let value_kind: String = serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::kind() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind())); } }, Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?, Field::Key_rules => value_rules = Some(serde::de::MapAccess::next_value(&mut map)?), Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(Role { metadata: value_metadata, rules: value_rules.ok_or_else(|| serde::de::Error::missing_field("rules"))?, }) } } deserializer.deserialize_struct( "Role", &[ "apiVersion", "kind", "metadata", "rules", ], Visitor, ) } } impl serde::Serialize for Role { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "Role", 3 + self.metadata.as_ref().map_or(0, |_| 1), )?; serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?; serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?; if let Some(value) = &self.metadata { serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?; } serde::ser::SerializeStruct::serialize_field(&mut state, "rules", &self.rules)?; serde::ser::SerializeStruct::end(state) } }
42.729167
193
0.566298
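Every generated operation above follows the same shape: a pure function returning an `http::Request` plus a constructor for parsing the response, with all I/O left to the caller. A sketch of the round trip, assuming the crate root is `k8s_openapi` and using a hypothetical `send` helper in place of a real HTTP client:

```rust
use k8s_openapi::v1_8::api::rbac::v1beta1::{ListNamespacedRoleResponse, Role};
use k8s_openapi::Response; // trait providing try_from_parts

// Hypothetical transport; implement with your HTTP client of choice.
fn send(
    _req: http::Request<Vec<u8>>,
) -> Result<(http::StatusCode, Vec<u8>), Box<dyn std::error::Error>> {
    unimplemented!()
}

fn count_roles(namespace: &str) -> Result<usize, Box<dyn std::error::Error>> {
    // Build the request; no network traffic happens here.
    let (request, _response_body) =
        Role::list_namespaced_role(namespace, Default::default())?;

    // Execute the request and collect the status plus the full body bytes.
    let (status, bytes) = send(request)?;

    match ListNamespacedRoleResponse::try_from_parts(status, &bytes)? {
        (ListNamespacedRoleResponse::Ok(list), _bytes_read) => Ok(list.items.len()),
        (other, _) => Err(format!("unexpected response: {:?}", other).into()),
    }
}
```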
e5de761b7fa387489e90024a4aaea178ca8cc94f
12,631
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. // Inspired by nom, but simplified and with custom errors. #[derive(Debug)] pub enum ParseError<'a> { Backtrace, /// Parsing should completely fail. Failure(ParseErrorFailure<'a>), } #[derive(Debug)] pub struct ParseErrorFailure<'a> { pub input: &'a str, pub message: String, } impl<'a> ParseErrorFailure<'a> { pub fn new(input: &'a str, message: impl AsRef<str>) -> Self { ParseErrorFailure { input, message: message.as_ref().to_owned(), } } } impl<'a> ParseError<'a> { pub fn fail<O>( input: &'a str, message: impl AsRef<str>, ) -> ParseResult<'a, O> { Err(ParseError::Failure(ParseErrorFailure::new(input, message))) } pub fn backtrace<O>() -> ParseResult<'a, O> { Err(ParseError::Backtrace) } } pub type ParseResult<'a, O> = Result<(&'a str, O), ParseError<'a>>; /// Recognizes a character. pub fn ch<'a>(c: char) -> impl Fn(&'a str) -> ParseResult<'a, char> { if_true(next_char, move |found_char| *found_char == c) } /// Recognizes a character. pub fn next_char(input: &str) -> ParseResult<char> { match input.chars().next() { Some(next_char) => Ok((&input[next_char.len_utf8()..], next_char)), _ => ParseError::backtrace(), } } /// Recognizes any character in the provided string. pub fn one_of<'a>( value: &'static str, ) -> impl Fn(&'a str) -> ParseResult<'a, char> { move |input| { let (input, c) = next_char(input)?; if value.contains(c) { Ok((input, c)) } else { ParseError::backtrace() } } } /// Recognizes a string. pub fn tag<'a>( value: impl AsRef<str>, ) -> impl Fn(&'a str) -> ParseResult<'a, &'a str> { let value = value.as_ref().to_string(); move |input| { if input.starts_with(&value) { Ok((&input[value.len()..], &input[..value.len()])) } else { Err(ParseError::Backtrace) } } } /// Takes while the condition is true. pub fn take_while( cond: impl Fn(char) -> bool, ) -> impl Fn(&str) -> ParseResult<&str> { move |input| { for (pos, c) in input.char_indices() { if !cond(c) { return Ok((&input[pos..], &input[..pos])); } } Ok(("", input)) } } /// Maps a success to `Some(T)` and a backtrace to `None`. pub fn maybe<'a, O>( combinator: impl Fn(&'a str) -> ParseResult<O>, ) -> impl Fn(&'a str) -> ParseResult<Option<O>> { move |input| match combinator(input) { Ok((input, value)) => Ok((input, Some(value))), Err(ParseError::Backtrace) => Ok((input, None)), Err(err) => Err(err), } } /// Maps the combinator by a function. pub fn map<'a, O, R>( combinator: impl Fn(&'a str) -> ParseResult<O>, func: impl Fn(O) -> R, ) -> impl Fn(&'a str) -> ParseResult<R> { move |input| { let (input, result) = combinator(input)?; Ok((input, func(result))) } } /// Checks for either to match. pub fn or<'a, O>( a: impl Fn(&'a str) -> ParseResult<'a, O>, b: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { move |input| match a(input) { Ok(result) => Ok(result), Err(ParseError::Backtrace) => b(input), Err(err) => Err(err), } } /// Checks for any to match. pub fn or3<'a, O>( a: impl Fn(&'a str) -> ParseResult<'a, O>, b: impl Fn(&'a str) -> ParseResult<'a, O>, c: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { or(a, or(b, c)) } /// Checks for any to match. pub fn or4<'a, O>( a: impl Fn(&'a str) -> ParseResult<'a, O>, b: impl Fn(&'a str) -> ParseResult<'a, O>, c: impl Fn(&'a str) -> ParseResult<'a, O>, d: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { or3(a, b, or(c, d)) } /// Checks for any to match. 
pub fn or5<'a, O>( a: impl Fn(&'a str) -> ParseResult<'a, O>, b: impl Fn(&'a str) -> ParseResult<'a, O>, c: impl Fn(&'a str) -> ParseResult<'a, O>, d: impl Fn(&'a str) -> ParseResult<'a, O>, e: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { or4(a, b, c, or(d, e)) } /// Checks for any to match. pub fn or6<'a, O>( a: impl Fn(&'a str) -> ParseResult<'a, O>, b: impl Fn(&'a str) -> ParseResult<'a, O>, c: impl Fn(&'a str) -> ParseResult<'a, O>, d: impl Fn(&'a str) -> ParseResult<'a, O>, e: impl Fn(&'a str) -> ParseResult<'a, O>, f: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { or5(a, b, c, d, or(e, f)) } /// Checks for any to match. pub fn or7<'a, O>( a: impl Fn(&'a str) -> ParseResult<'a, O>, b: impl Fn(&'a str) -> ParseResult<'a, O>, c: impl Fn(&'a str) -> ParseResult<'a, O>, d: impl Fn(&'a str) -> ParseResult<'a, O>, e: impl Fn(&'a str) -> ParseResult<'a, O>, f: impl Fn(&'a str) -> ParseResult<'a, O>, g: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { or6(a, b, c, d, e, or(f, g)) } /// Returns the second value and discards the first. pub fn preceded<'a, First, Second>( first: impl Fn(&'a str) -> ParseResult<'a, First>, second: impl Fn(&'a str) -> ParseResult<'a, Second>, ) -> impl Fn(&'a str) -> ParseResult<'a, Second> { move |input| { let (input, _) = first(input)?; let (input, return_value) = second(input)?; Ok((input, return_value)) } } /// Returns the first value and discards the second. pub fn terminated<'a, First, Second>( first: impl Fn(&'a str) -> ParseResult<'a, First>, second: impl Fn(&'a str) -> ParseResult<'a, Second>, ) -> impl Fn(&'a str) -> ParseResult<'a, First> { move |input| { let (input, return_value) = first(input)?; let (input, _) = second(input)?; Ok((input, return_value)) } } /// Gets a second value that is delimited by a first and third. pub fn delimited<'a, First, Second, Third>( first: impl Fn(&'a str) -> ParseResult<'a, First>, second: impl Fn(&'a str) -> ParseResult<'a, Second>, third: impl Fn(&'a str) -> ParseResult<'a, Third>, ) -> impl Fn(&'a str) -> ParseResult<'a, Second> { move |input| { let (input, _) = first(input)?; let (input, return_value) = second(input)?; let (input, _) = third(input)?; Ok((input, return_value)) } } /// Asserts that a combinator resolves. If backtracing occurs, returns a failure. pub fn assert_exists<'a, O>( combinator: impl Fn(&'a str) -> ParseResult<'a, O>, message: &'static str, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { assert(combinator, |result| result.is_ok(), message) } /// Asserts that a given condition is true about the combinator. /// Otherwise returns an error with the message. pub fn assert<'a, O>( combinator: impl Fn(&'a str) -> ParseResult<'a, O>, condition: impl Fn(Result<&(&'a str, O), &ParseError<'a>>) -> bool, message: &'static str, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { move |input| { let result = combinator(input); if condition(result.as_ref()) { result } else { match combinator(input) { Err(ParseError::Failure(err)) => { let mut message = message.to_string(); message.push_str("\n\n"); message.push_str(&err.message); ParseError::fail(err.input, message) } _ => ParseError::fail(input, message), } } } } /// Changes the input on a failure in order to provide /// a better error message. 
pub fn with_failure_input<'a, O>(
  new_input: &'a str,
  combinator: impl Fn(&'a str) -> ParseResult<'a, O>,
) -> impl Fn(&'a str) -> ParseResult<'a, O> {
  move |input| {
    let result = combinator(input);
    match result {
      Err(ParseError::Failure(mut err)) => {
        err.input = new_input;
        Err(ParseError::Failure(err))
      }
      _ => result,
    }
  }
}

/// Provides some context to a failure.
pub fn with_error_context<'a, O>(
  combinator: impl Fn(&'a str) -> ParseResult<'a, O>,
  message: &'static str,
) -> impl Fn(&'a str) -> ParseResult<'a, O> {
  move |input| match combinator(input) {
    Ok(result) => Ok(result),
    Err(ParseError::Backtrace) => Err(ParseError::Backtrace),
    Err(ParseError::Failure(err)) => {
      let mut message = message.to_string();
      message.push_str("\n\n");
      message.push_str(&err.message);
      ParseError::fail(err.input, message)
    }
  }
}

/// Keeps consuming a combinator into an array until a condition
/// is met or backtracing occurs.
pub fn many_till<'a, O, OCondition>(
  combinator: impl Fn(&'a str) -> ParseResult<'a, O>,
  condition: impl Fn(&'a str) -> ParseResult<'a, OCondition>,
) -> impl Fn(&'a str) -> ParseResult<'a, Vec<O>> {
  move |mut input| {
    let mut results = Vec::new();
    while !input.is_empty() && is_backtrace(condition(input))? {
      match combinator(input) {
        Ok((result_input, value)) => {
          results.push(value);
          input = result_input;
        }
        Err(ParseError::Backtrace) => {
          return Ok((input, results));
        }
        Err(err) => return Err(err),
      }
    }
    Ok((input, results))
  }
}

/// Keeps consuming a combinator into an array, consuming the provided
/// separator between entries, until either the combinator or the
/// separator backtracks.
pub fn separated_list<'a, O, OSeparator>(
  combinator: impl Fn(&'a str) -> ParseResult<'a, O>,
  separator: impl Fn(&'a str) -> ParseResult<'a, OSeparator>,
) -> impl Fn(&'a str) -> ParseResult<'a, Vec<O>> {
  move |mut input| {
    let mut results = Vec::new();
    while !input.is_empty() {
      match combinator(input) {
        Ok((result_input, value)) => {
          results.push(value);
          input = result_input;
        }
        Err(ParseError::Backtrace) => {
          return Ok((input, results));
        }
        Err(err) => return Err(err),
      }
      input = match separator(input) {
        Ok((input, _)) => input,
        Err(ParseError::Backtrace) => break,
        Err(err) => return Err(err),
      };
    }
    Ok((input, results))
  }
}

/// Applies the combinator 0 or more times and returns a vector
/// of all the parsed results.
pub fn many0<'a, O>(
  combinator: impl Fn(&'a str) -> ParseResult<'a, O>,
) -> impl Fn(&'a str) -> ParseResult<'a, Vec<O>> {
  many_till(combinator, |_| ParseError::backtrace::<()>())
}

/// Applies the combinator at least 1 time, but maybe more
/// and returns a vector of all the parsed results.
pub fn many1<'a, O>(
  combinator: impl Fn(&'a str) -> ParseResult<'a, O>,
) -> impl Fn(&'a str) -> ParseResult<'a, Vec<O>> {
  move |input| {
    let mut results = Vec::new();
    let (input, first_result) = combinator(input)?;
    results.push(first_result);
    let (input, next_results) = many0(&combinator)(input)?;
    results.extend(next_results);
    Ok((input, results))
  }
}

/// Skips the whitespace.
pub fn skip_whitespace(input: &str) -> ParseResult<()> {
  match whitespace(input) {
    Ok((input, _)) => Ok((input, ())),
    // the next char was not whitespace... continue.
    Err(ParseError::Backtrace) => Ok((input, ())),
    Err(err) => Err(err),
  }
}

/// Parses and expects whitespace.
pub fn whitespace(input: &str) -> ParseResult<&str> { if input.is_empty() { return ParseError::backtrace(); } for (pos, c) in input.char_indices() { if !c.is_whitespace() { if pos == 0 { return ParseError::backtrace(); } return Ok((&input[pos..], &input[..pos])); } } Ok(("", input)) } /// Checks if a condition is true for a combinator. pub fn if_true<'a, O>( combinator: impl Fn(&'a str) -> ParseResult<'a, O>, condition: impl Fn(&O) -> bool, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { move |input| { let (input, value) = combinator(input)?; if condition(&value) { Ok((input, value)) } else { ParseError::backtrace() } } } /// Checks if a combinator is false without consuming the input. pub fn check_not<'a, O>( combinator: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, ()> { move |input| match combinator(input) { Ok(_) => ParseError::backtrace(), Err(_) => Ok((input, ())), } } /// Logs the result for quick debugging purposes. #[cfg(debug_assertions)] #[allow(dead_code)] pub fn log_result<'a, O: std::fmt::Debug>( prefix: &'static str, combinator: impl Fn(&'a str) -> ParseResult<'a, O>, ) -> impl Fn(&'a str) -> ParseResult<'a, O> { move |input| { let result = combinator(input); println!("{} (input): {:?}", prefix, input); println!("{} (result): {:#?}", prefix, result); result } } fn is_backtrace<O>(result: ParseResult<O>) -> Result<bool, ParseError> { match result { Ok(_) => Ok(false), Err(ParseError::Backtrace) => Ok(true), Err(err) => Err(err), } }
28.512415
81
0.583802
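The combinators in the record above compose directly; everything needed for a small grammar is already there. A sketch that reuses them to parse a `key=value` pair (the `ident` and `key_value` helpers are illustrative additions, not part of the file):

```rust
/// Illustrative helper: one or more ASCII alphanumeric characters.
fn ident(input: &str) -> ParseResult<&str> {
  // take_while succeeds even on an empty match, so require non-empty.
  if_true(take_while(|c| c.is_ascii_alphanumeric()), |s| !s.is_empty())(input)
}

/// Illustrative helper: parses `key=value`, tolerating leading whitespace.
fn key_value(input: &str) -> ParseResult<(&str, &str)> {
  let (input, _) = skip_whitespace(input)?;
  let (input, key) = ident(input)?;
  let (input, _) = ch('=')(input)?;
  let (input, value) = ident(input)?;
  Ok((input, (key, value)))
}

// key_value("name=deno rest") == Ok((" rest", ("name", "deno")))
```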
14810274f07a792e3ac3a91c8999e076fb8ad5fa
263
#![cfg_attr(docsrs, feature(doc_cfg))] pub mod arena; pub mod contention_pool; mod error; mod functions; pub mod mem; pub mod sort; pub use functions::*; #[cfg(not(feature = "bigidx"))] pub type IdxSize = u32; #[cfg(feature = "bigidx")] pub type IdxSize = u64;
17.533333
38
0.695817
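The two `cfg` gates above are the whole trick: downstream code names `IdxSize` rather than a concrete integer width, so enabling `bigidx` recompiles everything with 64-bit row indices. A minimal sketch of width-agnostic code written against the alias:

```rust
// Compiles unchanged whether `bigidx` makes IdxSize u32 or u64; the
// only widening happens at the usize cast needed for slice indexing.
fn gather<T: Copy>(values: &[T], indices: &[IdxSize]) -> Vec<T> {
    indices.iter().map(|&i| values[i as usize]).collect()
}
```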
388356fb1db6b14c17509b708f77a16eedc22630
20,495
use kolgac_errors::ty::{TypeErr, TypeErrTy};

use kolgac::{
    ast::{Ast, MetaAst},
    token::TknTy,
    ty_rec::KolgaTy,
};

use std::collections::HashMap;

/// Represents a pair of types that can be unified. It's possible that the types
/// in the pair are already the same, in which case unification isn't required.
#[derive(Clone, Debug, PartialEq)]
pub struct TyMatch {
    pub lhs: KolgaTy,
    pub rhs: KolgaTy,
    pub meta: MetaAst,
}

impl TyMatch {
    pub fn new(lhs: KolgaTy, rhs: KolgaTy, meta: MetaAst) -> TyMatch {
        TyMatch {
            lhs: lhs,
            rhs: rhs,
            meta: meta,
        }
    }
}

/// Used to infer types for a given AST.
pub struct TyInfer {
    /// Represents a substitution from a variable name to a type
    subs: HashMap<String, KolgaTy>,
}

impl TyInfer {
    pub fn new() -> TyInfer {
        TyInfer {
            subs: HashMap::new(),
        }
    }

    /// Infers any types for a given AST. This function will walk the entire
    /// AST twice:
    ///
    /// 1. The first pass is to generate TyMatch structs, which contain a pair
    /// of KolgaTy's to be unified.
    /// 2. Next, we unify all the pairs of types in our TyMatch structs, and generate
    /// a map from typename to the most general unified type.
    /// 3. In the second pass of the AST, we replace all instances of symbolic types
    /// in the AST with the mgu's contained in the type mapping. After this pass,
    /// our program should have no symbolic types remaining.
    ///
    /// Returns an empty result, indicating success. There is no result to return,
    /// as we alter the AST in place in the last step of the function.
    pub fn infer(&mut self, ast: &mut Ast) -> Result<(), TypeErr> {
        match ast {
            Ast::Prog { meta: _, stmts } => {
                let ty_eqs = self.ty_eq(stmts);
                self.unify_all(ty_eqs)?;
            }
            _ => return Err(TypeErr::new(0, 0, TypeErrTy::InvalidInfer)),
        };

        match ast {
            Ast::Prog { meta: _, stmts } => {
                for stmt in stmts.iter_mut() {
                    self.update_tys(stmt);
                }
            }
            _ => (),
        };

        Ok(())
    }

    /// Rewrites the type records in the passed-in AST. After unification, we
    /// have a mapping from type name to the unified type, so we need to alter
    /// the existing type record to change the type to that. This function
    /// should walk the entire AST.
    fn update_tys(&self, ast: &mut Ast) {
        match *ast {
            Ast::BlckStmt {
                meta: _,
                ref mut stmts,
                ..
} => { for stmt in stmts.iter_mut() { self.update_tys(stmt); } } Ast::IfStmt { meta: _, ref mut cond_expr, ref mut if_stmts, ref mut elif_exprs, ref mut el_stmts, } => { self.update_tys(cond_expr); self.update_tys(if_stmts); for stmt in elif_exprs.iter_mut() { self.update_tys(stmt); } for stmt in el_stmts.iter_mut() { self.update_tys(stmt); } } Ast::ElifStmt { meta: _, ref mut cond_expr, ref mut stmts, } => { self.update_tys(cond_expr); self.update_tys(stmts); } Ast::WhileStmt { meta: _, ref mut cond_expr, ref mut stmts, } => { self.update_tys(cond_expr); self.update_tys(stmts); } Ast::ForStmt { meta: _, ref mut for_var_decl, ref mut for_cond_expr, ref mut for_step_expr, ref mut stmts, } => { self.update_tys(for_var_decl); self.update_tys(for_cond_expr); self.update_tys(for_step_expr); self.update_tys(stmts); } Ast::ExprStmt { meta: _, ref mut expr, } => { self.update_tys(expr); } Ast::VarAssignExpr { meta: _, ref mut ty_rec, ident_tkn: _, is_imm: _, is_global: _, ref mut value, } => { let potential_ty = self.subs.get(&ty_rec.name); if potential_ty.is_some() { ty_rec.ty = potential_ty.unwrap().clone(); self.update_tys(value); } } Ast::LogicalExpr { meta: _, ref mut ty_rec, op_tkn: _, ref mut lhs, ref mut rhs, } | Ast::BinaryExpr { meta: _, ref mut ty_rec, op_tkn: _, ref mut lhs, ref mut rhs, } => { let potential_ty = self.subs.get(&ty_rec.name); if potential_ty.is_some() { ty_rec.ty = potential_ty.unwrap().clone(); self.update_tys(lhs); self.update_tys(rhs); } } Ast::UnaryExpr { meta: _, ref mut ty_rec, op_tkn: _, ref mut rhs, } => { let potential_ty = self.subs.get(&ty_rec.name); if potential_ty.is_some() { ty_rec.ty = potential_ty.unwrap().clone(); self.update_tys(rhs); } } Ast::VarDeclExpr { meta: _, ref mut ty_rec, .. } | Ast::FnCallExpr { meta: _, ref mut ty_rec, .. } | Ast::ClassFnCallExpr { meta: _, ref mut ty_rec, .. } | Ast::PrimaryExpr { meta: _, ref mut ty_rec, .. } => { let potential_ty = self.subs.get(&ty_rec.name); if potential_ty.is_some() { ty_rec.ty = potential_ty.unwrap().clone(); } } Ast::FnDeclStmt { meta: _, ident_tkn: _, fn_params: _, ret_ty: _, ref mut fn_body, .. } => { self.update_tys(fn_body); } Ast::RetStmt { meta: _, ref mut ret_expr, } => { match *ret_expr { Some(ref mut expr) => self.update_tys(expr), None => (), }; } Ast::ClassDeclStmt { meta: _, ty_rec: _, ident_tkn: _, ref mut methods, .. } => { for mut mtod in methods { self.update_tys(&mut mtod); } } Ast::ClassPropSetExpr { .. } | Ast::ClassConstrExpr { .. } | Ast::Prog { .. } | Ast::ClassPropAccessExpr { .. } => (), } } fn ty_eq(&self, stmts: &mut Vec<Ast>) -> Vec<TyMatch> { let mut ty_eqs = Vec::new(); for stmt in stmts.iter() { ty_eqs.extend(self.gen_ty_eq(stmt)); } ty_eqs } fn unify_all(&mut self, ty_eqs: Vec<TyMatch>) -> Result<(), TypeErr> { for eq in ty_eqs { self.unify(eq.lhs, eq.rhs, eq.meta)?; } Ok(()) } /// Unifies two arbitrary types. At least one of the provided types /// should be a symbolic type, so long as the types aren't the same. /// This prevents an attempt at trying to unify two concrete types, /// like String and Num, which can never be unified. /// /// When we don't have at least one symbolic type here, it should indicate /// that we are trying to assign something to the wrong type. For example, /// take this program: /// /// let x ~= 10; /// x = "hello"; /// /// When attempting to unify this program, we would end up with an lhs /// argument Num, and a rhs arg String, which we cannot unify. In this case, /// we should return a type error with a type mismatch. 
    fn unify(&mut self, lhs: KolgaTy, rhs: KolgaTy, meta: MetaAst) -> Result<(), TypeErr> {
        if lhs == rhs {
            return Ok(());
        }

        match lhs {
            KolgaTy::Symbolic(_) => {
                return self.unify_var(lhs, rhs, meta);
            }
            _ => (),
        };

        match rhs {
            KolgaTy::Symbolic(_) => {
                return self.unify_var(rhs, lhs, meta);
            }
            _ => (),
        };

        Err(TypeErr::new(
            meta.line,
            meta.pos,
            TypeErrTy::TyMismatch(lhs.to_string(), rhs.to_string()),
        ))
    }

    /// Unifies two variable types. This is done by inserting the type on the rhs
    /// into our type mapping under the key provided by the lhs name. However,
    /// this is only done after we recursively call unify on the provided types,
    /// which we do to ensure that if we have already unified a pair, that unification
    /// is honored throughout the entire unification process.
    /// We expect lhs to be KolgaTy::Symbolic
    fn unify_var(&mut self, lhs: KolgaTy, rhs: KolgaTy, meta: MetaAst) -> Result<(), TypeErr> {
        let mb_lhs_name = match lhs.clone() {
            KolgaTy::Symbolic(name) => Some(name),
            _ => None,
        };

        let mb_rhs_name = match rhs.clone() {
            KolgaTy::Symbolic(name) => Some(name),
            _ => None,
        };

        let subs_clone = self.subs.clone();

        // Recursively unify if we have already tried to unify the lhs type,
        // to continue honoring the association between the existing lhs type
        // and the provided rhs type.
        let name = mb_lhs_name.unwrap();
        if self.subs.contains_key(&name) {
            let existing_ty = subs_clone.get(&name).unwrap();
            return self.unify(existing_ty.clone(), rhs, meta);
        }

        // Do the same for the rhs type.
        if mb_rhs_name.is_some() && self.subs.contains_key(&mb_rhs_name.clone().unwrap()) {
            let name = mb_rhs_name.unwrap();
            let existing_ty = subs_clone.get(&name).unwrap();
            return self.unify(lhs, existing_ty.clone(), meta);
        }

        // Ensure that the type doesn't contain a reference to itself
        // (i.e. let x = x) to prevent infinite unification.
        if self.occurs_check(lhs, rhs.clone()) {
            return Err(TypeErr::new(meta.line, meta.pos, TypeErrTy::InfiniteType));
        }

        // Insert the unified type for the lhs key (the name of the symbolic type)
        self.subs.insert(name, rhs);
        Ok(())
    }

    /// Checks if the provided lhs type occurs "inside" of the provided rhs type.
    /// This check is needed to avoid infinite recursion during unification
    /// (we would endlessly try to unify a type within itself).
    /// We expect lhs to be KolgaTy::Symbolic
    fn occurs_check(&self, lhs: KolgaTy, rhs: KolgaTy) -> bool {
        let mb_rhs_name = match rhs.clone() {
            KolgaTy::Symbolic(name) => Some(name),
            _ => None,
        };

        if lhs == rhs {
            return true;
        }

        let subs_clone = self.subs.clone();

        // We check if the rhs type is in our type mapping. If it is, we've already
        // recorded a type for the symbolic type provided. In that case, we need to
        // recursively check that type as well.
        if mb_rhs_name.is_some() && self.subs.contains_key(&mb_rhs_name.clone().unwrap()) {
            let name = mb_rhs_name.unwrap();
            let existing_ty = subs_clone.get(&name).unwrap();
            return self.occurs_check(lhs, existing_ty.clone());
        }

        false
    }

    /// Walks the entire AST and creates pairs of KolgaTy's to be unified in the next
    /// step of type inference. Typing rules are applied in this step to determine
    /// which types we expect certain expressions to evaluate to.
    fn gen_ty_eq(&self, ast: &Ast) -> Vec<TyMatch> {
        let mut ty_eqs = Vec::new();
        match *ast {
            Ast::PrimaryExpr {
                ..
} => ty_eqs, Ast::LogicalExpr { ref meta, ref ty_rec, ref op_tkn, ref lhs, ref rhs, } | Ast::BinaryExpr { ref meta, ref ty_rec, ref op_tkn, ref lhs, ref rhs, } => { ty_eqs.extend(self.gen_ty_eq(lhs)); ty_eqs.extend(self.gen_ty_eq(rhs)); // Binary operators expect numbers as their args: strings are not supported // We should be safe to unwrap here, otherwise we have a parsing error // (we're trying to put something in an expression without a type) let lhs_ty_rec = lhs.get_ty_rec().unwrap(); let rhs_ty_rec = rhs.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new(lhs_ty_rec.ty, KolgaTy::Num, meta.clone())); ty_eqs.push(TyMatch::new(rhs_ty_rec.ty, KolgaTy::Num, meta.clone())); if op_tkn.ty.is_cmp_op() { ty_eqs.push(TyMatch::new(ty_rec.ty.clone(), KolgaTy::Bool, meta.clone())); } else { ty_eqs.push(TyMatch::new(ty_rec.ty.clone(), KolgaTy::Num, meta.clone())); } ty_eqs } Ast::UnaryExpr { ref meta, ref ty_rec, ref op_tkn, ref rhs, } => { ty_eqs.extend(self.gen_ty_eq(rhs)); let rhs_ty_rec = rhs.get_ty_rec().unwrap(); if op_tkn.ty == TknTy::Bang { ty_eqs.push(TyMatch::new(rhs_ty_rec.ty, KolgaTy::Bool, meta.clone())); ty_eqs.push(TyMatch::new(ty_rec.ty.clone(), KolgaTy::Bool, meta.clone())); } else { ty_eqs.push(TyMatch::new(rhs_ty_rec.ty, KolgaTy::Num, meta.clone())); ty_eqs.push(TyMatch::new(ty_rec.ty.clone(), KolgaTy::Num, meta.clone())); } ty_eqs } Ast::ExprStmt { meta: _, ref expr } => { ty_eqs.extend(self.gen_ty_eq(expr)); ty_eqs } Ast::BlckStmt { meta: _, ref stmts, .. } => { for stmt in stmts.iter() { ty_eqs.extend(self.gen_ty_eq(stmt)); } ty_eqs } Ast::IfStmt { ref meta, ref cond_expr, ref if_stmts, ref elif_exprs, ref el_stmts, } => { ty_eqs.extend(self.gen_ty_eq(if_stmts)); let cond_expr_ty_rec = cond_expr.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new( cond_expr_ty_rec.ty, KolgaTy::Bool, meta.clone(), )); for stmt in elif_exprs.iter() { ty_eqs.extend(self.gen_ty_eq(stmt)); } for stmt in el_stmts.iter() { ty_eqs.extend(self.gen_ty_eq(stmt)); } ty_eqs } Ast::ElifStmt { ref meta, ref cond_expr, ref stmts, } => { ty_eqs.extend(self.gen_ty_eq(stmts)); let cond_expr_ty_rec = cond_expr.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new( cond_expr_ty_rec.ty, KolgaTy::Bool, meta.clone(), )); ty_eqs } Ast::WhileStmt { ref meta, ref cond_expr, ref stmts, } => { ty_eqs.extend(self.gen_ty_eq(stmts)); let cond_expr_ty_rec = cond_expr.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new( cond_expr_ty_rec.ty, KolgaTy::Bool, meta.clone(), )); ty_eqs } Ast::ForStmt { ref meta, ref for_var_decl, ref for_cond_expr, ref for_step_expr, ref stmts, } => { ty_eqs.extend(self.gen_ty_eq(stmts)); // The var declaration should be a number let var_decl_ty_rec = for_var_decl.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new(var_decl_ty_rec.ty, KolgaTy::Num, meta.clone())); // The cond expr should be a bool let cond_expr_ty_rec = for_cond_expr.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new( cond_expr_ty_rec.ty, KolgaTy::Bool, meta.clone(), )); // The step expression should be a number let step_expr_ty_rec = for_step_expr.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new( step_expr_ty_rec.ty, KolgaTy::Num, meta.clone(), )); ty_eqs } Ast::VarAssignExpr { ref meta, ref ty_rec, ident_tkn: _, is_imm: _, is_global: _, ref value, } => match **value { Ast::FnCallExpr { meta: ref fn_meta, ty_rec: ref fn_ty_rec, .. 
} => { ty_eqs.push(TyMatch::new( ty_rec.ty.clone(), fn_ty_rec.ty.clone(), fn_meta.clone(), )); ty_eqs } _ => { ty_eqs.extend(self.gen_ty_eq(value)); let val_ty_rec = value.get_ty_rec().unwrap(); ty_eqs.push(TyMatch::new(ty_rec.ty.clone(), val_ty_rec.ty, meta.clone())); ty_eqs } }, Ast::FnDeclStmt { meta: _, ident_tkn: _, fn_params: _, ret_ty: _, ref fn_body, sc: _, } => { ty_eqs.extend(self.gen_ty_eq(fn_body)); ty_eqs } // nothing to do with a function call not being assigned, or a // declaration with no value Ast::RetStmt { meta: _, ref ret_expr, } => { match *ret_expr { Some(ref expr) => ty_eqs.extend(self.gen_ty_eq(expr)), None => (), }; ty_eqs } Ast::ClassDeclStmt { meta: _, ty_rec: _, ident_tkn: _, ref methods, .. } => { for mtod in methods { ty_eqs.extend(self.gen_ty_eq(&mtod)); } ty_eqs } Ast::ClassPropAccessExpr { .. } | Ast::ClassPropSetExpr { .. } | Ast::ClassFnCallExpr { .. } | Ast::VarDeclExpr { .. } | Ast::FnCallExpr { .. } => ty_eqs, _ => ty_eqs, } } }
33.16343
95
0.454208
3a28105c8c1048319804266c2bfb22c0efa6b854
17,849
//! [Liqui.io](https://liqui.io/) API. //! //! [Liqui's API documentation](https://liqui.io/api) //! //! Naming between `ccex::liqui` and Liqui is not 1:1. use {HttpClient, Query}; use failure::{Error, ResultExt}; use hex; use hmac::{Hmac, Mac}; use rust_decimal::Decimal as d128; use serde::de::{self, Deserialize, DeserializeOwned, Deserializer, Visitor}; use serde; use serde_json; use sha2::Sha512; use std::collections::HashMap; use std::fmt::{self, Display, Formatter}; use http; use std::str::FromStr; /// Use this as the `host` for REST requests. pub const API_HOST: &str = "https://api.liqui.io"; /// Credentials needed for private API requests. #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Deserialize, Serialize)] pub struct Credential { pub secret: String, pub key: String, pub nonce: u64, } /// `Buy` or `Sell` #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum Side { Buy, Sell, } impl Display for Side { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { match *self { Side::Buy => writeln!(f, "buy"), Side::Sell => writeln!(f, "sell"), } } } /// Single currency. `ETH`, `BTC`, `USDT`, etc. /// /// Use `Currency::from_str` to create a new `Currency`. /// /// ```rust /// let ether: Currency = "ETH".parse()?; /// ``` #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Deserialize, Serialize)] pub struct Currency(String); impl FromStr for Currency { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(Currency(s.to_lowercase())) } } impl Display for Currency { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { let &Currency(ref currency) = self; f.write_str(currency) } } /// Usually represents a product. `ETH_BTC`, `BTC_USDT`, etc. #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Serialize)] pub struct CurrencyPair(pub Currency, pub Currency); impl CurrencyPair { /// Convenience method for accessing the base currency when `CurrencyPair` represents a /// product. pub fn base(&self) -> &Currency { let &CurrencyPair(ref base, _) = self; base } /// Convenience method for accessing the quote currency when `CurrencyPair` represents a /// product. pub fn quote(&self) -> &Currency { let &CurrencyPair(_, ref quote) = self; quote } } impl Display for CurrencyPair { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { write!(f, "{}_{}", self.base(), self.quote()) } } impl<'de> Deserialize<'de> for CurrencyPair { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de> { struct CurrencyPairVisitor; impl<'de> Visitor<'de> for CurrencyPairVisitor { type Value = CurrencyPair; fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("a string containing two currencies separated by an underscore") } fn visit_str<E>(self, pair: &str) -> Result<Self::Value, E> where E: de::Error { let currencies: Vec<&str> = pair.split('_').collect(); if currencies.len() < 2 { return Err(E::invalid_value(serde::de::Unexpected::Str(pair), &self)); } let base = Currency::from_str(currencies[0]).map_err(serde::de::Error::custom)?; let quote = Currency::from_str(currencies[1]).map_err(serde::de::Error::custom)?; Ok(CurrencyPair(base, quote)) } } deserializer.deserialize_str(CurrencyPairVisitor) } } /// Exchange ticker snapshot. 
#[derive(Debug, PartialEq, PartialOrd, Clone, Deserialize, Serialize)] pub struct Ticker { pub high: d128, pub low: d128, pub avg: d128, pub vol: d128, pub vol_cur: d128, pub last: d128, pub buy: d128, pub sell: d128, pub updated: u64, } /// Market depth. #[derive(Debug, PartialEq, PartialOrd, Clone, Deserialize, Serialize)] pub struct Orderbook { pub bids: Vec<(d128, d128)>, pub asks: Vec<(d128, d128)>, } /// An account's funds, privileges, and number of open orders. #[derive(Debug, PartialEq, Clone, Deserialize, Serialize)] pub struct AccountInfo { /// Your account balance available for trading. Doesn’t include funds on /// your open orders. pub funds: HashMap<Currency, d128>, /// The privileges of the current API key. pub rights: Rights, /// The number of open orders on this account. #[serde(rename = "open_orders")] pub num_open_orders: u32, /// Server time (UTC). pub server_time: i64, } /// Account privileges. #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Deserialize, Serialize)] pub struct Rights { #[serde(rename = "info")] pub can_get_info: bool, #[serde(rename = "trade")] pub can_trade: bool, /// Currently unused. #[serde(rename = "withdraw")] pub can_withdraw: bool, } /// The result of a newly placed order. #[derive(Debug, PartialEq, Clone, Deserialize, Serialize)] pub struct OrderPlacement { /// The amount of currency bought/sold. pub received: d128, /// The remaining amount of currency to be bought/sold (and the initial /// order amount). pub remains: d128, /// Is equal to 0 if the request was fully “matched” by the opposite /// orders, otherwise the ID of the executed order will be returned. pub order_id: u64, /// Balance after the request. pub funds: HashMap<Currency, d128>, } /// The result of a newly cancelled order. #[derive(Debug, PartialEq, Clone, Deserialize, Serialize)] pub struct OrderCancellation { /// Liqui-issued order id of the cancelled order. pub order_id: u64, /// Account balance after the order cancellation. pub funds: HashMap<Currency, d128>, } /// Exchange's time and product info. #[derive(Debug, PartialEq, Clone, Deserialize, Serialize)] pub struct ExchangeInfo { pub server_time: u64, #[serde(rename = "pairs")] pub products: HashMap<CurrencyPair, ProductInfo>, } /// Product min/max prices, trading precision, and fees. #[derive(Debug, PartialEq, Clone, Deserialize, Serialize)] pub struct ProductInfo { /// Maximum number of decimal places allowed for the price(?) and amount(?). pub decimal_places: u32, /// Minimum price. pub min_price: d128, /// Maximum price. pub max_price: d128, /// Minimum buy/sell transaction size. pub min_amount: d128, /// Whether the pair is hidden. Hidden pairs remain active, but are not displayed on the /// exchange's web interface. /// /// The value is either `0` or `1`. The developers at Liqui don't know booleans exist. #[serde(rename = "hidden")] pub is_hidden: i32, /// Taker fee represented as a fraction of a percent. For example: `taker_fee == 0.25` /// represents a 0.25% fee. #[serde(rename = "fee")] pub taker_fee: d128, } /// Status of an order. #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Deserialize, Serialize)] pub enum OrderStatus { Active = 0, Executed = 1, Cancelled = 2, CancelledPartiallyExecuted = 3, } /// Limit order (the only type of order Liqui supports). 
#[derive(Debug, PartialEq, PartialOrd, Clone, Deserialize, Serialize)] pub struct Order { pub status: OrderStatus, pub pair: CurrencyPair, #[serde(rename = "type")] pub side: Side, pub amount: d128, pub rate: d128, pub timestamp_created: u64, } /// **Public**. Mostly contains product info (min/max price, precision, fees, etc.) pub fn get_exchange_info<Client>(client: &mut Client, host: &str) -> Result<ExchangeInfo, Error> where Client: HttpClient { let http_request = http::Request::builder() .method(http::Method::GET) .uri(format!("{}/api/3/info", host)) .body(String::new())?; let http_response = client.send(&http_request)?; deserialize_public_response(&http_response) } /// **Private**. User account information (balances, API privileges, and more) pub fn get_account_info<Client>( client: &mut Client, host: &str, credential: &Credential, ) -> Result<AccountInfo, Error> where Client: HttpClient, { let query = { let mut query = Query::with_capacity(2); query.append_param("method", "getInfo"); query.append_param("nonce", credential.nonce.to_string()); query.to_string() }; let mut http_request = http::request::Builder::new() .method(http::Method::POST) .uri(format!("{}/tapi", host)) .body(query)?; sign_private_request(credential, &mut http_request)?; let http_response = client.send(&http_request)?; deserialize_private_response(&http_response) } /// **Public**. Market depth. pub fn get_orderbooks<Client>( client: &mut Client, host: &str, products: &[&CurrencyPair], ) -> Result<HashMap<CurrencyPair, Orderbook>, Error> where Client: HttpClient, { let products: Vec<String> = products.iter().map(ToString::to_string).collect(); let http_request = http::request::Builder::new() .method(http::Method::GET) .uri(format!("{}/api/3/depth/{}", host, products.join("-"))) .body(String::new())?; let http_response = client.send(&http_request)?; deserialize_public_response(&http_response) } /// **Public**. Current price/volume ticker. pub fn get_ticker<Client>( client: &mut Client, host: &str, products: &[CurrencyPair], ) -> Result<HashMap<CurrencyPair, Ticker>, Error> where Client: HttpClient, { let products: Vec<String> = products.iter().map(ToString::to_string).collect(); let http_request = http::request::Builder::new() .method(http::Method::GET) .uri(format!("{}/api/3/ticker/{}", host, products.join("-"))) .body(String::new())?; let http_response = client.send(&http_request)?; deserialize_public_response(&http_response) } /// **Private**. Place a limit order -- the only order type Liqui supports. pub fn place_limit_order<Client>( client: &mut Client, host: &str, credential: &Credential, product: &CurrencyPair, price: d128, quantity: d128, side: Side, ) -> Result<OrderPlacement, Error> where Client: HttpClient, { let body = { let mut query = Query::with_capacity(6); query.append_param("nonce", credential.nonce.to_string()); query.append_param("method", "trade"); query.append_param("pair", product.to_string()); query.append_param("type", side.to_string()); query.append_param("rate", price.to_string()); query.append_param("amount", quantity.to_string()); query.to_string() }; let mut http_request = http::request::Builder::new() .method(http::Method::POST) .uri(format!("{}/tapi", host)) .body(body)?; sign_private_request(credential, &mut http_request)?; let http_response = client.send(&http_request)?; deserialize_private_response(&http_response) } /// **Private**. User's active buy/sell orders for a product. 
pub fn get_active_orders<Client>( client: &mut Client, host: &str, credential: &Credential, product: &CurrencyPair, ) -> Result<HashMap<u64, Order>, Error> where Client: HttpClient, { let body = { let mut query = Query::with_capacity(3); query.append_param("method", "ActiveOrders"); query.append_param("nonce", credential.nonce.to_string()); query.append_param("pair", product.to_string()); query.to_string() }; let mut http_request = http::request::Builder::new() .method(http::Method::POST) .uri(format!("{}/tapi", host)) .body(body)?; sign_private_request(credential, &mut http_request)?; let http_response = client.send(&http_request)?; deserialize_private_response(&http_response) } /// **Private**. Get a specific order by its Liqui-issued order id. pub fn get_order<Client>( client: &mut Client, host: &str, credential: &Credential, order_id: u64, ) -> Result<Order, Error> where Client: HttpClient, { let body = { let mut query = Query::with_capacity(3); query.append_param("method", "OrderInfo"); query.append_param("nonce", credential.nonce.to_string()); query.append_param("order_id", order_id.to_string()); query.to_string() }; let mut http_request = http::request::Builder::new() .method(http::Method::POST) .uri(format!("{}/tapi", host)) .body(body)?; sign_private_request(credential, &mut http_request)?; let http_response = client.send(&http_request)?; deserialize_private_response(&http_response) } /// **Private**. Cancel an order by its Liqui-issued order id. pub fn cancel_order<Client>( client: &mut Client, host: &str, credential: &Credential, order_id: u64, ) -> Result<OrderCancellation, Error> where Client: HttpClient, { let body = { let mut query = Query::with_capacity(3); query.append_param("method", "CancelOrder"); query.append_param("nonce", credential.nonce.to_string()); query.append_param("order_id", order_id.to_string()); query.to_string() }; let mut http_request = http::request::Builder::new() .method(http::Method::POST) .uri(format!("{}/tapi", host)) .body(body)?; sign_private_request(credential, &mut http_request)?; let http_response = client.send(&http_request)?; deserialize_private_response(&http_response) } /// Response to a private, authenticated request. /// /// As far as I can tell, `PrivateResponse` is ALWAYS returned from the server in all cases. #[derive(Debug, Hash, PartialEq, PartialOrd, Eq, Ord, Clone, Deserialize, Serialize)] struct PrivateResponse<T> { success: i32, #[serde(rename = "return")] ok: Option<T>, error: Option<String>, code: Option<u32>, } impl<T> PrivateResponse<T> { pub fn is_ok(&self) -> bool { self.success == 1 } pub fn into_result(self) -> Result<T, LiquiError> { if self.is_ok() { Ok(self.ok.unwrap()) } else { let error = match self.code { Some(code @ 803) | Some(code @ 804) | Some(code @ 805) | Some(code @ 806) | Some(code @ 807) => LiquiError::InvalidOrder(code, self.error.unwrap()), Some(code @ 831) | Some(code @ 832) => { LiquiError::InsufficientFunds(code, self.error.unwrap()) } Some(code @ 833) => LiquiError::OrderNotFound(code, self.error.unwrap()), code => LiquiError::Unregistered(code, self.error.unwrap()), }; Err(error) } } } #[derive(Debug, Fail)] enum LiquiError { #[fail(display = "({}) {}", _0, _1)] InvalidOrder(u32, String), #[fail(display = "({}) {}", _0, _1)] InsufficientFunds(u32, String), #[fail(display = "({}) {}", _0, _1)] OrderNotFound(u32, String), #[fail(display = "({:?}) {}", _0, _1)] Unregistered(Option<u32>, String), } /// Deserialize a response from a *private* REST request. 
fn deserialize_private_response<T>(response: &http::Response<String>) -> Result<T, Error> where T: DeserializeOwned { let body = response.body(); let response: PrivateResponse<T> = serde_json::from_str(body.as_str()) .with_context(|_| format!("failed to deserialize: \"{}\"", body))?; response .into_result() .map_err(|e| format_err!("the server returned \"{}\"", e)) } /// Response to a public request. /// /// As far as I can tell, a public response is either: /// * `T` where `T` is the object being requested, or /// * `PublicResponse` in the event of an error. #[derive(Deserialize)] struct PublicResponse { success: Option<i32>, error: Option<String>, } impl PublicResponse { fn is_ok(&self) -> bool { // If `success` exists it means the response is an error. Also, if `success` exists, it's // always equal to `0`. match self.success { Some(success) => success == 1, None => true, } } fn error(&self) -> &str { match self.error { Some(ref error) => error.as_str(), None => "", } } } /// Deserialize a response from a *public* REST request. fn deserialize_public_response<T>(response: &http::Response<String>) -> Result<T, Error> where T: DeserializeOwned { let body = response.body(); // First, deserialize into `PublicResponse`, to check if the response is an error. let response: PublicResponse = serde_json::from_str(body.as_str()) .with_context(|_| format!("failed to deserialize: \"{}\"", body))?; if !response.is_ok() { return Err(format_err!("the server returned: \"{}\"", response.error())); } // Now, deserialize *again* into the expected response. let response: T = serde_json::from_str(body.as_str()) .with_context(|_| format!("failed to deserialize: \"{}\"", body))?; Ok(response) } fn sign_private_request( credential: &Credential, request: &mut http::Request<String>, ) -> Result<(), Error> { let mut mac = Hmac::<Sha512>::new(credential.secret.as_bytes()).map_err(|e| format_err!("{:?}", e))?; mac.input(request.body().as_bytes()); let signature = hex::encode(mac.result().code().to_vec()); let headers = request.headers_mut(); headers.insert("Key", credential.key.parse().unwrap()); headers.insert("Sign", signature.parse().unwrap()); Ok(()) }
30.355442
97
0.629335
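A quick usage sketch for the identifier types in the file above. It is not from the source: it assumes the module is reachable as `ccex::liqui` (as the module doc suggests) and relies only on the `FromStr` and `Display` impls shown.

use std::str::FromStr;
use ccex::liqui::{Currency, CurrencyPair};

fn main() -> Result<(), failure::Error> {
    // `Currency::from_str` lowercases its input, so "ETH" parses to "eth".
    let base = Currency::from_str("ETH")?;
    let quote = Currency::from_str("BTC")?;
    let product = CurrencyPair(base, quote);
    // `Display` joins base and quote with an underscore -- the same form the
    // REST endpoints (e.g. /api/3/ticker/eth_btc) expect.
    assert_eq!(product.to_string(), "eth_btc");
    Ok(())
}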
09593009b087acdf670f7cddaf84449ae9f9dd6b
7,630
use dotenv::dotenv; use postgres::{Client, NoTls}; use std::env; use std::error::Error; use std::result::Result; fn db_setup() -> Result<(), Box<dyn Error>> { const INITIAL_DB_STATE: &str = r##" -- DB init BEGIN; -- Create table for Geometry type data DROP TABLE IF EXISTS geometry_test; CREATE TABLE geometry_test (name VARCHAR PRIMARY KEY, obj GEOMETRY NOT NULL); INSERT INTO geometry_test VALUES ('point', 'SRID=4326;POINT(-126.4 45.32)'); INSERT INTO geometry_test VALUES ('multipoint', 'SRID=4326;MULTIPOINT((-126.4 45.32), (0 0))'); INSERT INTO geometry_test VALUES ('linestring', 'SRID=4326;LINESTRING (30 10, 10 30, 40 40)'); INSERT INTO geometry_test VALUES ('multilinestring', 'SRID=4326;MULTILINESTRING ((10 10, 20 20, 10 40), (40 40, 30 30, 40 20, 30 10))'); INSERT INTO geometry_test VALUES ('polygon', 'SRID=4326;POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'); INSERT INTO geometry_test VALUES ('multipolygon', 'SRID=4326;MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35), (30 20, 20 15, 20 25, 30 20)))'); INSERT INTO geometry_test VALUES ('geometry_collection', 'SRID=4326;GEOMETRYCOLLECTION (POINT (40 10), LINESTRING (10 10, 20 20, 10 40), POLYGON ((40 40, 20 45, 45 30, 40 40)))'); -- Create table for Geography type data DROP TABLE IF EXISTS geography_test; CREATE TABLE geography_test (name VARCHAR PRIMARY KEY, obj GEOMETRY NOT NULL); INSERT INTO geography_test VALUES ('point', 'SRID=4326;POINT(-126.4 45.32)'); INSERT INTO geography_test VALUES ('multipoint', 'SRID=4326;MULTIPOINT((-126.4 45.32), (0 0))'); INSERT INTO geography_test VALUES ('linestring', 'SRID=4326;LINESTRING (30 10, 10 30, 40 40)'); INSERT INTO geography_test VALUES ('multilinestring', 'SRID=4326;MULTILINESTRING ((10 10, 20 20, 10 40), (40 40, 30 30, 40 20, 30 10))'); INSERT INTO geography_test VALUES ('polygon', 'SRID=4326;POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'); INSERT INTO geography_test VALUES ('multipolygon', 'SRID=4326;MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35), (30 20, 20 15, 20 25, 30 20)))'); INSERT INTO geography_test VALUES ('geometry_collection', 'SRID=4326;GEOMETRYCOLLECTION (POINT (40 10), LINESTRING (10 10, 20 20, 10 40), POLYGON ((40 40, 20 45, 45 30, 40 40)))'); COMMIT; "##; dotenv().ok(); let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set"); let mut client = Client::connect(&database_url, NoTls)?; client.batch_execute(INITIAL_DB_STATE)?; Ok(()) } #[test] fn read_geometry() -> Result<(), Box<dyn Error>> { use super::PostgisGeometry; use crate::ewkb::{Geometry, LineString, Point, Polygon}; use diesel::prelude::*; use diesel::sql_types::*; use diesel::{Connection, PgConnection}; #[derive(Queryable, QueryableByName, Debug, PartialEq)] struct NamedGeometry { #[sql_type = "Varchar"] pub name: String, #[sql_type = "PostgisGeometry"] pub obj: Geometry, } db_setup()?; let database_url = env::var("DATABASE_URL")?; let connection = PgConnection::establish(&database_url)?; let rows: Vec<NamedGeometry> = diesel::sql_query("SELECT * FROM geometry_test").load(&connection)?; assert_eq!(rows.len(), 7); let row: &NamedGeometry = rows.get(4).unwrap(); assert_eq!(row.name, "polygon"); assert_eq!( row.obj, Geometry::Polygon(Polygon { rings: vec![LineString { points: vec![ Point { x: 30.0, y: 10.0, srid: Some(4326) }, Point { x: 40.0, y: 40.0, srid: Some(4326) }, Point { x: 20.0, y: 40.0, srid: Some(4326) }, Point { x: 10.0, y: 20.0, srid: Some(4326) }, Point { x: 30.0, y: 10.0, srid: Some(4326) } ], srid: 
Some(4326) }], srid: Some(4326), }) ); Ok(()) } #[test] fn read_linestring() -> Result<(), Box<dyn Error>> { use super::PostgisGeometry; use crate::ewkb::{LineString, Point}; use diesel::prelude::*; use diesel::sql_types::*; use diesel::{Connection, PgConnection}; #[derive(Queryable, QueryableByName, Debug, PartialEq)] struct NamedLineString { #[sql_type = "Varchar"] pub name: String, #[sql_type = "PostgisGeometry"] pub obj: LineString, } db_setup()?; let database_url = env::var("DATABASE_URL")?; let connection = PgConnection::establish(&database_url)?; let rows: Vec<NamedLineString> = diesel::sql_query("SELECT * FROM geometry_test WHERE GeometryType(obj) = 'LINESTRING'") .load(&connection)?; assert_eq!(rows.len(), 1); let row: &NamedLineString = rows.get(0).unwrap(); assert_eq!(row.name, "linestring"); assert_eq!( row.obj, LineString { points: vec![ Point { x: 30.0, y: 10.0, srid: Some(4326) }, Point { x: 10.0, y: 30.0, srid: Some(4326) }, Point { x: 40.0, y: 40.0, srid: Some(4326) } ], srid: Some(4326), } ); Ok(()) } // #[ignore] #[test] fn write_linestring() -> Result<(), Box<dyn Error>> { use crate::ewkb::Point; use diesel::prelude::*; use diesel::{Connection, PgConnection}; pub mod schema { table! { use diesel::sql_types::Varchar; use crate::diesel_shim::PostgisGeometry; geometry_test (name) { name -> Varchar, obj -> PostgisGeometry, } } } use schema::geometry_test; #[derive(Insertable, Queryable, QueryableByName, Debug, PartialEq)] #[table_name = "geometry_test"] struct NamedPoint { pub name: String, pub obj: Point, } db_setup()?; let database_url = env::var("DATABASE_URL")?; let connection = PgConnection::establish(&database_url)?; let mut rows: Vec<NamedPoint>; rows = diesel::sql_query("SELECT * FROM geometry_test WHERE GeometryType(obj) = 'POINT'") .load(&connection)?; assert_eq!(rows.len(), 1); let data = NamedPoint { name: "new_point".into(), obj: Point { x: 0.0, y: 0.0, srid: None, }, }; let inserted: Vec<NamedPoint> = diesel::insert_into(schema::geometry_test::table) .values(&data) .get_results(&connection)?; assert_eq!(data, inserted[0]); rows = diesel::sql_query("SELECT * FROM geometry_test WHERE GeometryType(obj) = 'POINT'") .load(&connection)?; assert_eq!(rows.len(), 2); Ok(()) }
32.746781
198
0.528178
4b7cec76b91b3b511c1d46a54ff4b5ff088ce55b
4,442
use std::fmt::Display; use quill_common::location::{Range, Ranged}; use quill_parser::{expr_pat::ConstantValue, identifier::NameP}; use quill_type::Type; use crate::TypeConstructorInvocation; /// A pattern made up of type constructors and potential unknowns. #[derive(Debug, Clone)] pub enum Pattern { /// A name representing the entire pattern, e.g. `a`. Named(NameP), /// A constant value. Constant { range: Range, value: ConstantValue }, /// A type constructor, e.g. `False` or `Maybe { value = a }`. TypeConstructor { type_ctor: TypeConstructorInvocation, /// The list of fields. If a pattern is provided, the pattern is matched against the named field. /// If no pattern is provided in Quill code, an automatic pattern is created, that simply assigns the field to a new variable with the same name. fields: Vec<(NameP, Type, Pattern)>, }, /// An impl, e.g. `impl { print }`. Impl { impl_token: Range, /// The list of fields. If a pattern is provided, the pattern is matched against the named field. /// If no pattern is provided in Quill code, an automatic pattern is created, that simply assigns the field to a new variable with the same name. fields: Vec<(NameP, Type, Pattern)>, }, /// A function pattern. This cannot be used directly in code, /// this is created only for working with functions that have multiple patterns. Function { param_types: Vec<Type>, args: Vec<Pattern>, }, /// A borrow of a pattern. Borrow { borrow_token: Range, borrowed: Box<Pattern>, }, /// An underscore representing an ignored pattern. Unknown(Range), } impl Ranged for Pattern { fn range(&self) -> Range { match self { Pattern::Named(identifier) => identifier.range, Pattern::Constant { range, .. } => *range, Pattern::TypeConstructor { type_ctor, fields } => fields .iter() .fold(type_ctor.range, |acc, (_name, _ty, pat)| { acc.union(pat.range()) }), Pattern::Impl { impl_token, fields, .. } => fields .iter() .fold(*impl_token, |acc, (_name, _ty, pat)| acc.union(pat.range())), Pattern::Borrow { borrowed, .. } => borrowed.range(), Pattern::Unknown(range) => *range, Pattern::Function { args, .. } => args .iter() .fold(args[0].range(), |acc, i| acc.union(i.range())), } } } impl Display for Pattern { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Pattern::Named(identifier) => write!(f, "{}", identifier.name), Pattern::Constant { value, .. } => write!(f, "const {}", value), Pattern::TypeConstructor { type_ctor, fields } => { if fields.is_empty() { return write!(f, "{}", type_ctor.data_type.name); } write!(f, "{} {{ ", type_ctor.data_type.name)?; for (i, (name, _ty, pat)) in fields.iter().enumerate() { if i != 0 { write!(f, ", ")?; } write!(f, " {}", name.name)?; write!(f, " = {}", pat)?; } write!(f, " }}") } Pattern::Impl { fields, .. } => { if fields.is_empty() { return write!(f, "impl {{}}"); } write!(f, "impl {{ ")?; for (i, (name, _ty, pat)) in fields.iter().enumerate() { if i != 0 { write!(f, ", ")?; } write!(f, " {}", name.name)?; write!(f, " = {}", pat)?; } write!(f, " }}") } Pattern::Function { args, .. } => { for (i, arg) in args.iter().enumerate() { if i != 0 { write!(f, " ")?; } write!(f, "{}", arg)?; } Ok(()) } Pattern::Borrow { borrowed, .. } => write!(f, "&{}", borrowed), Pattern::Unknown(_) => write!(f, "_"), } } }
37.644068
153
0.479063
674d0305f2eae29ed45553648d4b3efbe2460579
277
// ignore-tidy-linelength // compile-flags:-Z unstable-options --extern-html-root-url core=https://example.com/core/0.1.0 // @has extern_html_root_url/index.html // @has - '//a/@href' 'https://example.com/core/0.1.0/core/iter/index.html' #[doc(no_inline)] pub use std::iter;
30.777778
95
0.703971
644ddfaf8c51cfdf7f6a8d9acf55cf798076bcaa
279
#[macro_use] mod macros; mod crypto; mod music_api; mod request; mod server; use structopt::StructOpt; use crate::server::{ Opt, start_server }; #[actix_rt::main] async fn main() -> std::io::Result<()> { let opt = Opt::from_args(); start_server(&opt).await }
13.285714
40
0.645161
18bf4fd9fc334dd990e763bcd6b911639a253ff2
25,001
#[doc = "Register `I2C_BUSSTS` reader"] pub struct R(crate::R<I2C_BUSSTS_SPEC>); impl core::ops::Deref for R { type Target = crate::R<I2C_BUSSTS_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<I2C_BUSSTS_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<I2C_BUSSTS_SPEC>) -> Self { R(reader) } } #[doc = "Register `I2C_BUSSTS` writer"] pub struct W(crate::W<I2C_BUSSTS_SPEC>); impl core::ops::Deref for W { type Target = crate::W<I2C_BUSSTS_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<I2C_BUSSTS_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<I2C_BUSSTS_SPEC>) -> Self { W(writer) } } #[doc = "Bus Busy (Read Only)\nIndicates that a communication is in progress on the bus. It is set by hardware when a START condition is detected. It is cleared by hardware when a STOP condition is detected\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BUSY_A { #[doc = "0: Bus is IDLE (both SCLK and SDA High)"] _0 = 0, #[doc = "1: Bus is busy"] _1 = 1, } impl From<BUSY_A> for bool { #[inline(always)] fn from(variant: BUSY_A) -> Self { variant as u8 != 0 } } #[doc = "Field `BUSY` reader - Bus Busy (Read Only)\nIndicates that a communication is in progress on the bus. It is set by hardware when a START condition is detected. It is cleared by hardware when a STOP condition is detected"] pub struct BUSY_R(crate::FieldReader<bool, BUSY_A>); impl BUSY_R { pub(crate) fn new(bits: bool) -> Self { BUSY_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BUSY_A { match self.bits { false => BUSY_A::_0, true => BUSY_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == BUSY_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == BUSY_A::_1 } } impl core::ops::Deref for BUSY_R { type Target = crate::FieldReader<bool, BUSY_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Byte Count Transmission/Receive Done \nNote: Software can write 1 to clear this bit.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BCDONE_A { #[doc = "0: Byte count transmission/ receive is not finished when the PECEN is set"] _0 = 0, #[doc = "1: Byte count transmission/ receive is finished when the PECEN is set"] _1 = 1, } impl From<BCDONE_A> for bool { #[inline(always)] fn from(variant: BCDONE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `BCDONE` reader - Byte Count Transmission/Receive Done \nNote: Software can write 1 to clear this bit."] pub struct BCDONE_R(crate::FieldReader<bool, BCDONE_A>); impl BCDONE_R { pub(crate) fn new(bits: bool) -> Self { BCDONE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BCDONE_A { match self.bits { false => BCDONE_A::_0, true => BCDONE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == BCDONE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == BCDONE_A::_1 } } impl core::ops::Deref for BCDONE_R { type Target = crate::FieldReader<bool, BCDONE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field 
`BCDONE` writer - Byte Count Transmission/Receive Done \nNote: Software can write 1 to clear this bit."] pub struct BCDONE_W<'a> { w: &'a mut W, } impl<'a> BCDONE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BCDONE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Byte count transmission/ receive is not finished when the PECEN is set"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(BCDONE_A::_0) } #[doc = "Byte count transmission/ receive is finished when the PECEN is set"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(BCDONE_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "PEC Error in Reception \nNote: Software can write 1 to clear this bit.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PECERR_A { #[doc = "0: PEC value equal the received PEC data packet"] _0 = 0, #[doc = "1: PEC value doesn't match the receive PEC data packet"] _1 = 1, } impl From<PECERR_A> for bool { #[inline(always)] fn from(variant: PECERR_A) -> Self { variant as u8 != 0 } } #[doc = "Field `PECERR` reader - PEC Error in Reception \nNote: Software can write 1 to clear this bit."] pub struct PECERR_R(crate::FieldReader<bool, PECERR_A>); impl PECERR_R { pub(crate) fn new(bits: bool) -> Self { PECERR_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PECERR_A { match self.bits { false => PECERR_A::_0, true => PECERR_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == PECERR_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == PECERR_A::_1 } } impl core::ops::Deref for PECERR_R { type Target = crate::FieldReader<bool, PECERR_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PECERR` writer - PEC Error in Reception \nNote: Software can write 1 to clear this bit."] pub struct PECERR_W<'a> { w: &'a mut W, } impl<'a> PECERR_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PECERR_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PEC value equal the received PEC data packet"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(PECERR_A::_0) } #[doc = "PEC value doesn't match the receive PEC data packet"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(PECERR_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "SMBus Alert Status \nNote: 1. The SMBALERT pin is an open-drain pin, the pull-high resistor is must in the system. 2. 
Software can write 1 to clear this bit.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ALERT_A { #[doc = "0: SMBALERT pin state is low.\\nNo SMBALERT event"] _0 = 0, #[doc = "1: SMBALERT pin state is high.\\nThere is SMBALERT event (falling edge) is detected in SMBALERT pin when the BMHEN = 1 (SMBus host configuration) and the ALERTEN = 1"] _1 = 1, } impl From<ALERT_A> for bool { #[inline(always)] fn from(variant: ALERT_A) -> Self { variant as u8 != 0 } } #[doc = "Field `ALERT` reader - SMBus Alert Status \nNote: 1. The SMBALERT pin is an open-drain pin, the pull-high resistor is must in the system. 2. Software can write 1 to clear this bit."] pub struct ALERT_R(crate::FieldReader<bool, ALERT_A>); impl ALERT_R { pub(crate) fn new(bits: bool) -> Self { ALERT_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ALERT_A { match self.bits { false => ALERT_A::_0, true => ALERT_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == ALERT_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == ALERT_A::_1 } } impl core::ops::Deref for ALERT_R { type Target = crate::FieldReader<bool, ALERT_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `ALERT` writer - SMBus Alert Status \nNote: 1. The SMBALERT pin is an open-drain pin, the pull-high resistor is must in the system. 2. Software can write 1 to clear this bit."] pub struct ALERT_W<'a> { w: &'a mut W, } impl<'a> ALERT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ALERT_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "SMBALERT pin state is low.\nNo SMBALERT event"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(ALERT_A::_0) } #[doc = "SMBALERT pin state is high.\nThere is SMBALERT event (falling edge) is detected in SMBALERT pin when the BMHEN = 1 (SMBus host configuration) and the ALERTEN = 1"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(ALERT_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Bus Suspend or Control Signal Input Status (Read Only)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SCTLDIN_A { #[doc = "0: The input status of SUSCON pin is 0"] _0 = 0, #[doc = "1: The input status of SUSCON pin is 1"] _1 = 1, } impl From<SCTLDIN_A> for bool { #[inline(always)] fn from(variant: SCTLDIN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SCTLDIN` reader - Bus Suspend or Control Signal Input Status (Read Only)"] pub struct SCTLDIN_R(crate::FieldReader<bool, SCTLDIN_A>); impl SCTLDIN_R { pub(crate) fn new(bits: bool) -> Self { SCTLDIN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SCTLDIN_A { match self.bits { false => SCTLDIN_A::_0, true => SCTLDIN_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == SCTLDIN_A::_0 } #[doc = "Checks if the value of the field is `_1`"] 
#[inline(always)] pub fn is_1(&self) -> bool { **self == SCTLDIN_A::_1 } } impl core::ops::Deref for SCTLDIN_R { type Target = crate::FieldReader<bool, SCTLDIN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Bus Time-out Status \nIn bus busy, the bit indicates the total clock low time-out event occurred; otherwise, it indicates the bus idle time-out event occurred.\nNote: Software can write 1 to clear this bit.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BUSTO_A { #[doc = "0: There is no any time-out or external clock time-out"] _0 = 0, #[doc = "1: A time-out or external clock time-out occurred"] _1 = 1, } impl From<BUSTO_A> for bool { #[inline(always)] fn from(variant: BUSTO_A) -> Self { variant as u8 != 0 } } #[doc = "Field `BUSTO` reader - Bus Time-out Status \nIn bus busy, the bit indicates the total clock low time-out event occurred; otherwise, it indicates the bus idle time-out event occurred.\nNote: Software can write 1 to clear this bit."] pub struct BUSTO_R(crate::FieldReader<bool, BUSTO_A>); impl BUSTO_R { pub(crate) fn new(bits: bool) -> Self { BUSTO_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BUSTO_A { match self.bits { false => BUSTO_A::_0, true => BUSTO_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == BUSTO_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == BUSTO_A::_1 } } impl core::ops::Deref for BUSTO_R { type Target = crate::FieldReader<bool, BUSTO_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `BUSTO` writer - Bus Time-out Status \nIn bus busy, the bit indicates the total clock low time-out event occurred; otherwise, it indicates the bus idle time-out event occurred.\nNote: Software can write 1 to clear this bit."] pub struct BUSTO_W<'a> { w: &'a mut W, } impl<'a> BUSTO_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BUSTO_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "There is no any time-out or external clock time-out"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(BUSTO_A::_0) } #[doc = "A time-out or external clock time-out occurred"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(BUSTO_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Clock Low Cumulate Time-out Status \nNote: Software can write 1 to clear this bit.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CLKTO_A { #[doc = "0: Cumulative clock low is no any time-out"] _0 = 0, #[doc = "1: Cumulative clock low time-out occurred"] _1 = 1, } impl From<CLKTO_A> for bool { #[inline(always)] fn from(variant: CLKTO_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CLKTO` reader - Clock Low Cumulate Time-out Status \nNote: Software can write 1 to clear this bit."] pub struct CLKTO_R(crate::FieldReader<bool, CLKTO_A>); impl CLKTO_R { pub(crate) fn new(bits: bool) -> Self { CLKTO_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated 
values variant"] #[inline(always)] pub fn variant(&self) -> CLKTO_A { match self.bits { false => CLKTO_A::_0, true => CLKTO_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CLKTO_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CLKTO_A::_1 } } impl core::ops::Deref for CLKTO_R { type Target = crate::FieldReader<bool, CLKTO_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CLKTO` writer - Clock Low Cumulate Time-out Status \nNote: Software can write 1 to clear this bit."] pub struct CLKTO_W<'a> { w: &'a mut W, } impl<'a> CLKTO_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CLKTO_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Cumulative clock low is no any time-out"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CLKTO_A::_0) } #[doc = "Cumulative clock low time-out occurred"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CLKTO_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "PEC Byte Transmission/Receive Done \nNote: Software can write 1 to clear this bit.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PECDONE_A { #[doc = "0: PEC transmission/ receive is not finished when the PECEN is set"] _0 = 0, #[doc = "1: PEC transmission/ receive is finished when the PECEN is set"] _1 = 1, } impl From<PECDONE_A> for bool { #[inline(always)] fn from(variant: PECDONE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `PECDONE` reader - PEC Byte Transmission/Receive Done \nNote: Software can write 1 to clear this bit."] pub struct PECDONE_R(crate::FieldReader<bool, PECDONE_A>); impl PECDONE_R { pub(crate) fn new(bits: bool) -> Self { PECDONE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PECDONE_A { match self.bits { false => PECDONE_A::_0, true => PECDONE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == PECDONE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == PECDONE_A::_1 } } impl core::ops::Deref for PECDONE_R { type Target = crate::FieldReader<bool, PECDONE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PECDONE` writer - PEC Byte Transmission/Receive Done \nNote: Software can write 1 to clear this bit."] pub struct PECDONE_W<'a> { w: &'a mut W, } impl<'a> PECDONE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PECDONE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PEC transmission/ receive is not finished when the PECEN is set"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(PECDONE_A::_0) } #[doc = "PEC transmission/ receive is finished when the PECEN is set"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(PECDONE_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } 
#[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7); self.w } } impl R { #[doc = "Bit 0 - Bus Busy (Read Only) Indicates that a communication is in progress on the bus. It is set by hardware when a START condition is detected. It is cleared by hardware when a STOP condition is detected"] #[inline(always)] pub fn busy(&self) -> BUSY_R { BUSY_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Byte Count Transmission/Receive Done Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn bcdone(&self) -> BCDONE_R { BCDONE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - PEC Error in Reception Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn pecerr(&self) -> PECERR_R { PECERR_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - SMBus Alert Status Note: 1. The SMBALERT pin is an open-drain pin, the pull-high resistor is must in the system. 2. Software can write 1 to clear this bit."] #[inline(always)] pub fn alert(&self) -> ALERT_R { ALERT_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Bus Suspend or Control Signal Input Status (Read Only)"] #[inline(always)] pub fn sctldin(&self) -> SCTLDIN_R { SCTLDIN_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Bus Time-out Status In bus busy, the bit indicates the total clock low time-out event occurred; otherwise, it indicates the bus idle time-out event occurred. Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn busto(&self) -> BUSTO_R { BUSTO_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Clock Low Cumulate Time-out Status Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn clkto(&self) -> CLKTO_R { CLKTO_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - PEC Byte Transmission/Receive Done Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn pecdone(&self) -> PECDONE_R { PECDONE_R::new(((self.bits >> 7) & 0x01) != 0) } } impl W { #[doc = "Bit 1 - Byte Count Transmission/Receive Done Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn bcdone(&mut self) -> BCDONE_W { BCDONE_W { w: self } } #[doc = "Bit 2 - PEC Error in Reception Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn pecerr(&mut self) -> PECERR_W { PECERR_W { w: self } } #[doc = "Bit 3 - SMBus Alert Status Note: 1. The SMBALERT pin is an open-drain pin, the pull-high resistor is must in the system. 2. Software can write 1 to clear this bit."] #[inline(always)] pub fn alert(&mut self) -> ALERT_W { ALERT_W { w: self } } #[doc = "Bit 5 - Bus Time-out Status In bus busy, the bit indicates the total clock low time-out event occurred; otherwise, it indicates the bus idle time-out event occurred. 
Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn busto(&mut self) -> BUSTO_W { BUSTO_W { w: self } } #[doc = "Bit 6 - Clock Low Cumulate Time-out Status Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn clkto(&mut self) -> CLKTO_W { CLKTO_W { w: self } } #[doc = "Bit 7 - PEC Byte Transmission/Receive Done Note: Software can write 1 to clear this bit."] #[inline(always)] pub fn pecdone(&mut self) -> PECDONE_W { PECDONE_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "I2C Bus Management Status Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [i2c_bussts](index.html) module"] pub struct I2C_BUSSTS_SPEC; impl crate::RegisterSpec for I2C_BUSSTS_SPEC { type Ux = u32; } #[doc = "`read()` method returns [i2c_bussts::R](R) reader structure"] impl crate::Readable for I2C_BUSSTS_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [i2c_bussts::W](W) writer structure"] impl crate::Writable for I2C_BUSSTS_SPEC { type Writer = W; } #[doc = "`reset()` method sets I2C_BUSSTS to value 0"] impl crate::Resettable for I2C_BUSSTS_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
34.389271
425
0.592496
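A hedged sketch of how this generated register API is usually driven. The peripheral handle and field path (`pac::I2C0`, lowercase `i2c_bussts`) follow the common svd2rust layout and are assumptions; only the field accessors come from the generated code above.

// `pac` stands in for the device crate this file belongs to (an assumption).
fn clear_byte_count_done(i2c: &pac::I2C0) {
    let status = i2c.i2c_bussts.read();
    // BUSY is read-only: set on START, cleared on STOP.
    let _bus_in_transfer = status.busy().is_1();
    if status.bcdone().is_1() {
        // BCDONE is write-1-to-clear; the zeros written to the other W1C
        // bits have no effect, so a plain `write` is safe here.
        i2c.i2c_bussts.write(|w| w.bcdone().set_bit());
    }
}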
d9386951588161e2203c277499d2504219f26fe9
2,381
/*! Code that helps you use the original DLL API. */ use libc::c_int; use std::convert::TryFrom; use std::mem::transmute; use crate::BorrowedPacket; use time::Timespec; use crate::pfring::dll::structs::PFRingPacketHeader; use std::slice::from_raw_parts; /// Safe wrapper around error codes returned by pfring API. #[allow(dead_code)] #[repr(i32)] pub enum PFRingErrCode{ Generic = -1, InvalidArgument = -2, NoPacketsAvailable = -3, NoTxSlotsAvailable = -4, WrongConfiguration = -5, EndOfDemoMode = -6, NotSupported = -7, InvalidLibVersion = -8, UnknownAdapter = -9, NotEnoughMemory = -10, InvalidStatus = -11, RingNotEnabled = -12 } impl TryFrom<c_int> for PFRingErrCode{ type Error = (); fn try_from(value: c_int) -> Result<Self, <Self as TryFrom<c_int>>::Error> { if PFRingErrCode::Generic as c_int >= value && PFRingErrCode::RingNotEnabled as c_int <= value { Ok(unsafe{transmute(value as i32)}) } else { Err(()) } } } impl PFRingErrCode { pub fn to_description(&self) -> &'static str { use self::PFRingErrCode::*; match *self { Generic => "Generic", InvalidArgument => "Invalid argument", NoPacketsAvailable => "No packets available", NoTxSlotsAvailable => "No TX slots available", WrongConfiguration => "Wrong configuration", EndOfDemoMode => "End of demo mode", NotSupported => "Not supported", InvalidLibVersion => "Invalid library version", UnknownAdapter => "Unknown adapter", NotEnoughMemory => "Not enough memory", InvalidStatus => "Invalid status", RingNotEnabled => "Ring not enabled" } } } /// Converts pfring error code into human-friendly text. pub fn string_from_pfring_err_code(err_code: c_int) -> String { if let Ok(err) = PFRingErrCode::try_from(err_code){ String::from(err.to_description()) } else { format!("Unknown PF Ring error code: {}", err_code) } } pub fn borrowed_packet_from_header<'a, 'b>(header: &'a PFRingPacketHeader, data: * const u8) -> BorrowedPacket<'b> { unsafe { BorrowedPacket::new(Timespec::new(header.ts.tv_sec as i64, (header.ts.tv_usec * 1000) as i32), from_raw_parts(data, header.caplen as usize)) } }
31.328947
148
0.628307
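For illustration, a small check of the error helper above (assuming `string_from_pfring_err_code` is in scope; nothing else is needed):

fn main() {
    // -3 maps to PFRingErrCode::NoPacketsAvailable via TryFrom.
    assert_eq!(string_from_pfring_err_code(-3), "No packets available");
    // Values outside -1..=-12 fail TryFrom and are reported verbatim.
    assert_eq!(
        string_from_pfring_err_code(-99),
        "Unknown PF Ring error code: -99"
    );
}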
913ca785546bf898632b715f6495061bf552f056
6,610
//! Packet types use crate::qos::QualityOfService; /// Packet type // INVARIANT: the high 4 bits of the byte must be a valid control type #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub struct PacketType(u8); /// Defined control types #[rustfmt::skip] #[repr(u8)] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum ControlType { /// Client request to connect to Server Connect = value::CONNECT, /// Connect acknowledgment ConnectAcknowledgement = value::CONNACK, /// Publish message Publish = value::PUBLISH, /// Publish acknowledgment PublishAcknowledgement = value::PUBACK, /// Publish received (assured delivery part 1) PublishReceived = value::PUBREC, /// Publish release (assured delivery part 2) PublishRelease = value::PUBREL, /// Publish complete (assured delivery part 3) PublishComplete = value::PUBCOMP, /// Client subscribe request Subscribe = value::SUBSCRIBE, /// Subscribe acknowledgment SubscribeAcknowledgement = value::SUBACK, /// Unsubscribe request Unsubscribe = value::UNSUBSCRIBE, /// Unsubscribe acknowledgment UnsubscribeAcknowledgement = value::UNSUBACK, /// PING request PingRequest = value::PINGREQ, /// PING response PingResponse = value::PINGRESP, /// Client is disconnecting Disconnect = value::DISCONNECT, } impl ControlType { #[inline] fn default_flags(self) -> u8 { match self { ControlType::Connect => 0, ControlType::ConnectAcknowledgement => 0, ControlType::Publish => 0, ControlType::PublishAcknowledgement => 0, ControlType::PublishReceived => 0, ControlType::PublishRelease => 0b0010, ControlType::PublishComplete => 0, ControlType::Subscribe => 0b0010, ControlType::SubscribeAcknowledgement => 0, ControlType::Unsubscribe => 0b0010, ControlType::UnsubscribeAcknowledgement => 0, ControlType::PingRequest => 0, ControlType::PingResponse => 0, ControlType::Disconnect => 0, } } } impl PacketType { /// Creates a packet type. Returns None if `flags` is an invalid value for the given /// ControlType as defined by the [MQTT spec]. /// /// [MQTT spec]: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Table_2.2_- pub fn new(t: ControlType, flags: u8) -> Result<PacketType, InvalidFlag> { let flags_ok = match t { ControlType::Publish => { let qos = (flags & 0b0110) >> 1; matches!(qos, 0 | 1 | 2) } _ => t.default_flags() == flags, }; if flags_ok { Ok(PacketType::new_unchecked(t, flags)) } else { Err(InvalidFlag(t, flags)) } } #[inline] fn new_unchecked(t: ControlType, flags: u8) -> PacketType { let byte = (t as u8) << 4 | (flags & 0x0F); #[allow(unused_unsafe)] unsafe { // SAFETY: just constructed from a valid ControlType PacketType(byte) } } /// Creates a packet type with default flags /// /// http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Table_2.2_- #[inline] pub fn with_default(t: ControlType) -> PacketType { let flags = t.default_flags(); PacketType::new_unchecked(t, flags) } pub(crate) fn publish(qos: QualityOfService) -> PacketType { PacketType::new_unchecked(ControlType::Publish, (qos as u8) << 1) } #[inline] pub(crate) fn update_flags(&mut self, upd: impl FnOnce(u8) -> u8) { let flags = upd(self.flags()); self.0 = (self.0 & !0x0F) | (flags & 0x0F) } /// To code #[inline] pub fn to_u8(self) -> u8 { self.0 } /// From code pub fn from_u8(val: u8) -> Result<PacketType, PacketTypeError> { let type_val = val >> 4; let flags = val & 0x0F; let control_type = get_control_type(type_val).ok_or_else(|| PacketTypeError::ReservedType(type_val, flags))?; Ok(PacketType::new(control_type, flags)?) 
} #[inline] pub fn control_type(self) -> ControlType { get_control_type(self.0 >> 4).unwrap_or_else(|| { // SAFETY: this is maintained by the invariant for PacketType unsafe { std::hint::unreachable_unchecked() } }) } #[inline] pub fn flags(self) -> u8 { self.0 & 0x0F } } #[inline] fn get_control_type(val: u8) -> Option<ControlType> { let typ = match val { value::CONNECT => ControlType::Connect, value::CONNACK => ControlType::ConnectAcknowledgement, value::PUBLISH => ControlType::Publish, value::PUBACK => ControlType::PublishAcknowledgement, value::PUBREC => ControlType::PublishReceived, value::PUBREL => ControlType::PublishRelease, value::PUBCOMP => ControlType::PublishComplete, value::SUBSCRIBE => ControlType::Subscribe, value::SUBACK => ControlType::SubscribeAcknowledgement, value::UNSUBSCRIBE => ControlType::Unsubscribe, value::UNSUBACK => ControlType::UnsubscribeAcknowledgement, value::PINGREQ => ControlType::PingRequest, value::PINGRESP => ControlType::PingResponse, value::DISCONNECT => ControlType::Disconnect, _ => return None, }; Some(typ) } /// Parsing packet type errors #[derive(Debug, thiserror::Error)] pub enum PacketTypeError { #[error("reserved type {0:?} (flags {1:#X})")] ReservedType(u8, u8), #[error(transparent)] InvalidFlag(#[from] InvalidFlag), } #[derive(Debug, thiserror::Error)] #[error("invalid flag for {0:?} ({1:#X})")] pub struct InvalidFlag(pub ControlType, pub u8); #[rustfmt::skip] mod value { pub const CONNECT: u8 = 1; pub const CONNACK: u8 = 2; pub const PUBLISH: u8 = 3; pub const PUBACK: u8 = 4; pub const PUBREC: u8 = 5; pub const PUBREL: u8 = 6; pub const PUBCOMP: u8 = 7; pub const SUBSCRIBE: u8 = 8; pub const SUBACK: u8 = 9; pub const UNSUBSCRIBE: u8 = 10; pub const UNSUBACK: u8 = 11; pub const PINGREQ: u8 = 12; pub const PINGRESP: u8 = 13; pub const DISCONNECT: u8 = 14; }
29.909502
117
0.582753
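A minimal round-trip sketch built only on the public API above (assuming `PacketType` and `ControlType` are in scope; the crate path is not given by the source):

fn main() {
    // SUBSCRIBE is control type 8 with a fixed flag nibble of 0b0010,
    // so the encoded header byte is 0x82.
    let subscribe = PacketType::with_default(ControlType::Subscribe);
    assert_eq!(subscribe.to_u8(), 0x82);

    // Decoding the same byte restores the control type...
    let decoded = PacketType::from_u8(0x82).unwrap();
    assert_eq!(decoded.control_type(), ControlType::Subscribe);

    // ...while a wrong flag nibble is rejected as an InvalidFlag error.
    assert!(PacketType::from_u8(0x80).is_err());
}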
1162daa107831c2d1ebd84a08d981111ab964894
210
pub trait ToNanos { fn to_nanos(&self) -> u64; } impl ToNanos for ::std::time::Duration { fn to_nanos(&self) -> u64 { self.as_secs() as u64 * 1_000_000_000 + self.subsec_nanos() as u64 } }
21
74
0.609524
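For illustration (assuming the `ToNanos` trait above is in scope): whole seconds scale by 10^9 and the sub-second nanoseconds are added on.

use std::time::Duration;

fn main() {
    let d = Duration::new(2, 250); // 2 s + 250 ns
    assert_eq!(d.to_nanos(), 2_000_000_250);
}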
620a1319bf34291b6f6fbb9fc08b57eb50134b6f
1,059
// Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. //! This file contains information about NetApiBuffer APIs use shared::lmcons::NET_API_STATUS; use shared::minwindef::{DWORD, LPDWORD, LPVOID}; extern "system" { pub fn NetApiBufferAllocate( ByteCount: DWORD, Buffer: *mut LPVOID, ) -> NET_API_STATUS; pub fn NetApiBufferFree( Buffer: LPVOID, ) -> NET_API_STATUS; pub fn NetApiBufferReallocate( OldBuffer: LPVOID, NewByteCount: DWORD, NewBuffer: *mut LPVOID, ) -> NET_API_STATUS; pub fn NetApiBufferSize( Buffer: LPVOID, ByteCount: LPDWORD, ) -> NET_API_STATUS; pub fn NetapipBufferAllocate( ByteCount: DWORD, Buffer: *mut LPVOID, ) -> NET_API_STATUS; }
34.16129
92
0.67611
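A hedged allocate/size/free sketch over the raw bindings above, written from a consumer's point of view. The `winapi::um::lmapibuf` import path and the `0 == NERR_Success` return convention are assumptions, not stated in this file, and the size assertion allows for allocator rounding.

use std::ptr;
use winapi::shared::minwindef::{DWORD, LPVOID};
use winapi::um::lmapibuf::{NetApiBufferAllocate, NetApiBufferFree, NetApiBufferSize};

fn main() {
    unsafe {
        let mut buf: LPVOID = ptr::null_mut();
        // NetApi* calls report success as 0 (NERR_Success) -- an assumption here.
        assert_eq!(NetApiBufferAllocate(128, &mut buf), 0);

        let mut size: DWORD = 0;
        assert_eq!(NetApiBufferSize(buf, &mut size), 0);
        assert!(size >= 128); // the allocator may round the block size up

        assert_eq!(NetApiBufferFree(buf), 0);
    }
}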
5036d3877f9ce85e224a829be74816da1b67b19e
2,537
// Copyright (C) 2022 Scott Lamb <[email protected]> // SPDX-License-Identifier: MIT OR Apache-2.0 use std::path::PathBuf; use bytes::BytesMut; use clap::Parser; use elkm1::{msg, state}; use futures::StreamExt; use pretty_hex::PrettyHex; #[derive(Parser)] enum Cmd { Watch { addr: String }, Read { filename: PathBuf }, } async fn watch(addr: String) { let panel = state::Panel::connect(&addr).await.unwrap(); log::info!("Tracking changes."); tokio::pin!(panel); while let Some(pkt) = panel.next().await { let pkt = pkt.unwrap(); log::debug!("received {:#?}", &pkt); match pkt.change { Some(state::Change::ZoneChange { zone, prior }) => { log::info!( "{}: {:?} -> {:?}", panel.zone_name(zone), prior, panel.zone_statuses().zones[zone.to_index()], ); } Some(state::Change::ArmingStatus { prior }) => { let cur = panel.arming_status(); let area_names = panel.area_names(); for i in 0..msg::NUM_AREAS { if prior.arming_status[i] != cur.arming_status[i] || prior.up_state[i] != cur.up_state[i] || prior.alarm_state[i] != cur.alarm_state[i] { log::info!( "{}: {:?} {:?} {:?} -> {:?} {:?} {:?}", area_names[i], prior.arming_status[i], prior.up_state[i], prior.alarm_state[i], cur.arming_status[i], cur.up_state[i], cur.alarm_state[i], ); } } } _ => {} } } } fn read(filename: PathBuf) { let data = std::fs::read(filename).unwrap(); let mut left = BytesMut::from(&data[..]); while let Some(pkt) = elkm1::pkt::Packet::decode(&mut left) { println!("{:?}", pkt); } if !left.is_empty() { println!("incomplete data: {:?}", left.hex_dump()); } } #[tokio::main(flavor = "current_thread")] async fn main() { env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("info")).init(); match Cmd::parse() { Cmd::Watch { addr } => watch(addr).await, Cmd::Read { filename } => read(filename), } }
32.113924
91
0.456839
f837faa33bf871943e5b27b9258508ff08ecf5cf
421
use std::env;

use tiny_renderer::{pipeline::model::Model, renderer::Renderer};

fn main() {
    // Expects two positional arguments: a shader name and a model path without the
    // `.obj` extension; indexing panics if either is missing.
    let args: Vec<String> = env::args().collect();
    let shader_name = &args[1];
    let path = &args[2];
    let model_path = format!("{}.obj", path);
    // let shader = make_shader(shader_name, path);
    Renderer::default()
        .models(Model::from_obj(&model_path))
        .shader(shader_name, path)
        .run();
}
28.066667
64
0.605701
23cb83544a689362f9f0eae9c69f287e11041be5
15,040
#[doc = "Reader of register TCMR"] pub type R = crate::R<u32, super::TCMR>; #[doc = "Writer for register TCMR"] pub type W = crate::W<u32, super::TCMR>; #[doc = "Register TCMR `reset()`'s with value 0"] impl crate::ResetValue for super::TCMR { #[inline(always)] fn reset_value() -> Self::Ux { 0 } } #[doc = "Transmit Clock Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CKS_A { #[doc = "0: Divided Clock"] MCK = 0, #[doc = "1: TK Clock signal"] TK = 1, #[doc = "2: RK pin"] RK = 2 } impl From<CKS_A> for u8 { #[inline(always)] fn from(variant: CKS_A) -> Self { variant as _ } } #[doc = "Reader of field `CKS`"] pub type CKS_R = crate::R<u8, CKS_A>; impl CKS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, CKS_A> { use crate::Variant::*; match self.bits { 0 => Val(CKS_A::MCK), 1 => Val(CKS_A::TK), 2 => Val(CKS_A::RK), i => Res(i), } } #[doc = "Checks if the value of the field is `MCK`"] #[inline(always)] pub fn is_mck(&self) -> bool { *self == CKS_A::MCK } #[doc = "Checks if the value of the field is `TK`"] #[inline(always)] pub fn is_tk(&self) -> bool { *self == CKS_A::TK } #[doc = "Checks if the value of the field is `RK`"] #[inline(always)] pub fn is_rk(&self) -> bool { *self == CKS_A::RK } } #[doc = "Write proxy for field `CKS`"] pub struct CKS_W<'a> { w: &'a mut W } impl<'a> CKS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CKS_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Divided Clock"] #[inline(always)] pub fn mck(self) -> &'a mut W { self.variant(CKS_A::MCK) } #[doc = "TK Clock signal"] #[inline(always)] pub fn tk(self) -> &'a mut W { self.variant(CKS_A::TK) } #[doc = "RK pin"] #[inline(always)] pub fn rk(self) -> &'a mut W { self.variant(CKS_A::RK) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03); self.w } } #[doc = "Transmit Clock Output Mode Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CKO_A { #[doc = "0: None"] NONE = 0, #[doc = "1: Continuous Receive Clock"] CONTINUOUS = 1, #[doc = "2: Transmit Clock only during data transfers"] TRANSFER = 2 } impl From<CKO_A> for u8 { #[inline(always)] fn from(variant: CKO_A) -> Self { variant as _ } } #[doc = "Reader of field `CKO`"] pub type CKO_R = crate::R<u8, CKO_A>; impl CKO_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, CKO_A> { use crate::Variant::*; match self.bits { 0 => Val(CKO_A::NONE), 1 => Val(CKO_A::CONTINUOUS), 2 => Val(CKO_A::TRANSFER), i => Res(i), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == CKO_A::NONE } #[doc = "Checks if the value of the field is `CONTINUOUS`"] #[inline(always)] pub fn is_continuous(&self) -> bool { *self == CKO_A::CONTINUOUS } #[doc = "Checks if the value of the field is `TRANSFER`"] #[inline(always)] pub fn is_transfer(&self) -> bool { *self == CKO_A::TRANSFER } } #[doc = "Write proxy for field `CKO`"] pub struct CKO_W<'a> { w: &'a mut W } impl<'a> CKO_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CKO_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "None"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(CKO_A::NONE) } #[doc = "Continuous Receive 
Clock"] #[inline(always)] pub fn continuous(self) -> &'a mut W { self.variant(CKO_A::CONTINUOUS) } #[doc = "Transmit Clock only during data transfers"] #[inline(always)] pub fn transfer(self) -> &'a mut W { self.variant(CKO_A::TRANSFER) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 2)) | (((value as u32) & 0x07) << 2); self.w } } #[doc = "Reader of field `CKI`"] pub type CKI_R = crate::R<bool, bool>; #[doc = "Write proxy for field `CKI`"] pub struct CKI_W<'a> { w: &'a mut W } impl<'a> CKI_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5); self.w } } #[doc = "Transmit Clock Gating Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CKG_A { #[doc = "0: None"] NONE = 0, #[doc = "1: Transmit Clock enabled only if TF Low"] CONTINUOUS = 1, #[doc = "2: Transmit Clock enabled only if TF High"] TRANSFER = 2 } impl From<CKG_A> for u8 { #[inline(always)] fn from(variant: CKG_A) -> Self { variant as _ } } #[doc = "Reader of field `CKG`"] pub type CKG_R = crate::R<u8, CKG_A>; impl CKG_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, CKG_A> { use crate::Variant::*; match self.bits { 0 => Val(CKG_A::NONE), 1 => Val(CKG_A::CONTINUOUS), 2 => Val(CKG_A::TRANSFER), i => Res(i), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == CKG_A::NONE } #[doc = "Checks if the value of the field is `CONTINUOUS`"] #[inline(always)] pub fn is_continuous(&self) -> bool { *self == CKG_A::CONTINUOUS } #[doc = "Checks if the value of the field is `TRANSFER`"] #[inline(always)] pub fn is_transfer(&self) -> bool { *self == CKG_A::TRANSFER } } #[doc = "Write proxy for field `CKG`"] pub struct CKG_W<'a> { w: &'a mut W } impl<'a> CKG_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CKG_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "None"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(CKG_A::NONE) } #[doc = "Transmit Clock enabled only if TF Low"] #[inline(always)] pub fn continuous(self) -> &'a mut W { self.variant(CKG_A::CONTINUOUS) } #[doc = "Transmit Clock enabled only if TF High"] #[inline(always)] pub fn transfer(self) -> &'a mut W { self.variant(CKG_A::TRANSFER) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u32) & 0x03) << 6); self.w } } #[doc = "Transmit Start Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum START_A { #[doc = "0: Continuous, as soon as a word is written in the SSC_THR Register (if Transmit is enabled), and immediately after the end of transfer of the previous data."] CONTINUOUS = 0, #[doc = "1: Receive start"] RECEIVE = 1, #[doc = "2: Detection of a low level on TF signal"] RF_LOW = 2, #[doc = "3: Detection of a high level on TF signal"] RF_HIGH = 3, #[doc = "4: Detection of a falling edge on TF signal"] RF_FALLING = 4, #[doc = "5: Detection of a 
rising edge on TF signal"] RF_RISING = 5, #[doc = "6: Detection of any level change on TF signal"] RF_LEVEL = 6, #[doc = "7: Detection of any edge on TF signal"] RF_EDGE = 7, #[doc = "8: Compare 0"] CMP_0 = 8 } impl From<START_A> for u8 { #[inline(always)] fn from(variant: START_A) -> Self { variant as _ } } #[doc = "Reader of field `START`"] pub type START_R = crate::R<u8, START_A>; impl START_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, START_A> { use crate::Variant::*; match self.bits { 0 => Val(START_A::CONTINUOUS), 1 => Val(START_A::RECEIVE), 2 => Val(START_A::RF_LOW), 3 => Val(START_A::RF_HIGH), 4 => Val(START_A::RF_FALLING), 5 => Val(START_A::RF_RISING), 6 => Val(START_A::RF_LEVEL), 7 => Val(START_A::RF_EDGE), 8 => Val(START_A::CMP_0), i => Res(i), } } #[doc = "Checks if the value of the field is `CONTINUOUS`"] #[inline(always)] pub fn is_continuous(&self) -> bool { *self == START_A::CONTINUOUS } #[doc = "Checks if the value of the field is `RECEIVE`"] #[inline(always)] pub fn is_receive(&self) -> bool { *self == START_A::RECEIVE } #[doc = "Checks if the value of the field is `RF_LOW`"] #[inline(always)] pub fn is_rf_low(&self) -> bool { *self == START_A::RF_LOW } #[doc = "Checks if the value of the field is `RF_HIGH`"] #[inline(always)] pub fn is_rf_high(&self) -> bool { *self == START_A::RF_HIGH } #[doc = "Checks if the value of the field is `RF_FALLING`"] #[inline(always)] pub fn is_rf_falling(&self) -> bool { *self == START_A::RF_FALLING } #[doc = "Checks if the value of the field is `RF_RISING`"] #[inline(always)] pub fn is_rf_rising(&self) -> bool { *self == START_A::RF_RISING } #[doc = "Checks if the value of the field is `RF_LEVEL`"] #[inline(always)] pub fn is_rf_level(&self) -> bool { *self == START_A::RF_LEVEL } #[doc = "Checks if the value of the field is `RF_EDGE`"] #[inline(always)] pub fn is_rf_edge(&self) -> bool { *self == START_A::RF_EDGE } #[doc = "Checks if the value of the field is `CMP_0`"] #[inline(always)] pub fn is_cmp_0(&self) -> bool { *self == START_A::CMP_0 } } #[doc = "Write proxy for field `START`"] pub struct START_W<'a> { w: &'a mut W } impl<'a> START_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: START_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Continuous, as soon as a word is written in the SSC_THR Register (if Transmit is enabled), and immediately after the end of transfer of the previous data."] #[inline(always)] pub fn continuous(self) -> &'a mut W { self.variant(START_A::CONTINUOUS) } #[doc = "Receive start"] #[inline(always)] pub fn receive(self) -> &'a mut W { self.variant(START_A::RECEIVE) } #[doc = "Detection of a low level on TF signal"] #[inline(always)] pub fn rf_low(self) -> &'a mut W { self.variant(START_A::RF_LOW) } #[doc = "Detection of a high level on TF signal"] #[inline(always)] pub fn rf_high(self) -> &'a mut W { self.variant(START_A::RF_HIGH) } #[doc = "Detection of a falling edge on TF signal"] #[inline(always)] pub fn rf_falling(self) -> &'a mut W { self.variant(START_A::RF_FALLING) } #[doc = "Detection of a rising edge on TF signal"] #[inline(always)] pub fn rf_rising(self) -> &'a mut W { self.variant(START_A::RF_RISING) } #[doc = "Detection of any level change on TF signal"] #[inline(always)] pub fn rf_level(self) -> &'a mut W { self.variant(START_A::RF_LEVEL) } #[doc = "Detection of any edge on TF signal"] #[inline(always)] pub fn rf_edge(self) -> &'a mut W { self.variant(START_A::RF_EDGE) 
} #[doc = "Compare 0"] #[inline(always)] pub fn cmp_0(self) -> &'a mut W { self.variant(START_A::CMP_0) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 8)) | (((value as u32) & 0x0f) << 8); self.w } } #[doc = "Reader of field `STTDLY`"] pub type STTDLY_R = crate::R<u8, u8>; #[doc = "Write proxy for field `STTDLY`"] pub struct STTDLY_W<'a> { w: &'a mut W } impl<'a> STTDLY_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 16)) | (((value as u32) & 0xff) << 16); self.w } } #[doc = "Reader of field `PERIOD`"] pub type PERIOD_R = crate::R<u8, u8>; #[doc = "Write proxy for field `PERIOD`"] pub struct PERIOD_W<'a> { w: &'a mut W } impl<'a> PERIOD_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 24)) | (((value as u32) & 0xff) << 24); self.w } } impl R { #[doc = "Bits 0:1 - Transmit Clock Selection"] #[inline(always)] pub fn cks(&self) -> CKS_R { CKS_R::new((self.bits & 0x03) as u8) } #[doc = "Bits 2:4 - Transmit Clock Output Mode Selection"] #[inline(always)] pub fn cko(&self) -> CKO_R { CKO_R::new(((self.bits >> 2) & 0x07) as u8) } #[doc = "Bit 5 - Transmit Clock Inversion"] #[inline(always)] pub fn cki(&self) -> CKI_R { CKI_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bits 6:7 - Transmit Clock Gating Selection"] #[inline(always)] pub fn ckg(&self) -> CKG_R { CKG_R::new(((self.bits >> 6) & 0x03) as u8) } #[doc = "Bits 8:11 - Transmit Start Selection"] #[inline(always)] pub fn start(&self) -> START_R { START_R::new(((self.bits >> 8) & 0x0f) as u8) } #[doc = "Bits 16:23 - Transmit Start Delay"] #[inline(always)] pub fn sttdly(&self) -> STTDLY_R { STTDLY_R::new(((self.bits >> 16) & 0xff) as u8) } #[doc = "Bits 24:31 - Transmit Period Divider Selection"] #[inline(always)] pub fn period(&self) -> PERIOD_R { PERIOD_R::new(((self.bits >> 24) & 0xff) as u8) } } impl W { #[doc = "Bits 0:1 - Transmit Clock Selection"] #[inline(always)] pub fn cks(&mut self) -> CKS_W { CKS_W { w: self } } #[doc = "Bits 2:4 - Transmit Clock Output Mode Selection"] #[inline(always)] pub fn cko(&mut self) -> CKO_W { CKO_W { w: self } } #[doc = "Bit 5 - Transmit Clock Inversion"] #[inline(always)] pub fn cki(&mut self) -> CKI_W { CKI_W { w: self } } #[doc = "Bits 6:7 - Transmit Clock Gating Selection"] #[inline(always)] pub fn ckg(&mut self) -> CKG_W { CKG_W { w: self } } #[doc = "Bits 8:11 - Transmit Start Selection"] #[inline(always)] pub fn start(&mut self) -> START_W { START_W { w: self } } #[doc = "Bits 16:23 - Transmit Start Delay"] #[inline(always)] pub fn sttdly(&mut self) -> STTDLY_W { STTDLY_W { w: self } } #[doc = "Bits 24:31 - Transmit Period Divider Selection"] #[inline(always)] pub fn period(&mut self) -> PERIOD_W { PERIOD_W { w: self } } }
38.465473
683
0.578923
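// A plain-Rust sketch of the raw masking the TCMR write proxies above perform
// (START in bits 8..12, STTDLY in bits 16..24, PERIOD in bits 24..32). The field
// values are illustrative, and this is deliberately not the generated PAC API itself.
fn main() {
    let mut bits: u32 = 0;
    bits = (bits & !(0x0f << 8)) | ((0x2 & 0x0f) << 8); // START = RF_LOW (2)
    bits = (bits & !(0xff << 16)) | ((4 & 0xff) << 16); // STTDLY = 4
    bits = (bits & !(0xff << 24)) | ((15 & 0xff) << 24); // PERIOD = 15
    assert_eq!(bits, 0x0F04_0200);
}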
6a8fbb97ae2c91ae7bfb1815805b04628fee0976
3,104
use clap::clap_app;
// `source()` is called on the error chain below through the `std::error::Error`
// trait, so it must be in scope.
use std::error::Error;

fn main() {
    dotenv::dotenv().ok();

    let matches = clap_app!(Ceres =>
        (version: "0.3.6")
        (author: "mori <[email protected]>")
        (about: "Ceres is a build tool, script compiler and map preprocessor for WC3 Lua maps.")
        (@subcommand build =>
            (about: "Uses the build.lua file in the current directory to build a map.")
            (setting: clap::AppSettings::TrailingVarArg)
            (@arg dir: --dir -d +takes_value "Sets the project directory.")
            (@arg BUILD_ARGS: ... "Arguments to pass to the build script.")
        )
        (@subcommand run =>
            (about: "Uses the build.lua file in the current directory to build and run a map.")
            (setting: clap::AppSettings::TrailingVarArg)
            (@arg dir: --dir -d +takes_value "Sets the project directory.")
            (@arg BUILD_ARGS: ... "Arguments to pass to the build script.")
        )
        (@subcommand exec =>
            (about: "Executes the specified lua file using Ceres runtime")
            (setting: clap::AppSettings::TrailingVarArg)
            (@arg script: +required +takes_value)
            (@arg BUILD_ARGS: ... "Arguments to pass to the build script.")
        )
    )
    .get_matches();

    std::process::exit(match run(matches) {
        Err(error) => {
            println!("[ERROR] An error has occurred. Error chain:");
            println!("{}", error);

            let mut cause = error.source();
            while let Some(inner_cause) = cause {
                println!("{}", &inner_cause);
                cause = inner_cause.source();
            }

            1
        }
        Ok(_) => 0,
    });
}

fn run_build(arg: &clap::ArgMatches, mode: ceres_core::CeresRunMode) -> Result<(), anyhow::Error> {
    let project_dir = arg
        .value_of("dir")
        .map(std::path::PathBuf::from)
        .unwrap_or_else(|| std::env::current_dir().unwrap());

    let script_args = arg
        .values_of("BUILD_ARGS")
        .map(std::iter::Iterator::collect)
        .unwrap_or_else(Vec::new);

    ceres_core::run_build_script(mode, project_dir, script_args)?;

    Ok(())
}

fn exec(arg: &clap::ArgMatches) -> Result<(), anyhow::Error> {
    let script = arg
        .value_of("script")
        .map(std::path::PathBuf::from)
        .unwrap();

    let script = std::fs::read_to_string(script)?;

    let script_args = arg
        .values_of("BUILD_ARGS")
        .map(std::iter::Iterator::collect)
        .unwrap_or_else(Vec::new);

    ceres_core::execute_script(ceres_core::CeresRunMode::Build, script_args, |ctx| {
        ctx.load(&script).exec()?;

        Ok(())
    })?;

    Ok(())
}

fn run(matches: clap::ArgMatches) -> Result<(), anyhow::Error> {
    if let Some(arg) = matches.subcommand_matches("build") {
        run_build(arg, ceres_core::CeresRunMode::Build)?;
    } else if let Some(arg) = matches.subcommand_matches("run") {
        run_build(arg, ceres_core::CeresRunMode::RunMap)?;
    } else if let Some(arg) = matches.subcommand_matches("exec") {
        exec(arg)?;
    }

    Ok(())
}
32
99
0.567655
5d4e33090ad70ce1796a9b57c2dd7128d66f65de
3,010
#[allow(dead_code)]
mod util;

use std::io;

use termion::event::Key;
use termion::input::MouseTerminal;
use termion::raw::IntoRawMode;
use termion::screen::AlternateScreen;
use tui::backend::TermionBackend;
use tui::layout::{Constraint, Layout};
use tui::style::{Color, Modifier, Style};
use tui::widgets::{Block, Borders, Row, Table, Widget};
use tui::Terminal;

use crate::util::event::{Event, Events};

struct App<'a> {
    items: Vec<Vec<&'a str>>,
    selected: usize,
}

impl<'a> App<'a> {
    fn new() -> App<'a> {
        App {
            items: vec![
                vec!["Row11", "Row12", "Row13"],
                vec!["Row21", "Row22", "Row23"],
                vec!["Row31", "Row32", "Row33"],
                vec!["Row41", "Row42", "Row43"],
                vec!["Row51", "Row52", "Row53"],
                vec!["Row61", "Row62", "Row63"],
            ],
            selected: 0,
        }
    }
}

fn main() -> Result<(), failure::Error> {
    // Terminal initialization
    let stdout = io::stdout().into_raw_mode()?;
    let stdout = MouseTerminal::from(stdout);
    let stdout = AlternateScreen::from(stdout);
    let backend = TermionBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    terminal.hide_cursor()?;

    let events = Events::new();

    // App
    let mut app = App::new();

    // Input
    loop {
        terminal.draw(|mut f| {
            let selected_style = Style::default().fg(Color::Yellow).modifier(Modifier::Bold);
            let normal_style = Style::default().fg(Color::White);
            let header = ["Header1", "Header2", "Header3"];
            let rows = app.items.iter().enumerate().map(|(i, item)| {
                if i == app.selected {
                    Row::StyledData(item.into_iter(), selected_style)
                } else {
                    Row::StyledData(item.into_iter(), normal_style)
                }
            });

            let rects = Layout::default()
                .constraints([Constraint::Percentage(100)].as_ref())
                .margin(5)
                .split(f.size());
            Table::new(header.into_iter(), rows)
                .block(Block::default().borders(Borders::ALL).title("Table"))
                .widths(&[10, 10, 10])
                .render(&mut f, rects[0]);
        })?;

        match events.next()? {
            Event::Input(key) => match key {
                Key::Char('q') => {
                    break;
                }
                Key::Down => {
                    app.selected += 1;
                    if app.selected > app.items.len() - 1 {
                        app.selected = 0;
                    }
                }
                Key::Up => {
                    if app.selected > 0 {
                        app.selected -= 1;
                    } else {
                        app.selected = app.items.len() - 1;
                    }
                }
                _ => {}
            },
            _ => {}
        };
    }

    Ok(())
}
29.223301
93
0.456146
db4aeecbb8fa66584b4597d40337c75f5ce1fce3
51
pub mod common;
pub mod incremental;
pub mod full;
12.75
20
0.764706
9c85b4018ebb9062802a0ce43903bca130571d2d
10,822
//! Common supertraits for consensus protocols.

use std::collections::BTreeMap;
use std::fmt::{Debug, Display};
use std::hash::Hash;
use std::iter::once;

use failure::Fail;
use rand::Rng;
use serde::{de::DeserializeOwned, Serialize};

use crate::fault_log::{Fault, FaultLog};
use crate::sender_queue::SenderQueueableMessage;
use crate::{Target, TargetedMessage};

/// A transaction, user message, or other user data.
pub trait Contribution: Eq + Debug + Hash + Send + Sync {}
impl<C> Contribution for C where C: Eq + Debug + Hash + Send + Sync {}

/// A peer node's unique identifier.
pub trait NodeIdT: Eq + Ord + Clone + Debug + Hash + Send + Sync {}
impl<N> NodeIdT for N where N: Eq + Ord + Clone + Debug + Hash + Send + Sync {}

/// A consensus protocol fault.
pub trait FaultT: Clone + Debug + Fail + PartialEq {}
impl<N> FaultT for N where N: Clone + Debug + Fail + PartialEq {}

/// Messages.
pub trait Message: Debug + Send + Sync {}
impl<M> Message for M where M: Debug + Send + Sync {}

/// Session identifiers.
pub trait SessionIdT: Display + Serialize + Send + Sync + Clone + Debug {}
impl<S> SessionIdT for S where S: Display + Serialize + Send + Sync + Clone + Debug {}

/// Epochs.
pub trait EpochT: Copy + Message + Default + Eq + Ord + Serialize + DeserializeOwned {}
impl<E> EpochT for E where E: Copy + Message + Default + Eq + Ord + Serialize + DeserializeOwned {}

/// Single algorithm step outcome.
///
/// Each time input (typically in the form of user input or incoming network messages) is provided
/// to an instance of an algorithm, a `Step` is produced, potentially containing output values,
/// a fault log, and network messages.
///
/// Any `Step` **must always be used** by the client application; at the very least the resulting
/// messages must be queued.
///
/// ## Handling unused Steps
///
/// In the (rare) case of a `Step` not being of any interest at all, instead of discarding it
/// through `let _ = ...` or similar constructs, the implicit assumption should explicitly be
/// checked instead:
///
/// ```ignore
/// assert!(alg.propose(123).expect("Could not propose value").is_empty(),
///         "Algorithm will never output anything on first proposal");
/// ```
///
/// If an edge case occurs and outgoing messages are generated as a result, the `assert!` will
/// catch it, instead of potentially stalling the algorithm.
#[must_use = "The algorithm step result must be used."]
#[derive(Debug)]
pub struct Step<M, O, N, F: Fail> {
    /// The algorithm's output, after consensus has been reached. This is guaranteed to be the same
    /// in all nodes.
    pub output: Vec<O>,
    /// A list of nodes that are not following consensus, together with information about the
    /// detected misbehavior.
    pub fault_log: FaultLog<N, F>,
    /// A list of messages that must be sent to other nodes. Each entry contains a message and a
    /// `Target`.
    pub messages: Vec<TargetedMessage<M, N>>,
}

impl<M, O, N, F> Default for Step<M, O, N, F>
where
    F: Fail,
{
    fn default() -> Self {
        Step {
            output: Vec::default(),
            fault_log: FaultLog::default(),
            messages: Vec::default(),
        }
    }
}

impl<M, O, N, F> Step<M, O, N, F>
where
    F: Fail,
{
    /// Returns the same step, with the given additional output.
    pub fn with_output<T: Into<Option<O>>>(mut self, output: T) -> Self {
        self.output.extend(output.into());
        self
    }

    /// Converts `self` into a step of another type, given conversion methods for output, faults,
    /// and messages.
    pub fn map<M2, O2, F2, FO, FF, FM>(
        self,
        f_out: FO,
        f_fail: FF,
        f_msg: FM,
    ) -> Step<M2, O2, N, F2>
    where
        F2: Fail,
        FO: Fn(O) -> O2,
        FF: Fn(F) -> F2,
        FM: Fn(M) -> M2,
    {
        Step {
            output: self.output.into_iter().map(f_out).collect(),
            fault_log: self.fault_log.map(f_fail),
            messages: self.messages.into_iter().map(|tm| tm.map(&f_msg)).collect(),
        }
    }

    /// Extends `self` with `other`s messages and fault logs, and returns `other.output`.
    #[must_use]
    pub fn extend_with<M2, O2, F2, FF, FM>(
        &mut self,
        other: Step<M2, O2, N, F2>,
        f_fail: FF,
        f_msg: FM,
    ) -> Vec<O2>
    where
        F2: Fail,
        FF: Fn(F2) -> F,
        FM: Fn(M2) -> M,
    {
        let fails = other.fault_log.map(f_fail);
        self.fault_log.extend(fails);
        let msgs = other.messages.into_iter().map(|tm| tm.map(&f_msg));
        self.messages.extend(msgs);
        other.output
    }

    /// Adds the outputs, fault logs and messages of `other` to `self`.
    pub fn extend(&mut self, other: Self) {
        self.output.extend(other.output);
        self.fault_log.extend(other.fault_log);
        self.messages.extend(other.messages);
    }

    /// Extends this step with `other` and returns the result.
    pub fn join(mut self, other: Self) -> Self {
        self.extend(other);
        self
    }

    /// Returns `true` if there are no messages, faults or outputs.
    pub fn is_empty(&self) -> bool {
        self.output.is_empty() && self.fault_log.is_empty() && self.messages.is_empty()
    }
}

impl<M, O, N, F> From<FaultLog<N, F>> for Step<M, O, N, F>
where
    F: Fail,
{
    fn from(fault_log: FaultLog<N, F>) -> Self {
        Step {
            fault_log,
            ..Step::default()
        }
    }
}

impl<M, O, N, F> From<Fault<N, F>> for Step<M, O, N, F>
where
    F: Fail,
{
    fn from(fault: Fault<N, F>) -> Self {
        Step {
            fault_log: fault.into(),
            ..Step::default()
        }
    }
}

impl<M, O, N, F> From<TargetedMessage<M, N>> for Step<M, O, N, F>
where
    F: Fail,
{
    fn from(msg: TargetedMessage<M, N>) -> Self {
        Step {
            messages: once(msg).collect(),
            ..Step::default()
        }
    }
}

impl<I, M, O, N, F> From<I> for Step<M, O, N, F>
where
    I: IntoIterator<Item = TargetedMessage<M, N>>,
    F: Fail,
{
    fn from(msgs: I) -> Self {
        Step {
            messages: msgs.into_iter().collect(),
            ..Step::default()
        }
    }
}

/// An interface to objects with epoch numbers. Different algorithms may have different internal
/// notion of _epoch_. This interface summarizes the properties that are essential for the message
/// sender queue.
pub trait Epoched {
    /// Type of epoch.
    type Epoch: EpochT;

    /// Returns the object's epoch number.
    fn epoch(&self) -> Self::Epoch;
}

/// An alias for the type of `Step` returned by `D`'s methods.
pub type CpStep<D> = Step<
    <D as ConsensusProtocol>::Message,
    <D as ConsensusProtocol>::Output,
    <D as ConsensusProtocol>::NodeId,
    <D as ConsensusProtocol>::FaultKind,
>;

impl<'i, M, O, N, F> Step<M, O, N, F>
where
    N: NodeIdT,
    M: 'i + Clone + SenderQueueableMessage,
    F: Fail,
{
    /// Removes and returns any messages that are not yet accepted by remote nodes according to the
    /// mapping `remote_epochs`. This way the returned messages are postponed until later, and the
    /// remaining messages can be sent to remote nodes without delay.
    pub fn defer_messages(
        &mut self,
        peer_epochs: &BTreeMap<N, M::Epoch>,
        max_future_epochs: u64,
    ) -> Vec<(N, M)> {
        let mut deferred_msgs: Vec<(N, M)> = Vec::new();
        let mut passed_msgs: Vec<_> = Vec::new();
        for msg in self.messages.drain(..) {
            match msg.target.clone() {
                Target::Nodes(mut ids) => {
                    let is_premature = |&them| msg.message.is_premature(them, max_future_epochs);
                    let is_obsolete = |&them| msg.message.is_obsolete(them);
                    for (id, them) in peer_epochs {
                        if ids.contains(id) {
                            if is_premature(them) {
                                deferred_msgs.push((id.clone(), msg.message.clone()));
                                ids.remove(id);
                            } else if is_obsolete(them) {
                                ids.remove(id);
                            }
                        }
                    }
                    if !ids.is_empty() {
                        passed_msgs.push(Target::Nodes(ids).message(msg.message));
                    }
                }
                Target::AllExcept(mut exclude) => {
                    let is_premature = |&them| msg.message.is_premature(them, max_future_epochs);
                    let is_obsolete = |&them| msg.message.is_obsolete(them);
                    for (id, them) in peer_epochs {
                        if !exclude.contains(id) {
                            if is_premature(them) {
                                deferred_msgs.push((id.clone(), msg.message.clone()));
                                exclude.insert(id.clone());
                            } else if is_obsolete(them) {
                                exclude.insert(id.clone());
                            }
                        }
                    }
                    passed_msgs.push(Target::AllExcept(exclude).message(msg.message));
                }
            }
        }
        self.messages.extend(passed_msgs);
        deferred_msgs
    }
}

/// A consensus protocol that defines a message flow.
///
/// Many algorithms require an RNG which must be supplied on each call. It is up to the caller to
/// ensure that this random number generator is cryptographically secure.
pub trait ConsensusProtocol: Send + Sync {
    /// Unique node identifier.
    type NodeId: NodeIdT;
    /// The input provided by the user.
    type Input;
    /// The output type. Some algorithms return an output exactly once, others return multiple
    /// times.
    type Output;
    /// The messages that need to be exchanged between the instances in the participating nodes.
    type Message: Message;
    /// The errors that can occur during execution.
    type Error: Fail;
    /// The kinds of message faults that can be detected during execution.
    type FaultKind: FaultT;

    /// Handles an input provided by the user, and returns the resulting step.
    fn handle_input<R: Rng>(
        &mut self,
        input: Self::Input,
        rng: &mut R,
    ) -> Result<CpStep<Self>, Self::Error>
    where
        Self: Sized;

    /// Handles a message received from node `sender_id`.
    fn handle_message<R: Rng>(
        &mut self,
        sender_id: &Self::NodeId,
        message: Self::Message,
        rng: &mut R,
    ) -> Result<CpStep<Self>, Self::Error>
    where
        Self: Sized;

    /// Returns `true` if execution has completed and this instance can be dropped.
    fn terminated(&self) -> bool;

    /// Returns this node's own ID.
    fn our_id(&self) -> &Self::NodeId;
}
32.993902
99
0.57762
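// A minimal sketch of the `Step::extend`/`join`/`is_empty` pattern above, using a
// stripped-down stand-in type (illustrative only; the real `Step` also carries a fault
// log and targeted messages with generic node and message types).
#[derive(Default, Debug)]
struct MiniStep {
    output: Vec<u32>,
    messages: Vec<String>,
}

impl MiniStep {
    // Adds the outputs and messages of `other` to `self`, mirroring `Step::extend`.
    fn extend(&mut self, other: Self) {
        self.output.extend(other.output);
        self.messages.extend(other.messages);
    }
    // Consuming variant, mirroring `Step::join`.
    fn join(mut self, other: Self) -> Self {
        self.extend(other);
        self
    }
    fn is_empty(&self) -> bool {
        self.output.is_empty() && self.messages.is_empty()
    }
}

fn main() {
    let a = MiniStep { output: vec![1], messages: vec!["m1".into()] };
    let b = MiniStep { output: vec![2], messages: Vec::new() };
    let joined = a.join(b);
    assert!(!joined.is_empty());
    assert_eq!(joined.output, vec![1, 2]);
}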